From 972cc306253c452c06a33e8c59507f58c9a26c6d Mon Sep 17 00:00:00 2001 From: Terry Smith Date: Tue, 6 May 2025 16:02:07 -0300 Subject: [PATCH 01/16] chore: update poetry and tests to support arm machines --- Makefile | 16 +++++++++-- core/tests/test_utils.py | 1 - .../cosmosdb/tests/test_cosmosdb_emulator.py | 3 ++ .../cosmosdb/tests/test_cosmosdb_mongodb.py | 4 +++ modules/cosmosdb/tests/test_cosmosdb_nosql.py | 3 ++ modules/db2/tests/test_db2.py | 1 + .../elasticsearch/tests/test_elasticsearch.py | 2 ++ modules/google/tests/test_google.py | 4 ++- scripts/list_arm_extras.py | 28 +++++++++++++++++++ 9 files changed, 58 insertions(+), 4 deletions(-) create mode 100755 scripts/list_arm_extras.py diff --git a/Makefile b/Makefile index b5dbe88ea..5d687abac 100644 --- a/Makefile +++ b/Makefile @@ -3,16 +3,28 @@ PYTHON_VERSION ?= 3.10 IMAGE = testcontainers-python:${PYTHON_VERSION} -PACKAGES = core $(addprefix modules/,$(notdir $(wildcard modules/*))) +PACKAGES = core $(addprefix modules/,$(notdir $(filter %/, $(wildcard modules/*/)))) UPLOAD = $(addsuffix /upload,${PACKAGES}) -TESTS = $(addsuffix /tests,$(filter-out meta,${PACKAGES})) +TESTS = $(addsuffix /tests,$(filter-out meta,$(filter-out %.md %.txt,${PACKAGES}))) TESTS_DIND = $(addsuffix -dind,${TESTS}) DOCTESTS = $(addsuffix /doctests,$(filter-out modules/README.md,${PACKAGES})) +ARCH := $(shell uname -m) +ARM_ARCHS := arm64 aarch64 +IS_ARM := $(filter $(ARCH),$(ARM_ARCHS)) + +# List of safe extras (excluding 'db2') with original TOML keys +EXTRAS_LIST := $(shell $(PYTHON) scripts/list_arm_extras.py) install: ## Set up the project for development +ifeq ($(IS_ARM),$(ARCH)) + @echo "Detected ARM architecture, skipping 'db2' extra (ibm-db is incompatible)" + poetry install $(foreach extra,$(EXTRAS_LIST),--extras $(extra)) +else + @echo "Detected non-ARM architecture, installing all extras" poetry install --all-extras +endif poetry run pre-commit install build: ## Build the python package diff --git a/core/tests/test_utils.py b/core/tests/test_utils.py index 1923483ea..182b18eba 100644 --- a/core/tests/test_utils.py +++ b/core/tests/test_utils.py @@ -33,7 +33,6 @@ def test_is_windows(monkeypatch: MonkeyPatch) -> None: def test_is_arm(monkeypatch: MonkeyPatch) -> None: - assert not utils.is_arm() monkeypatch.setattr("platform.machine", lambda: "arm64") assert utils.is_arm() monkeypatch.setattr("platform.machine", lambda: "aarch64") diff --git a/modules/cosmosdb/tests/test_cosmosdb_emulator.py b/modules/cosmosdb/tests/test_cosmosdb_emulator.py index 542ddd11c..41653cd4a 100644 --- a/modules/cosmosdb/tests/test_cosmosdb_emulator.py +++ b/modules/cosmosdb/tests/test_cosmosdb_emulator.py @@ -1,7 +1,10 @@ import pytest from testcontainers.cosmosdb._emulator import CosmosDBEmulatorContainer +from testcontainers.core.utils import is_arm + + +@pytest.mark.skipif(is_arm(), reason="Cosmos DB emulator container not available for ARM") def test_runs(): with CosmosDBEmulatorContainer(partition_count=1, bind_ports=False) as emulator: assert emulator.server_certificate_pem is not None diff --git a/modules/cosmosdb/tests/test_cosmosdb_mongodb.py b/modules/cosmosdb/tests/test_cosmosdb_mongodb.py index a50ee82ea..3c10ee19f 100644 --- a/modules/cosmosdb/tests/test_cosmosdb_mongodb.py +++ b/modules/cosmosdb/tests/test_cosmosdb_mongodb.py @@ -1,7 +1,10 @@ import pytest from testcontainers.cosmosdb import CosmosDBMongoEndpointContainer +from testcontainers.core.utils import is_arm + + +@pytest.mark.skipif(is_arm(), reason="Cosmos DB emulator container not available for ARM") def 
test_requires_a_version(): with pytest.raises(AssertionError, match="A MongoDB version is required"): CosmosDBMongoEndpointContainer(mongodb_version=None) @@ -10,6 +13,7 @@ def test_requires_a_version(): CosmosDBMongoEndpointContainer(mongodb_version="4.0") +@pytest.mark.skipif(is_arm(), reason="Cosmos DB emulator container not available for ARM") def test_runs(): with CosmosDBMongoEndpointContainer(mongodb_version="4.0", partition_count=1, bind_ports=False) as emulator: assert emulator.env["AZURE_COSMOS_EMULATOR_ENABLE_MONGODB_ENDPOINT"] == "4.0" diff --git a/modules/cosmosdb/tests/test_cosmosdb_nosql.py b/modules/cosmosdb/tests/test_cosmosdb_nosql.py index a9460a1b0..a48a52ac8 100644 --- a/modules/cosmosdb/tests/test_cosmosdb_nosql.py +++ b/modules/cosmosdb/tests/test_cosmosdb_nosql.py @@ -1,7 +1,10 @@ import pytest from testcontainers.cosmosdb import CosmosDBNoSQLEndpointContainer +from testcontainers.core.utils import is_arm + + +@pytest.mark.skipif(is_arm(), reason="Cosmos DB emulator container not available for ARM") def test_runs(): with CosmosDBNoSQLEndpointContainer(partition_count=1, bind_ports=False) as emulator: assert emulator.get_exposed_port(8081) is not None, "The NoSQL endpoint's port should be exposed" diff --git a/modules/db2/tests/test_db2.py b/modules/db2/tests/test_db2.py index 7b6ea844a..c354832ff 100644 --- a/modules/db2/tests/test_db2.py +++ b/modules/db2/tests/test_db2.py @@ -26,6 +26,7 @@ def test_docker_run_db2(version: str): # - sqlserver # - mongodb # - db2 +@pytest.mark.skipif(is_arm(), reason="db2 container not available for ARM") def test_quoted_password(): user = "db2inst1" dbname = "testdb" diff --git a/modules/elasticsearch/tests/test_elasticsearch.py b/modules/elasticsearch/tests/test_elasticsearch.py index 661a550c6..5108bb40f 100644 --- a/modules/elasticsearch/tests/test_elasticsearch.py +++ b/modules/elasticsearch/tests/test_elasticsearch.py @@ -3,11 +3,13 @@ import pytest +from testcontainers.core.utils import is_arm from testcontainers.elasticsearch import ElasticSearchContainer # The versions below should reflect the latest stable releases @pytest.mark.parametrize("version", ["7.17.18", "8.12.2"]) +@pytest.mark.skipif(is_arm(), reason="elasticsearch container not available for ARM") def test_docker_run_elasticsearch(version): with ElasticSearchContainer(f"elasticsearch:{version}", mem_limit="3G") as es: resp = urllib.request.urlopen(es.get_url()) diff --git a/modules/google/tests/test_google.py b/modules/google/tests/test_google.py index 0c412d706..9b2229ac8 100644 --- a/modules/google/tests/test_google.py +++ b/modules/google/tests/test_google.py @@ -1,6 +1,7 @@ from queue import Queue from google.cloud.datastore import Entity +import time from testcontainers.core.waiting_utils import wait_for_logs from testcontainers.google import PubSubContainer, DatastoreContainer @@ -25,7 +26,8 @@ def test_pubsub_container(): # Receive the message queue = Queue() subscriber.subscribe(subscription_path, queue.put) - message = queue.get(timeout=1) + # timeout 10 is needed to account for slower arm machines + message = queue.get(timeout=10) assert message.data == b"Hello world!" 
message.ack() diff --git a/scripts/list_arm_extras.py b/scripts/list_arm_extras.py new file mode 100755 index 000000000..aebe3ff66 --- /dev/null +++ b/scripts/list_arm_extras.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 + +# used to generate the list of extras in the Makefile +import sys +from pathlib import Path + +# Support both Python 3.10 (needs tomli) and 3.11+ (has tomllib) +if sys.version_info >= (3, 11): + import tomllib +else: + try: + import tomli as tomllib + except ImportError: + ## Python <3.11 detected but 'tomli' is not installed, poetry add --group dev tomli + sys.exit(1) + +SKIPPED_EXTRAS = {"db2"} # skip incompatible extras + + +def get_filtered_extras() -> list[str]: + with Path("pyproject.toml").open("rb") as f: + data = tomllib.load(f) + extras = data["tool"]["poetry"]["extras"] + return [key for key in extras if key not in SKIPPED_EXTRAS] + + +if __name__ == "__main__": + sys.stdout.write(" ".join(get_filtered_extras()) + "\n") From 776c60210ad0fbd445832d8f10907c923d55fdd6 Mon Sep 17 00:00:00 2001 From: Terry Smith Date: Tue, 6 May 2025 17:38:43 -0300 Subject: [PATCH 02/16] chore: enable docker in docker tests to pass on arm machines --- Dockerfile | 15 +++++++++++++-- core/__init__.py | 0 core/tests/__init__.py | 0 core/tests/conftest.py | 10 +++++++++- core/tests/list_arm_extras.py | 18 ++++++++++++++++++ 5 files changed, 40 insertions(+), 3 deletions(-) create mode 100644 core/__init__.py create mode 100644 core/tests/__init__.py create mode 100644 core/tests/list_arm_extras.py diff --git a/Dockerfile b/Dockerfile index 865771fa1..001dbefc6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,8 @@ ARG PYTHON_VERSION=3.10 FROM python:${PYTHON_VERSION}-slim-bookworm +ARG POETRY_EXTRAS + +ENV PYTHONPATH=/workspace ENV POETRY_NO_INTERACTION=1 \ POETRY_VIRTUALENVS_IN_PROJECT=1 \ @@ -20,10 +23,18 @@ RUN bash -c 'python -m venv /opt/poetry-venv && source $_/bin/activate && pip in # install dependencies with poetry COPY pyproject.toml . COPY poetry.lock . -RUN poetry install --all-extras --with dev --no-root +RUN if [ "$POETRY_EXTRAS" = "" ]; then \ + poetry install --all-extras --with dev --no-root; \ + else \ + poetry install --extras "$POETRY_EXTRAS" --with dev --no-root; \ + fi # copy project source COPY . . 
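For reference, the filtering that `scripts/list_arm_extras.py` (added above) performs can be exercised standalone; a minimal sketch, using an illustrative extras table rather than the project's real `pyproject.toml`:

```python
import tomllib  # stdlib on Python 3.11+; the 'tomli' backport offers the same API on 3.10

SAMPLE_PYPROJECT = """
[tool.poetry.extras]
arangodb = ["python-arango"]
db2 = ["ibm_db_sa"]
redis = ["redis"]
"""

# Parse the TOML and drop the keys the script considers unsafe on ARM.
extras = tomllib.loads(SAMPLE_PYPROJECT)["tool"]["poetry"]["extras"]
safe_extras = [key for key in extras if key not in {"db2"}]  # same filter as SKIPPED_EXTRAS
print(" ".join(safe_extras))  # -> arangodb redis
```

The space-separated output is what `EXTRAS_LIST` in the Makefile consumes, expanding each key into a `--extras` flag for `poetry install`.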
# install project with poetry -RUN poetry install --all-extras --with dev +RUN if [ "$POETRY_EXTRAS" = "" ]; then \ + poetry install --all-extras --with dev; \ + else \ + poetry install --extras "$POETRY_EXTRAS" --with dev; \ + fi diff --git a/core/__init__.py b/core/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/core/tests/__init__.py b/core/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/core/tests/conftest.py b/core/tests/conftest.py index a86faa109..902899e69 100644 --- a/core/tests/conftest.py +++ b/core/tests/conftest.py @@ -3,8 +3,11 @@ import pytest from typing import Callable from testcontainers.core.container import DockerClient +from testcontainers.core.utils import is_arm import sys +from .list_arm_extras import get_arm_extras + PROJECT_DIR = Path(__file__).parent.parent.parent.resolve() @@ -24,11 +27,16 @@ def python_testcontainer_image() -> str: py_version = ".".join(map(str, sys.version_info[:2])) image_name = f"testcontainers-python:{py_version}" client = DockerClient() + build_args = {"PYTHON_VERSION": py_version} + + if is_arm(): + build_args["POETRY_EXTRAS"] = get_arm_extras() + client.build( path=str(PROJECT_DIR), tag=image_name, rm=False, - buildargs={"PYTHON_VERSION": py_version}, + buildargs=build_args, ) return image_name diff --git a/core/tests/list_arm_extras.py b/core/tests/list_arm_extras.py new file mode 100644 index 000000000..573505d5f --- /dev/null +++ b/core/tests/list_arm_extras.py @@ -0,0 +1,18 @@ +from pathlib import Path +from testcontainers.core.utils import is_arm + +try: + import tomllib # Python 3.11+ +except ImportError: + import tomli as tomllib + +SKIPPED_EXTRAS = {"db2"} # skip incompatible extras + + +def get_arm_extras(): + with Path("pyproject.toml").open("rb") as f: + data = tomllib.load(f) + + extras = data["tool"]["poetry"]["extras"] + skip = SKIPPED_EXTRAS + return " ".join(k for k in extras if k not in skip) From 59d19689ae629731cc5a7c2381cb01723244f2f4 Mon Sep 17 00:00:00 2001 From: Terry Smith Date: Tue, 6 May 2025 18:13:25 -0300 Subject: [PATCH 03/16] chore: add mac integration skips --- Makefile | 2 -- core/tests/test_core_registry.py | 9 +++++++++ core/tests/test_docker_in_docker.py | 8 ++++++++ core/tests/test_ryuk.py | 8 ++++++++ 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 5d687abac..bf7287e53 100644 --- a/Makefile +++ b/Makefile @@ -19,10 +19,8 @@ EXTRAS_LIST := $(shell $(PYTHON) scripts/list_arm_extras.py) install: ## Set up the project for development ifeq ($(IS_ARM),$(ARCH)) - @echo "Detected ARM architecture, skipping 'db2' extra (ibm-db is incompatible)" poetry install $(foreach extra,$(EXTRAS_LIST),--extras $(extra)) else - @echo "Detected non-ARM architecture, installing all extras" poetry install --all-extras endif poetry run pre-commit install diff --git a/core/tests/test_core_registry.py b/core/tests/test_core_registry.py index 384b06693..36e4730f9 100644 --- a/core/tests/test_core_registry.py +++ b/core/tests/test_core_registry.py @@ -18,8 +18,13 @@ from testcontainers.core.waiting_utils import wait_container_is_ready from testcontainers.registry import DockerRegistryContainer +from testcontainers.core.utils import is_mac +@pytest.mark.skipif( + is_mac(), + reason="Docker Desktop on macOS does not support insecure private registries without daemon reconfiguration", +) def test_missing_on_private_registry(monkeypatch): username = "user" password = "pass" @@ -41,6 +46,10 @@ def 
test_missing_on_private_registry(monkeypatch): wait_container_is_ready(test_container) +@pytest.mark.skipif( + is_mac(), + reason="Docker Desktop on macOS does not support local insecure registries over HTTP without modifying daemon settings", +) @pytest.mark.parametrize( "image,tag,username,password", [ diff --git a/core/tests/test_docker_in_docker.py b/core/tests/test_docker_in_docker.py index b07f80e9a..02b8e1fc4 100644 --- a/core/tests/test_docker_in_docker.py +++ b/core/tests/test_docker_in_docker.py @@ -15,6 +15,7 @@ from testcontainers.core.container import DockerContainer from testcontainers.core.docker_client import DockerClient, LOGGER from testcontainers.core.utils import inside_container +from testcontainers.core.utils import is_mac from testcontainers.core.waiting_utils import wait_for_logs @@ -36,6 +37,7 @@ def _wait_for_dind_return_ip(client, dind): return docker_host_ip +@pytest.mark.skipif(is_mac(), reason="Docker socket forwarding (socat) is unsupported on Docker Desktop for macOS") def test_wait_for_logs_docker_in_docker(): # real dind isn't possible (AFAIK) in CI # forwarding the socket to a container port is at least somewhat the same @@ -64,6 +66,9 @@ def test_wait_for_logs_docker_in_docker(): not_really_dind.remove() +@pytest.mark.skipif( + is_mac(), reason="Bridge networking and Docker socket forwarding are not supported on Docker Desktop for macOS" +) def test_dind_inherits_network(): client = DockerClient() try: @@ -158,6 +163,9 @@ def test_find_host_network_in_dood() -> None: assert DockerClient().find_host_network() == os.environ[EXPECTED_NETWORK_VAR] +@pytest.mark.skipif( + is_mac(), reason="Docker socket mounting and container networking do not work reliably on Docker Desktop for macOS" +) @pytest.mark.skipif(not Path(tcc.ryuk_docker_socket).exists(), reason="No docker socket available") def test_dood(python_testcontainer_image: str) -> None: """ diff --git a/core/tests/test_ryuk.py b/core/tests/test_ryuk.py index 5d6b208af..76556d4f4 100644 --- a/core/tests/test_ryuk.py +++ b/core/tests/test_ryuk.py @@ -8,9 +8,14 @@ from testcontainers.core.config import testcontainers_config from testcontainers.core.container import Reaper from testcontainers.core.container import DockerContainer +from testcontainers.core.utils import is_mac from testcontainers.core.waiting_utils import wait_for_logs +@pytest.mark.skipif( + is_mac(), + reason="Ryuk container reaping is unreliable on Docker Desktop for macOS due to VM-based container lifecycle handling", +) @pytest.mark.inside_docker_check def test_wait_for_reaper(monkeypatch: MonkeyPatch): Reaper.delete_instance() @@ -41,6 +46,9 @@ def test_wait_for_reaper(monkeypatch: MonkeyPatch): Reaper.delete_instance() +@pytest.mark.skipif( + is_mac(), reason="Ryuk disabling behavior is unreliable on Docker Desktop for macOS due to Docker socket emulation" +) @pytest.mark.inside_docker_check def test_container_without_ryuk(monkeypatch: MonkeyPatch): Reaper.delete_instance() From c0653d0d72e0f5ae17ffe97a2d8abcf1f5f2d18f Mon Sep 17 00:00:00 2001 From: Terry Smith Date: Wed, 7 May 2025 14:05:15 -0300 Subject: [PATCH 04/16] refactor: experiment with simplifying build using unreliable markers which seems to work with ibm specifically --- Dockerfile | 15 ++------------- Makefile | 13 +------------ core/__init__.py | 0 core/tests/__init__.py | 0 core/tests/conftest.py | 10 +--------- core/tests/list_arm_extras.py | 18 ------------------ poetry.lock | 8 ++++---- pyproject.toml | 2 +- scripts/list_arm_extras.py | 28 ---------------------------- 
9 files changed, 9 insertions(+), 85 deletions(-) delete mode 100644 core/__init__.py delete mode 100644 core/tests/__init__.py delete mode 100644 core/tests/list_arm_extras.py delete mode 100755 scripts/list_arm_extras.py diff --git a/Dockerfile b/Dockerfile index 001dbefc6..865771fa1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,5 @@ ARG PYTHON_VERSION=3.10 FROM python:${PYTHON_VERSION}-slim-bookworm -ARG POETRY_EXTRAS - -ENV PYTHONPATH=/workspace ENV POETRY_NO_INTERACTION=1 \ POETRY_VIRTUALENVS_IN_PROJECT=1 \ @@ -23,18 +20,10 @@ RUN bash -c 'python -m venv /opt/poetry-venv && source $_/bin/activate && pip in # install dependencies with poetry COPY pyproject.toml . COPY poetry.lock . -RUN if [ "$POETRY_EXTRAS" = "" ]; then \ - poetry install --all-extras --with dev --no-root; \ - else \ - poetry install --extras "$POETRY_EXTRAS" --with dev --no-root; \ - fi +RUN poetry install --all-extras --with dev --no-root # copy project source COPY . . # install project with poetry -RUN if [ "$POETRY_EXTRAS" = "" ]; then \ - poetry install --all-extras --with dev; \ - else \ - poetry install --extras "$POETRY_EXTRAS" --with dev; \ - fi +RUN poetry install --all-extras --with dev diff --git a/Makefile b/Makefile index bf7287e53..ebcd08625 100644 --- a/Makefile +++ b/Makefile @@ -3,26 +3,15 @@ PYTHON_VERSION ?= 3.10 IMAGE = testcontainers-python:${PYTHON_VERSION} -PACKAGES = core $(addprefix modules/,$(notdir $(filter %/, $(wildcard modules/*/)))) +PACKAGES = core $(addprefix modules/,$(notdir $(wildcard modules/*))) UPLOAD = $(addsuffix /upload,${PACKAGES}) TESTS = $(addsuffix /tests,$(filter-out meta,$(filter-out %.md %.txt,${PACKAGES}))) TESTS_DIND = $(addsuffix -dind,${TESTS}) DOCTESTS = $(addsuffix /doctests,$(filter-out modules/README.md,${PACKAGES})) -ARCH := $(shell uname -m) -ARM_ARCHS := arm64 aarch64 -IS_ARM := $(filter $(ARCH),$(ARM_ARCHS)) - -# List of safe extras (excluding 'db2') with original TOML keys -EXTRAS_LIST := $(shell $(PYTHON) scripts/list_arm_extras.py) - install: ## Set up the project for development -ifeq ($(IS_ARM),$(ARCH)) - poetry install $(foreach extra,$(EXTRAS_LIST),--extras $(extra)) -else poetry install --all-extras -endif poetry run pre-commit install build: ## Build the python package diff --git a/core/__init__.py b/core/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/tests/__init__.py b/core/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/core/tests/conftest.py b/core/tests/conftest.py index 902899e69..a86faa109 100644 --- a/core/tests/conftest.py +++ b/core/tests/conftest.py @@ -3,11 +3,8 @@ import pytest from typing import Callable from testcontainers.core.container import DockerClient -from testcontainers.core.utils import is_arm import sys -from .list_arm_extras import get_arm_extras - PROJECT_DIR = Path(__file__).parent.parent.parent.resolve() @@ -27,16 +24,11 @@ def python_testcontainer_image() -> str: py_version = ".".join(map(str, sys.version_info[:2])) image_name = f"testcontainers-python:{py_version}" client = DockerClient() - build_args = {"PYTHON_VERSION": py_version} - - if is_arm(): - build_args["POETRY_EXTRAS"] = get_arm_extras() - client.build( path=str(PROJECT_DIR), tag=image_name, rm=False, - buildargs=build_args, + buildargs={"PYTHON_VERSION": py_version}, ) return image_name diff --git a/core/tests/list_arm_extras.py b/core/tests/list_arm_extras.py deleted file mode 100644 index 573505d5f..000000000 --- a/core/tests/list_arm_extras.py +++ /dev/null @@ -1,18 +0,0 @@ 
-from pathlib import Path -from testcontainers.core.utils import is_arm - -try: - import tomllib # Python 3.11+ -except ImportError: - import tomli as tomllib - -SKIPPED_EXTRAS = {"db2"} # skip incompatible extras - - -def get_arm_extras(): - with Path("pyproject.toml").open("rb") as f: - data = tomllib.load(f) - - extras = data["tool"]["poetry"]["extras"] - skip = SKIPPED_EXTRAS - return " ".join(k for k in extras if k not in skip) diff --git a/poetry.lock b/poetry.lock index 89b14b07f..e0b52f8c8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "alabaster" @@ -1617,7 +1617,7 @@ description = "Python DBI driver for DB2 (LUW, zOS, i5) and IDS" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"db2\"" +markers = "platform_machine != \"aarch64\" and platform_machine != \"arm64\" and extra == \"db2\"" files = [ {file = "ibm_db-3.2.3-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:3399466141c29704f4e8ba709a67ba27ab413239c0244c3c4510126e946ff603"}, {file = "ibm_db-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e12ff6426d4f718e1ff6615e64a2880bd570826f19a031c82dbf296714cafd7d"}, @@ -1659,7 +1659,7 @@ description = "SQLAlchemy support for IBM Data Servers" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"db2\"" +markers = "platform_machine != \"aarch64\" and platform_machine != \"arm64\" and extra == \"db2\"" files = [ {file = "ibm_db_sa-0.4.1-py3-none-any.whl", hash = "sha256:49926ba9799e6ebd9ddd847141537c83d179ecf32fe24b7e997ac4614d3f616a"}, {file = "ibm_db_sa-0.4.1.tar.gz", hash = "sha256:a46df130a3681646490925cf4e1bca12b46283f71eea39b70b4f9a56e95341ac"}, @@ -4986,4 +4986,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.1" python-versions = ">=3.9,<4.0" -content-hash = "5c400cc87dc9708588ee8d7d50646de789235732d868b74ebc43f1cf2a403c88" +content-hash = "91d17ef0905329e552e8181a3b12f03e4c5244a7f23b9278168588e76c2a5802" diff --git a/pyproject.toml b/pyproject.toml index 51a93a340..90689fbc7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,7 +118,7 @@ httpx = { version = "*", optional = true } azure-cosmos = { version = "*", optional = true } cryptography = { version = "*", optional = true } trino = { version = "*", optional = true } -ibm_db_sa = { version = "*", optional = true } +ibm_db_sa = { version = "*", optional = true, markers = "platform_machine != 'aarch64' and platform_machine != 'arm64'" } [tool.poetry.extras] arangodb = ["python-arango"] diff --git a/scripts/list_arm_extras.py b/scripts/list_arm_extras.py deleted file mode 100755 index aebe3ff66..000000000 --- a/scripts/list_arm_extras.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 - -# used to generate the list of extras in the Makefile -import sys -from pathlib import Path - -# Support both Python 3.10 (needs tomli) and 3.11+ (has tomllib) -if sys.version_info >= (3, 11): - import tomllib -else: - try: - import tomli as tomllib - except ImportError: - ## Python <3.11 detected but 'tomli' is not installed, poetry add --group dev tomli - sys.exit(1) - -SKIPPED_EXTRAS = {"db2"} # skip incompatible extras - - -def get_filtered_extras() -> list[str]: - with Path("pyproject.toml").open("rb") as f: - data = tomllib.load(f) - extras = data["tool"]["poetry"]["extras"] - return [key for key in extras if key 
not in SKIPPED_EXTRAS] - - -if __name__ == "__main__": - sys.stdout.write(" ".join(get_filtered_extras()) + "\n") From 28d702476e79ca637c5919b94070eb143cf0da21 Mon Sep 17 00:00:00 2001 From: Terry Smith Date: Thu, 8 May 2025 12:55:09 -0300 Subject: [PATCH 05/16] fix: resolve failed checks --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2500175a8..9c820ffa5 100644 --- a/Makefile +++ b/Makefile @@ -6,10 +6,11 @@ IMAGE = testcontainers-python:${PYTHON_VERSION} PACKAGES = core $(addprefix modules/,$(notdir $(wildcard modules/*))) UPLOAD = $(addsuffix /upload,${PACKAGES}) -TESTS = $(addsuffix /tests,$(filter-out meta,$(filter-out %.md %.txt,${PACKAGES}))) +TESTS = $(addsuffix /tests,$(filter-out meta,${PACKAGES})) TESTS_DIND = $(addsuffix -dind,${TESTS}) DOCTESTS = $(addsuffix /doctests,$(filter-out modules/README.md,${PACKAGES})) + install: ## Set up the project for development poetry install --all-extras poetry run pre-commit install From 765cdce3e6469e43aceecb5baf48ae17e527cb52 Mon Sep 17 00:00:00 2001 From: Terry Date: Fri, 9 May 2025 17:02:10 -0300 Subject: [PATCH 06/16] feat: scaffolding out a new docs site --- .gitignore | 3 + Dockerfile.docs | 5 + Makefile | 41 ++ docs/_headers | 2 + docs/_redirects | 0 docs/contributing.md | 101 ++++ docs/contributing_docs.md | 109 ++++ docs/css/extra.css | 128 +++++ docs/css/tc-header.css | 389 +++++++++++++ docs/favicon.ico | Bin 0 -> 15406 bytes docs/features/creating_container.md | 1 + docs/getting_help.md | 10 + docs/icons/github.svg | 4 + docs/icons/slack.svg | 10 + docs/icons/stackoverflow.svg | 5 + docs/icons/twitter.svg | 4 + docs/index.md | 36 ++ docs/js/tc-header.js | 45 ++ docs/language-logos/dotnet.svg | 7 + docs/language-logos/go.svg | 10 + docs/language-logos/haskell.svg | 6 + docs/language-logos/java.svg | 17 + docs/language-logos/nodejs.svg | 5 + docs/language-logos/python.svg | 8 + docs/language-logos/ruby.svg | 125 +++++ docs/language-logos/rust.svg | 57 ++ docs/logo.png | Bin 0 -> 67366 bytes docs/logo.svg | 92 +++ docs/modules/mysql.md | 93 ++++ docs/poetry.lock | 829 ++++++++++++++++++++++++++++ docs/pyproject.toml | 25 + docs/requirements.txt | 36 ++ docs/testcontainers-logo.svg | 22 + docs/theme/main.html | 10 + docs/theme/partials/header.html | 140 +++++ docs/theme/partials/nav.html | 79 +++ docs/theme/partials/tc-header.html | 157 ++++++ mkdocs.yml | 48 ++ poetry.lock | 2 +- pyproject.toml | 92 ++- 40 files changed, 2704 insertions(+), 49 deletions(-) create mode 100644 Dockerfile.docs create mode 100644 docs/_headers create mode 100644 docs/_redirects create mode 100644 docs/contributing.md create mode 100644 docs/contributing_docs.md create mode 100644 docs/css/extra.css create mode 100644 docs/css/tc-header.css create mode 100644 docs/favicon.ico create mode 100644 docs/features/creating_container.md create mode 100644 docs/getting_help.md create mode 100644 docs/icons/github.svg create mode 100644 docs/icons/slack.svg create mode 100644 docs/icons/stackoverflow.svg create mode 100644 docs/icons/twitter.svg create mode 100644 docs/index.md create mode 100644 docs/js/tc-header.js create mode 100644 docs/language-logos/dotnet.svg create mode 100644 docs/language-logos/go.svg create mode 100644 docs/language-logos/haskell.svg create mode 100644 docs/language-logos/java.svg create mode 100644 docs/language-logos/nodejs.svg create mode 100644 docs/language-logos/python.svg create mode 100644 docs/language-logos/ruby.svg create mode 100644 docs/language-logos/rust.svg 
create mode 100644 docs/logo.png create mode 100644 docs/logo.svg create mode 100644 docs/modules/mysql.md create mode 100644 docs/poetry.lock create mode 100644 docs/pyproject.toml create mode 100644 docs/requirements.txt create mode 100644 docs/testcontainers-logo.svg create mode 100644 docs/theme/main.html create mode 100644 docs/theme/partials/header.html create mode 100644 docs/theme/partials/nav.html create mode 100644 docs/theme/partials/tc-header.html create mode 100644 mkdocs.yml diff --git a/.gitignore b/.gitignore index 18837562c..9da9d0d32 100644 --- a/.gitignore +++ b/.gitignore @@ -72,3 +72,6 @@ venv .python-version .env .github-token + +# docs build +site/ diff --git a/Dockerfile.docs b/Dockerfile.docs new file mode 100644 index 000000000..10944a7d2 --- /dev/null +++ b/Dockerfile.docs @@ -0,0 +1,5 @@ +FROM python:3.11-slim + +RUN pip install poetry + +WORKDIR /docs diff --git a/Makefile b/Makefile index 9c820ffa5..e4b0241ce 100644 --- a/Makefile +++ b/Makefile @@ -68,3 +68,44 @@ clean-all: clean ## Remove all generated files and reset the local virtual envir .PHONY: help help: ## Display command usage @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +## -------------------------------------- + +DOCS_CONTAINER=mkdocs-container +DOCS_IMAGE=mkdocs-poetry +DOCS_DOCKERFILE := Dockerfile.docs + +.PHONY: clean-docs +clean-docs: + @echo "Destroying docs" + docker rm -f $(DOCS_CONTAINER) || true + docker rmi $(DOCS_IMAGE) || true + +.PHONY: docs-ensure-image +docs-ensure-image: + @if [ -z "$$(docker images -q $(DOCS_IMAGE))" ]; then \ + docker build -f $(DOCS_DOCKERFILE) -t $(DOCS_IMAGE) . ; \ + fi + +.PHONY: serve-docs +serve-docs: docs-ensure-image + docker run --rm --name $(DOCS_CONTAINER) -it -p 8000:8000 \ + -v $(PWD):/testcontainers-go \ + -w /testcontainers-go \ + $(DOCS_IMAGE) bash -c "\ + cd docs && poetry install --no-root && \ + poetry run mkdocs serve -f ../mkdocs.yml -a 0.0.0.0:8000" + +.PHONY: watch-docs +watch-docs: docs-ensure-image + docker run --rm --name $(DOCS_CONTAINER) -it -p 8000:8000 \ + -v $(PWD):/testcontainers-go \ + -w /testcontainers-go \ + $(DOCS_IMAGE) bash -c "\ + cd docs && poetry install --no-root && \ + poetry run mkdocs serve -f ../mkdocs.yml -a 0.0.0.0:8000" --live-reload + +# Needed if dependencies are added to the docs site +.PHONY: export-docs-deps +export-docs-deps: + cd docs && poetry export --without-hashes --output requirements.txt diff --git a/docs/_headers b/docs/_headers new file mode 100644 index 000000000..e59f34a29 --- /dev/null +++ b/docs/_headers @@ -0,0 +1,2 @@ +/search/search_index.json + Access-Control-Allow-Origin: * diff --git a/docs/_redirects b/docs/_redirects new file mode 100644 index 000000000..e69de29bb diff --git a/docs/contributing.md b/docs/contributing.md new file mode 100644 index 000000000..bf5a3d639 --- /dev/null +++ b/docs/contributing.md @@ -0,0 +1,101 @@ +# Contributing + +`Testcontainers for Go` is open source, and we love to receive contributions from our community — you! + +There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests, or writing code for the core library or for a technology module. 
+ +In any case, if you like the project, please star the project on [GitHub](https://github.com/testcontainers/testcontainers-go/stargazers) and help spread the word :) +Also join our [Slack workspace](http://slack.testcontainers.org) to get help, share your ideas, and chat with the community. + +## Questions + +GitHub is reserved for bug reports and feature requests; it is not the place for general questions. +If you have a question or an unconfirmed bug, please visit our [Slack workspace](https://testcontainers.slack.com/); +feedback and ideas are always welcome. + +## Code contributions + +If you have a bug fix or new feature that you would like to contribute, please find or open an [issue](https://github.com/testcontainers/testcontainers-go/issues) first. +It's important to talk about what you would like to do, as there may already be someone working on it, +or there may be context to be aware of before implementing the change. + +Next would be to fork the repository and make your changes in a feature branch. **Please do not commit changes to the `main` branch**, +otherwise we won't be able to contribute to your changes directly in the PR. + +### Submitting your changes + +Please just be sure to: + +- follow the style, naming and structure conventions of the rest of the project. +- make commits atomic and easy to merge. +- use [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) for the PR title. This will help us to understand the nature of the changes, and to generate the changelog after all the commits in the PR are squashed. + - Please use the `feat!`, `chore!`, `fix!`... types for breaking changes, as these categories are considered as `breaking change` in the changelog. Please use the `!` to denote a breaking change. + - Please use the `security` type for security fixes, as these categories are considered as `security` in the changelog. + - Please use the `feat` type for new features, as these categories are considered as `feature` in the changelog. + - Please use the `fix` type for bug fixes, as these categories are considered as `bug` in the changelog. + - Please use the `docs` type for documentation updates, as these categories are considered as `documentation` in the changelog. + - Please use the `chore` type for housekeeping commits, including `build`, `ci`, `style`, `refactor`, `test`, `perf` and so on, as these categories are considered as `chore` in the changelog. + - Please use the `deps` type for dependency updates, as these categories are considered as `dependencies` in the changelog. + +!!!important +There is a GitHub Actions workflow that will check if your PR title follows the conventional commits convention. If not, it contributes a failed check to your PR. +To know more about the conventions, please refer to the [workflow file](https://github.com/testcontainers/testcontainers-go/blob/main/.github/workflows/conventions.yml). + +- use [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) for your commit messages, as it improves the readability of the commit history, and the review process. Please follow the above conventions for the PR title. +- unless necessary, please try to **avoid pushing --force** to the published branch you submitted a PR from, as it makes it harder to review the changes from a given previous state. +- apply format running `make lint-all`. It will run `golangci-lint` for the core and modules with the configuration set in the root directory of the project. 
Please be aware that the lint stage on CI could fail if this is not done. + - For linting just the modules: `make -C modules lint-modules` + - For linting just the examples: `make -C examples lint-examples` + - For linting just the modulegen: `make -C modulegen lint` +- verify all tests are passing. Build and test the project with `make test-all` to do this. + _ For a given module or example, go to the module or example directory and run `make test`. + _ If you find an `ld warning` message on MacOS, you can ignore it. It is indeed a warning: https://github.com/golang/go/issues/61229 + + > === Errors + > ld: warning: '/private/var/folders/3y/8hbf585d4yl6f8j5yzqx6wz80000gn/T/go-link-2319589277/000018.o' has malformed LC_DYSYMTAB, expected 98 undefined symbols to start at index 1626, found 95 undefined symbols starting at index 1626 + +- when updating the `go.mod` file, please run `make tidy-all` to ensure all modules are updated. + +## Documentation contributions + +The _Testcontainers for Go_ documentation is a static site built with [MkDocs](https://www.mkdocs.org/). +We use the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme, which offers a number of useful extensions to MkDocs. + +We publish our documentation using Netlify. + +### Adding code snippets + +To include code snippets in the documentation, we use the [codeinclude plugin](https://github.com/rnorth/mkdocs-codeinclude-plugin), which uses the following syntax: + +> <!--codeinclude-->
> [Human readable title for snippet](./relative_path_to_example_code.go) targeting_expression
> [Human readable title for snippet](./relative_path_to_example_code.go) targeting_expression
> <!--/codeinclude-->
+ +Where each title snippet in the same `codeinclude` block would represent a new tab +in the snippet, and each `targeting_expression` would be: + +- `block:someString` or +- `inside_block:someString` + +Please refer to the [codeinclude plugin documentation](https://github.com/rnorth/mkdocs-codeinclude-plugin) for more information. + +### Previewing rendered content + +From the root directory of the repository, you can use the following command to build and serve the documentation locally: + +```shell +make serve-docs +``` + +It will use a Docker container to install the required dependencies and start a local server at `http://localhost:8000`. + +Once finished, you can destroy the container with the following command: + +```shell +make clean-docs +``` + +### PR Preview deployments + +Note that documentation for pull requests will automatically be published by Netlify as 'deploy previews'. +These deployment previews can be accessed via the `deploy/netlify` check that appears for each pull request. + +Please check the GitHub comment Netlify posts on the PR for the URL to the deployment preview. diff --git a/docs/contributing_docs.md b/docs/contributing_docs.md new file mode 100644 index 000000000..343e683ff --- /dev/null +++ b/docs/contributing_docs.md @@ -0,0 +1,109 @@ +# Contributing to documentation + +The Testcontainers for Java documentation is a static site built with [MkDocs](https://www.mkdocs.org/). +We use the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme, which offers a number of useful extensions to MkDocs. + +In addition we use a [custom plugin](https://github.com/rnorth/mkdocs-codeinclude-plugin) for inclusion of code snippets. + +We publish our documentation using Netlify. + +## Previewing rendered content + +### Using Docker locally + +The root of the project contains a `docker-compose.yml` file. Simply run `docker-compose up` and then access the docs at [http://localhost:8000](http://localhost:8000). + +### Using Python locally + +* Ensure that you have Python 3.8.0 or higher. +* Set up a virtualenv and run `pip install -r requirements.txt` in the `testcontainers-java` root directory. +* Once Python dependencies have been installed, run `mkdocs serve` to start a local auto-updating MkDocs server. + +### PR Preview deployments + +Note that documentation for pull requests will automatically be published by Netlify as 'deploy previews'. +These deployment previews can be accessed via the `deploy/netlify` check that appears for each pull request. + +## Codeincludes + +The Gradle project under `docs/examples` is intended to hold compilable, runnable example code that can be included as +snippets into the documentation at build-time. + +As a result, we can have more confidence that code samples shown in the documentation is valid. + +We use a custom plugin for MkDocs to include snippets into our docs. + +A codeinclude block will resemble a regular markdown link surrounded by a pair of XML comments, e.g.: + + + +
+<!--codeinclude-->
+[Human readable title for snippet](./relative_path_to_example_code.java) targeting_expression
+<!--/codeinclude-->
+
+ +Where `targeting_expression` could be: + +* `block:someString` or +* `inside_block:someString` + +If these are provided, the macro will seek out any line containing the token `someString` and grab the next curly brace +delimited block that it finds. `block` will grab the starting line and closing brace, whereas `inside_block` will omit +these. + +e.g., given: +```java + +public class FooService { + + public void doFoo() { + foo.doSomething(); + } + + ... + +``` + +If we use `block:doFoo` as our targeting expression, we will have the following content included into our page: + +```java +public void doFoo() { + foo.doSomething(); +} +``` + +Whereas using `inside_block:doFoo` we would just have the inner content of the method included: + +```java +foo.doSomething(); +``` + +Note that: + +* Any code included will have its indentation reduced +* Every line in the source file will be searched for an instance of the token (e.g. `doFoo`). If more than one line + includes that token, then potentially more than one block could be targeted for inclusion. It is advisable to use a + specific, unique token to avoid unexpected behaviour. + +When we wish to include a section of code that does not naturally appear within braces, we can simply insert our token, +with matching braces, in a comment. +While a little ugly, this has the benefit of working in any context and is easy to understand. +For example: + +```java +public class FooService { + + public void boringMethod() { + doSomethingBoring(); + + // doFoo { + doTheThingThatWeActuallyWantToShow(); + // } + } + + +``` diff --git a/docs/css/extra.css b/docs/css/extra.css new file mode 100644 index 000000000..04eb018a0 --- /dev/null +++ b/docs/css/extra.css @@ -0,0 +1,128 @@ +h1, h2, h3, h4, h5, h6 { + font-family: 'Rubik', sans-serif; +} + +[data-md-color-scheme="testcontainers"] { + --md-primary-fg-color: #00bac2; + --md-accent-fg-color: #361E5B; + --md-typeset-a-color: #0C94AA; + --md-primary-fg-color--dark: #291A3F; + --md-default-fg-color--lightest: #F2F4FE; + --md-footer-fg-color: #361E5B; + --md-footer-fg-color--light: #746C8F; + --md-footer-fg-color--lighter: #C3BEDE; + --md-footer-bg-color: #F7F9FD; + --md-footer-bg-color--dark: #F7F9FD; +} + +.card-grid { + display: grid; + gap: 10px; +} + +.tc-version { + font-size: 1.1em; + text-align: center; + margin: 0; +} + +@media (min-width: 680px) { + .card-grid { + grid-template-columns: repeat(3, 1fr); + } +} + +body .card-grid-item { + display: flex; + align-items: center; + gap: 20px; + border: 1px solid #C3BEDE; + border-radius: 6px; + padding: 16px; + font-weight: 600; + color: #9991B5; + background: #F2F4FE; +} + +body .card-grid-item:hover, +body .card-grid-item:focus { + color: #9991B5; +} + +.card-grid-item[href] { + color: var(--md-primary-fg-color--dark); + background: transparent; +} + +.card-grid-item[href]:hover, +.card-grid-item[href]:focus { + background: #F2F4FE; + color: var(--md-primary-fg-color--dark); +} + +.community-callout-wrapper { + padding: 30px 10px 0 10px; +} + +.community-callout { + color: #F2F4FE; + background: linear-gradient(10.88deg, rgba(102, 56, 242, 0.4) 9.56%, #6638F2 100%), #291A3F; + box-shadow: 0px 20px 45px rgba(#9991B5, 0.75); + border-radius: 10px; + padding: 20px; +} + +.community-callout h2 { + font-size: 1.15em; + margin: 0 0 20px 0; + color: #F2F4FE; + text-align: center; +} + +.community-callout ul { + list-style: none; + padding: 0; + display: flex; + justify-content: space-between; + gap: 10px; + margin-top: 20px; + margin-bottom: 0; +} + 
+.community-callout a { + transition: opacity 0.2s ease; +} + +.community-callout a:hover { + opacity: 0.5; +} + +.community-callout a img { + height: 1.75em; + width: auto; + aspect-ratio: 1; +} + +@media (min-width: 1220px) { + .community-callout-wrapper { + padding: 40px 0 0; + } + + .community-callout h2 { + font-size: 1.25em; + } + + .community-callout a img { + height: 2em; + } +} + +@media (min-width: 1600px) { + .community-callout h2 { + font-size: 1.15em; + } + + .community-callout a img { + height: 1.75em; + } +} diff --git a/docs/css/tc-header.css b/docs/css/tc-header.css new file mode 100644 index 000000000..de78d636e --- /dev/null +++ b/docs/css/tc-header.css @@ -0,0 +1,389 @@ + +:root { + --color-catskill: #F2F4FE; + --color-catskill-45: rgba(242, 244, 254, 0.45); + --color-mist: #E7EAFB; + --color-fog: #C3C7E6; + --color-smoke: #9991B5; + --color-smoke-75: rgba(153, 145, 181, 0.75); + --color-storm: #746C8F; + --color-topaz: #00BAC2; + --color-pacific: #17A6B2; + --color-teal: #027F9E; + --color-eggplant: #291A3F; + --color-plum: #361E5B; + +} + +#site-header { + color: var(--color-storm); + background: #fff; + font-family: 'Rubik', Arial, Helvetica, sans-serif; + font-size: 12px; + line-height: 1.5; + position: relative; + width: 100%; + z-index: 4; + display: flex; + align-items: center; + justify-content: space-between; + gap: 20px; + padding: 20px; +} + +body.tc-header-active #site-header { + z-index: 5; +} + +#site-header .brand { + display: flex; + justify-content: space-between; + gap: 20px; + width: 100%; +} + +#site-header .logo { + display: flex; +} + +#site-header .logo img, +#site-header .logo svg { + height: 30px; + width: auto; + max-width: 100%; +} + +#site-header #mobile-menu-toggle { + background: none; + border: none; + display: flex; + align-items: center; + gap: 10px; + cursor: pointer; + color: var(--color-eggplant); + padding: 0; + margin: 0; + font-weight: 500; +} + +body.mobile-menu #site-header #mobile-menu-toggle { + color: var(--color-topaz); +} + +#site-header ul { + list-style: none; + padding: 0; + margin: 0; +} + +#site-header nav { + display: none; +} + +#site-header .menu-item { + display: flex; +} + +#site-header .menu-item button, +#site-header .menu-item a { + min-height: 30px; + display: flex; + gap: 6px; + align-items: center; + border: none; + background: none; + cursor: pointer; + padding: 0; + font-weight: 500; + color: var(--color-eggplant); + text-decoration: none; + font-size: 14px; + transition: color 0.2s ease; + white-space: nowrap; +} + +#site-header .menu-item .badge { + color: white; + font-size: 10px; + padding: 2px 6px; + background-color: #0FD5C6; // somehow $topaz is too dark for me. 
+text-align: center; + text-decoration: none; + display: inline-block; + border-radius: 6px; + &:hover { + + } +} + +#site-header .menu-item button:hover, +#site-header .menu-item a:hover { + color: var(--color-topaz); +} + +#site-header .menu-item button .icon-external, +#site-header .menu-item a .icon-externa { + margin-left: auto; + opacity: .3; + flex-shrink: 0; +} + +#site-header .menu-item button .icon-caret, +#site-header .menu-item a .icon-caret { + opacity: .3; + height: 8px; +} + +#site-header .menu-item button .icon-slack, +#site-header .menu-item a .icon-slack, +#site-header .menu-item button .icon-github, +#site-header .menu-item a .icon-github { + height: 18px; +} + +#site-header .menu-item .menu-dropdown { + flex-direction: column; +} + +body #site-header .menu-item .menu-dropdown { + display: none; +} + +#site-header .menu-item.has-children.active .menu-dropdown { + display: flex; + z-index: 10; +} + +#site-header .menu-dropdown-item + .menu-dropdown-item { + border-top: 1px solid var(--color-mist); +} + +#site-header .menu-dropdown-item a { + display: flex; + gap: 10px; + align-items: center; + padding: 10px 20px; + font-weight: 500; + color: var(--color-eggplant); + text-decoration: none; + transition: + color 0.2s ease, + background 0.2s ease; +} + +#site-header .menu-dropdown-item a .icon-external { + margin-left: auto; + color: var(--color-fog); + flex-shrink: 0; + opacity: 1; +} + +#site-header .menu-dropdown-item a:hover { + background-color: var(--color-catskill-45); +} + +#site-header .menu-dropdown-item a:hover .icon-external { + color: var(--color-topaz); +} + +#site-header .menu-dropdown-item a img { + height: 24px; +} + +.md-header { + background-color: var(--color-catskill); + color: var(--color-eggplant); +} + +.md-header.md-header--shadow { + box-shadow: none; +} + +.md-header__inner.md-grid { + max-width: 100%; + padding: 1.5px 20px; +} + +[dir=ltr] .md-header__title { + margin: 0; +} + +.md-header__topic:first-child { + font-size: 16px; + font-weight: 500; + font-family: 'Rubik', Arial, Helvetica, sans-serif; +} + +.md-header__title.md-header__title--active .md-header__topic, +.md-header__title[data-md-state=active] .md-header__topic { + opacity: 1; + pointer-events: all; + transform: translateX(0); + transition: none; + z-index: 0; +} + +.md-header__topic a { + max-width: 100%; + overflow: hidden; + text-overflow: ellipsis; + transition: color .2s ease; +} + +.md-header__topic a:hover { + color: var(--color-topaz); +} + +div.md-header__source { + width: auto; +} + +div.md-source__repository { + max-width: 100%; +} + +.md-main { + padding: 0 12px; +} + +@media screen and (min-width: 60em) { + form.md-search__form { + background-color: #FBFBFF; + color: var(--color-storm); + } + + form.md-search__form:hover { + background-color: #fff; + } + + .md-search__input + .md-search__icon { + color: var(--color-plum); + } + + .md-search__input::placeholder { + color: var(--color-smoke); + } +} + +@media (min-width: 500px) { + #site-header { + font-size: 16px; + padding: 20px 40px; + } + #site-header .logo img, + #site-header .logo svg { + height: 48px; + } + + #site-header .menu-item button .icon-caret, + #site-header .menu-item a .icon-caret { + height: 10px; + } + + #site-header .menu-item button .icon-slack, + #site-header .menu-item a .icon-slack, + #site-header .menu-item button .icon-github, + #site-header .menu-item a .icon-github { + height: 24px; + } + + .md-header__inner.md-grid { + padding: 5px 40px; + } + + .md-main { + padding: 0 32px; + } +} + +@media 
(min-width: 1024px) { + #site-header #mobile-menu-toggle { + display: none; + } + + #site-header nav { + display: block; + } + + #site-header .menu { + display: flex; + justify-content: center; + gap: 30px; + } + + #site-header .menu-item { + align-items: center; + position: relative; + } + + #site-header .menu-item button, + #site-header .menu-item a { + min-height: 48px; + gap: 8px; + font-size: 16px; + } + + #site-header .menu-item .menu-dropdown { + position: absolute; + top: 100%; + right: -8px; + border: 1px solid var(--color-mist); + border-radius: 6px; + background: #fff; + box-shadow: 0px 30px 35px var(--color-smoke-75); + min-width: 200px; + } +} + + +@media (max-width: 1023px) { + #site-header { + flex-direction: column; + } + + body.mobile-tc-header-active #site-header { + z-index: 5; + } + + body.mobile-menu #site-header nav { + display: flex; + } + + #site-header nav { + position: absolute; + top: calc(100% - 5px); + width: calc(100% - 80px); + flex-direction: column; + border: 1px solid var(--color-mist); + border-radius: 6px; + background: #fff; + box-shadow: 0px 30px 35px var(--color-smoke-75); + min-width: 200px; + } + + #site-header .menu-item { + flex-direction: column; + } + #site-header .menu-item + .menu-item { + border-top: 1px solid var(--color-mist); + } + + #site-header .menu-item button, + #site-header .menu-item a { + padding: 10px 20px; + } + + #site-header .menu-item.has-children.active .menu-dropdown { + border-top: 1px solid var(--color-mist); + } + + #site-header .menu-dropdown-item a { + padding: 10px 20px 10px 30px; + } +} + +@media (max-width: 499px) { + #site-header nav { + width: calc(100% - 40px); + } +} diff --git a/docs/favicon.ico b/docs/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..311a0acaa373f197360673f8ac6bc1b3c38c7e7f GIT binary patch literal 15406 zcmeHO3vg7`8NQ;n1(Fc*fUvtaBoIQ9&1N6>-t|3=PDkqlQE+Rqe*fLv&Aq$#CcA7vI%a3? 
z+fLGTNLP*M_muNWu@%_#GDI=+_+!jVEj zs6ZVwL5o;9ZfX5OcJBpYQ8^lRD6a$^(s`*i4&=M-XtOmUD*v7A^wGp}AMtb8A#Vv5 zcx_hwOcY9f6c(ksQ1{dNQiblnxs>+)sy}UQ%%QtR52yN}3WY`gCt-BlFb;-KiG* zT?)Vk-ecRCdxei4Uss)kp?PgJ-Fi7}ne7L*7IMgAxvlmaWs~~B#_vWypRxTe8ts$Qn$>}N`F)#76R)YHGPO&%RCR{jJ&-U!;It9!bCtNuboZYO9S#`SHa z*blw?H9hRF8{(_9YL{twz)_V4J6a8TUukrc@5+}UQC@Lfu7_QGkNj!{;T+h^OxWl@ zjPw+mKXxR|y|hBLef>Ac=EIO(ZeSml35c-G`xDUpy}oaLo_Vp0o@g$h!>{$FR`~JN za|cs{r$*I3s{chU0);RB`L!WW*yu4y|5lwY@ z5_NI{^v}{TTq%_OP_OI8`NeebXg`Cj6KQwHza|I1x2(*jd+SQ#7?>7l_pze?^b1Bk zo6$~(K4aLtG`E+2$GGg3|Np=Jb+r!Fk3H?Vr|kdGYgotr(uQ?lWqd7|xQ^9m>hGD- z=RHgJ>)?^!iM*P6_~4Knsk4|Kh6($g?|R`*qlT{KOSAG$KHEGLm};ydpnuy);Vz>U5pRms#iuZG%oCEG@Hr<0b= zb*uYAeLJ4(LZZ?>*5MhENT<7Z>^H9BJ(j*de!lqviPkQ2#r?sWhg>w{JjtkI)C285 z)Y+uVyQ9<9_kd9K1H^W2#oF{=MjVLgjPlaFt7Cg_{WsA4XUHMVu7zE9)pt7I0!7|J z$YVX$(O>In>7UhaqVnkVc7J|4eWz15Z1;E~4&?>bt45ojVK2`G21~AV`pTp(#_J8B z`%Xmkza4b=4D0z!`u)DO*ZY_V;XIJD0?Y@byows1yZ;o^L>H|4d zd6o^%kPm^vsw|GP0rNiC-wACTz(;(BxI=qLl&<5r+Zh7a03%T7E8}+tZ#C8>E0>B zXwS3RbQtlpR>aYscp#VNUsXnvMkdecT0hVs_>dX{HmSI_2j_Rz8|SDZcI6Jp>n)95 z;(bE7L<`54(UvXwYWz`+!vMb)hiiQ#i<;&QqT8=1r}4v**F@Z(s!LJ+BmBT^V+tyV zFzrl!wU7hWj1lzv2K?dYYX6{P#IDfnS|@FOJfHTx+~1hb(&6X*BFD|PEzeQos?&Wg zOPgX^5mEkKxJU||OstpVZUL-k5cAv(yETt%!&aurZd$%Tr2R+CV{&Xin*6o>u6Ez{ za5mi+97FG&J`soHf^S<6S$|+9?8os%oQrhG64T<>FyKNuDSgqc9j5psm5M!eR@o|%R1G#VDPs-nz;lCZW5^Wz4&x@GJxT~^s<*(<7 z^DY|}<-w*n*U;(nSjMU1zpr6+z{He zw_ME+alWUmtsJ=)JNPi3OS8hQm7h-hd}d?si#X4=x6CxJrRJ@oBPjZKQOdeF)%aQ7 zhmrR@@X7#sYJaII?!;V(%16sEimuPY-Yj;$Pm_1z{Z!&tZ5+7UpLHOgR}s%yB;vJt zSI-u1#rR-8uvRm>P8I$`$NN+JbGDANkLl+?HJ;S5u@kvrK5MxqCy9ri#Lx2Pv!452 z%A?&DHm>1H_paJhE5^o#0)tKU6n-9?`(Mt3KO71>$vpRK#h=Ot`&;Itht~i5Ugli6 zn6a%`VNNWM*%bDD7#l5*;LKSD`zg-L<2<~ZI9Hz-+X8+*hn}>5?}(p#tlukZ&z$GJ z`9g_S-IH0~zdIfo`^tsL8J{l^)70o2?#R=d#ZOQsa4a6N<@XRL`o_>EzW+!3>KZR? ze59KA>_c~WQvKc9E4l!1MT*si|5$dw!FP&tfFYoG~ER|2O1Heo<%lx`8`>KGBQ^E(-{b zzI8<&1-X@jma#;{Z>Qo6{?a*a+R>W6E_Up2(gU;HG}Qy#7C9PV|C;lDL7UslF=Z>f zR^{BLj&nq!{5E9pMY1k9uB6bcANy$Cau+csmZ_E9@VAy0C-J$6nHQVqGE{ks$``PK zH^X-Ccu5e>wZfNbIiE2`fA^s8cg=b5(}Z%F7L4=K`iITOIAdSi=%PDr@Y47Z$#xR& z7qWuP?+oTui>cyEr5?xM#};}<;N12`HNOxa)19$Bwu8w|@v90RK_^ zM`QYTc($Mq|9=L&+-sSWG{f#d-qkMMH6~B}f6Zx=xtSicGMq!I0uSww>A%fvipy|) z%sI6{LCr|5-I;Wc + + diff --git a/docs/icons/slack.svg b/docs/icons/slack.svg new file mode 100644 index 000000000..1b371770b --- /dev/null +++ b/docs/icons/slack.svg @@ -0,0 +1,10 @@ + + + + + + diff --git a/docs/icons/stackoverflow.svg b/docs/icons/stackoverflow.svg new file mode 100644 index 000000000..0cf51ec46 --- /dev/null +++ b/docs/icons/stackoverflow.svg @@ -0,0 +1,5 @@ + + + + diff --git a/docs/icons/twitter.svg b/docs/icons/twitter.svg new file mode 100644 index 000000000..a6a902ce7 --- /dev/null +++ b/docs/icons/twitter.svg @@ -0,0 +1,4 @@ + + + diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..11593916d --- /dev/null +++ b/docs/index.md @@ -0,0 +1,36 @@ +# Testcontainers for Python + +

Not using Python? Here are other supported languages!

+
+ Java + Go + .NET + Node.js + Python + Rust + Haskell + Ruby +
+ +## About Testcontainers for Python + +_Testcontainers for Python_ is a Python library that supports your integration tests, providing lightweight, throwaway instances of common databases, Selenium web browsers, or anything else that can run in a Docker container. + +Testcontainers make the following kinds of tests easier: + +- **Data access layer integration tests**: use a containerized instance of a MySQL, PostgreSQL or Oracle database to test your data access layer code for complete compatibility, but without requiring complex setup on developers' machines and safe in the knowledge that your tests will always start with a known DB state. Any other database type that can be containerized can also be used. +- **Application integration tests**: for running your application in a short-lived test mode with dependencies, such as databases, message queues or web servers. + +## Prerequisites + +## License + +See [LICENSE](https://raw.githubusercontent.com/testcontainers/testcontainers-python/main/LICENSE). + +## Attributions + +## Copyright + +Copyright (c) 2015-2021 Richard North and other authors. + +See [AUTHORS](https://raw.githubusercontent.com/testcontainers/testcontainers-python/main/AUTHORS) for contributors. diff --git a/docs/js/tc-header.js b/docs/js/tc-header.js new file mode 100644 index 000000000..7d51ebf6b --- /dev/null +++ b/docs/js/tc-header.js @@ -0,0 +1,45 @@ +const mobileToggle = document.getElementById("mobile-menu-toggle"); +const mobileSubToggle = document.getElementById("mobile-submenu-toggle"); +function toggleMobileMenu() { + document.body.classList.toggle('mobile-menu'); + document.body.classList.toggle("mobile-tc-header-active"); +} +function toggleMobileSubmenu() { + document.body.classList.toggle('mobile-submenu'); +} +if (mobileToggle) + mobileToggle.addEventListener("click", toggleMobileMenu); +if (mobileSubToggle) + mobileSubToggle.addEventListener("click", toggleMobileSubmenu); + +const allParentMenuItems = document.querySelectorAll("#site-header .menu-item.has-children"); +function clearActiveMenuItem() { + document.body.classList.remove("tc-header-active"); + allParentMenuItems.forEach((item) => { + item.classList.remove("active"); + }); +} +function setActiveMenuItem(e) { + clearActiveMenuItem(); + e.currentTarget.closest(".menu-item").classList.add("active"); + document.body.classList.add("tc-header-active"); +} +allParentMenuItems.forEach((item) => { + const trigger = item.querySelector(":scope > a, :scope > button"); + + trigger.addEventListener("click", (e) => { + if (e.currentTarget.closest(".menu-item").classList.contains("active")) { + clearActiveMenuItem(); + } else { + setActiveMenuItem(e); + } + }); + + trigger.addEventListener("mouseenter", (e) => { + setActiveMenuItem(e); + }); + + item.addEventListener("mouseleave", (e) => { + clearActiveMenuItem(); + }); +}); diff --git a/docs/language-logos/dotnet.svg b/docs/language-logos/dotnet.svg new file mode 100644 index 000000000..496753d54 --- /dev/null +++ b/docs/language-logos/dotnet.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/docs/language-logos/go.svg b/docs/language-logos/go.svg new file mode 100644 index 000000000..58ba79abd --- /dev/null +++ b/docs/language-logos/go.svg @@ -0,0 +1,10 @@ + + + + + + + diff --git a/docs/language-logos/haskell.svg b/docs/language-logos/haskell.svg new file mode 100644 index 000000000..eb6de3776 --- /dev/null +++ b/docs/language-logos/haskell.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/docs/language-logos/java.svg b/docs/language-logos/java.svg new file mode 100644 index 
000000000..d9080555a --- /dev/null +++ b/docs/language-logos/java.svg @@ -0,0 +1,17 @@ + + + + + + + + + diff --git a/docs/language-logos/nodejs.svg b/docs/language-logos/nodejs.svg new file mode 100644 index 000000000..34af396b0 --- /dev/null +++ b/docs/language-logos/nodejs.svg @@ -0,0 +1,5 @@ + + + diff --git a/docs/language-logos/python.svg b/docs/language-logos/python.svg new file mode 100644 index 000000000..c7ba2353b --- /dev/null +++ b/docs/language-logos/python.svg @@ -0,0 +1,8 @@ + + + + diff --git a/docs/language-logos/ruby.svg b/docs/language-logos/ruby.svg new file mode 100644 index 000000000..05537cedf --- /dev/null +++ b/docs/language-logos/ruby.svg @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/language-logos/rust.svg b/docs/language-logos/rust.svg new file mode 100644 index 000000000..1691f56bb --- /dev/null +++ b/docs/language-logos/rust.svg @@ -0,0 +1,57 @@ + + + diff --git a/docs/logo.png b/docs/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..88961b3e3f0c0520a48e91b06e353a73fce35594 GIT binary patch literal 67366 zcmbTe2|SeT`!+tdP_h$A!&sA@Y{d|=B}5XHeJ5+#XU38xd&-`%)1s21q$~+#Us@5_ z8ZCqo#w`Eq9_s1&{=UEWeLw%_^XZ|Cxv%@auJb&P^Ei%klFUyS?B2O|CkzJLZDgo- z5(c9O-_qnW(Sk4g8!N5B7iNFMGeI!eE-vT~4J^Nq4+dj+;b~=jkb`2JCa&`B-pov&~+KdqLbkRhdK4dCy>aXkS;b|Bi=xP~$ z!pb?^+gaTOfz%Q@5~=~-z}Gd{Q7F{c=R%N1s3wB?z8c_r=%3{fLe!T8dut+epbHAw zn3@ae`UScQsmsdCILph+3#qEhDyS+rE2_FVI!OyD$}1|$$*ajJsK_WPYA7AjP*xDy z{tp2z80g}raZ>Nt_T|9eG!Y)b!TuU@av>ohvLQ;ceu3_C3hL_Wa`K9Dii$Ge6*56# z7lIu_WiAAX{C)>L*C6LWPyb*~zY9XpJ32b~T@2PlfW>azg|Gjgx4jUw{iMJ%lM8k9 zms60Hhwh5{LKo*huk*hc=tI4@i?f`okE^fih2S9YT7^Ha_4n`#_6zdx`(NJr&yW9g z0q}H9P5)fuzplmC_s=DQg7q(f+t_{}|Mk^DR$>0GawlDb{4NGMyXs#8%M^j`#$Q7> z(A6>6FVM=*&*%4Eng3q0kfO4zypW`+qqFA)=q3*Q^((G=j=`>)2x#AA6y#+TRjm{h zG!)e|6qF9iD{9EgZ@tvi&&AU%?7zKKNnS%q-|k@RZ9_aAXIS*Mnl7Fq^DyQ zTKN50SfSv{iz<;{`)*_o^ce1yF%+S38g7#+VenybsxxBY^KY7DAa*>>w3j&Hq&uaS z%oU1smZ%l$v|_btS@o(ZO+ff2ht;vGhaFBoK{3-A5naz7*ZcTbHQ+1r zCH^0UPFF=OlG@wN?J_x7<4BY3eWD}#LJCJUag#9i&ePM$eB)_cUHk48Oy3SX%OCyo zR6^L|dLZL_v>;>gGcC{K!*P3+m!5Y$5JQKJf5@=utTOL5+Ls59YsZ&HF$MfY&9ev_ zKlm9N%krv&&7zl|zm3>pqPLQ@92;kQdH%qL&V}$Mz3&OQxbDx4OI6m(v7zB7<3&fL z2*Ly{5u_f2VPaofhVe`%!Jj!Y+QN42RD$k_9 zE$Z<|OE3=kO#INdtK$!gDK;g$nxExx;QP}9-q)say&Dh66{87F9H$Tx29R_MttAte zI!~EK?poNDpcd`MxX+$7?O^ae%;2N?+SQw;<@}rU9}dMb7uzJ6pQe@1XVR}z*g@K` zFhpE4zb!+f_r@!Rd@{-WsP7=L_@QE^d4Yw{)M3e*fp*R%R6=E9f)_9ARD;EXq)?kG zJjy(}soL%HJ)_ezof@-W7?Ums^A5n_6$ccIf)TKchp-yDcXnWc`A006pojk1D71&QjdOo|8B;)s@!MGwdOnlfMwGWnIkiL#b$r z3g%u_UdmoO?8lP=u^Ym}Zc;SdvK+ifnsU`RBT;LQC$9NET-t_pq2W!Uns|d%rZ>F3 zY`2EFZuUyDFCi+BSvFUDYfd;1VBh&jPrZ77)Y>B)fxP#GgH-%r$uGjm*j;ji6*j~~ zP^@*9!edcd4|M5CC$FLv@AG}F#E=D#wM#RwPqTWzbj%DJ=pBtq7?5vH*X6_YpWXTF z(mp@U$x<{gAt6E|SbcJwy;{f8F6yNBFntemo{A-Nv47J2b!H&}T~53DO2yr`3`TVJ z%*+~i#*nM$j&2AG;L^SNXfxgENE=%Di2b)d?C)c9>vFrBY&>JIYRLEC16!jSQe9kJ z523={0Bh0mP#HnDR_Br*zjynJE`7;OZq?A!^(M-ty8gT4E&`I-v)uD;T**j$OalFcN5pmA|9`s$pG;KT6zmX^yLZNN#^8`UN5)3>b+{rk^?(`$w-GV2Fn_!pPUzyP z$(L5YPw0%yG7kC1hP;XS<+8{IV2U0PG@C>Jl@ymu zKbOzdB=RZ3Fz~2+U+0vtr#}Ysa;<%FenLU0Bo+Odv2*~dFjW@$^ND7#nmK;aIN$ag z*T(|!U2a;-+wmYxBa|dBpqsCT1WRSCg&XUvF*4(^neZ$6Ym8417A(metgq@-4mp>J zU36}MzqXjNRG8A|h1uk=R8yub6g2$<2(irpUx!Q3c}C*4DJy12PbcJb6l+VAziEww zP2PCAdR}S0NPYmBVcyQh@X;Z8a_S*S^rZ&>+)Pm~wkFiFM?+CROmlrmesX(D!% 
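Patch 04 above hinges on the `platform_machine` environment markers added to `pyproject.toml` and `poetry.lock`, which let the dependency resolver itself exclude `ibm_db_sa` on ARM hosts. A sketch of how such a PEP 508 marker evaluates, using the `packaging` library as an approximation (Poetry relies on its own marker implementation):

```python
from packaging.markers import Marker  # pip install packaging

marker = Marker("platform_machine != 'aarch64' and platform_machine != 'arm64'")

# On a typical Intel/AMD machine the marker holds, so ibm_db_sa is installed.
print(marker.evaluate({"platform_machine": "x86_64"}))   # True

# On Apple Silicon (or any aarch64 host) it fails, so the package is skipped.
print(marker.evaluate({"platform_machine": "arm64"}))    # False
print(marker.evaluate({"platform_machine": "aarch64"}))  # False
```

Because the exclusion happens at resolution time, no helper script or conditional `poetry install` invocation is needed, which is what allowed patch 04 to delete `scripts/list_arm_extras.py` and the Makefile branching.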
diff --git a/docs/logo.svg b/docs/logo.svg
new file mode 100644
index 000000000..bac0c391a
--- /dev/null
+++ b/docs/logo.svg
@@ -0,0 +1,92 @@
[SVG markup omitted]
diff --git a/docs/modules/mysql.md b/docs/modules/mysql.md
new file mode 100644
index 000000000..a02f15c90
--- /dev/null
+++ b/docs/modules/mysql.md
@@ -0,0 +1,93 @@
+# MySQL
+
+Since testcontainers-python :material-tag: v4.0.0
+
+## Introduction
+
+The Testcontainers module for MySQL.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the MySQL module to your Python dependencies:
+
+```
+pip install testcontainers[mysql]
+```
+
+## Usage example
+
+<!--codeinclude-->
+[Creating a MySQL container](../../modules/mysql/tests/test_mysql.py) inside_block:runMySQLContainer
+<!--/codeinclude-->
+
+## Module Reference
+
+### The `MySqlContainer` class
+
+The MySQL module exposes one entrypoint class to create the container. It receives the Docker image as its first argument, plus keyword arguments for further configuration:
+
+```python
+from testcontainers.mysql import MySqlContainer
+
+container = MySqlContainer("mysql:8.0.36")
+```
+
+- `image`, the Docker image to use.
+- keyword arguments, such as `username`, `password` and `dbname`, for passing options.
+
+### Container Options
+
+When starting the MySQL container, you can pass keyword arguments to configure it.
+
+!!!tip
+
+    You can find all the available configuration and environment variables for the MySQL Docker image on [Docker Hub](https://hub.docker.com/_/mysql).
+
+#### Image
+
+Use the first argument of `MySqlContainer` to set a valid Docker image.
+For example: `MySqlContainer("mysql:8.0.36")`.
+
+{% include "../index.md" %}
+
+#### Set username, password and database name
+
+If you need to set a different database, and its credentials, you can use the `username`, `password` and `dbname`
+keyword arguments.
+
+!!!info
+The default value for the username, the password and the database name is `test`.
+
+#### Init Scripts
+
+If you would like to perform DDL or DML operations in the MySQL container, add one or more `*.sql`, `*.sql.gz`, or `*.sh`
+scripts to a seed directory and pass it via the `seed` option. Those files will be copied under `/docker-entrypoint-initdb.d` and executed when the database first starts (a minimal sketch follows).
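To make the flow concrete, here is a minimal sketch of starting the container and connecting to it. It assumes `sqlalchemy` and a MySQL driver such as `pymysql` are installed alongside the `mysql` extra; the image tag and the seed path in the comment are illustrative:

```python
import sqlalchemy

from testcontainers.mysql import MySqlContainer

# Start a throwaway MySQL instance; the container is removed when the block exits.
# If your module version supports seeding, scripts can be supplied at construction
# time, e.g. MySqlContainer("mysql:8.0.36", seed="./seeds")  # assumed option
with MySqlContainer("mysql:8.0.36") as mysql:
    # get_connection_url() returns a SQLAlchemy-compatible URL for the running container.
    engine = sqlalchemy.create_engine(mysql.get_connection_url())
    with engine.begin() as connection:
        (version,) = connection.execute(sqlalchemy.text("SELECT VERSION()")).fetchone()
        print(version)
```

Using the container as a context manager guarantees cleanup even when assertions fail.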
+
+<!--codeinclude-->
+[Example of Init script](../../modules/mysql/tests/seeds/01-schema.sql)
+<!--/codeinclude-->
+
+#### Custom configuration
+
+If you need to set a custom configuration, you can mount a configuration file into the container, for example with `with_volume_mapping`, targeting the image's `/etc/mysql/conf.d` directory.
+
+### Container Methods
+
+#### Connection URL
+
+The `get_connection_url()` method returns the connection string to connect to the MySQL container, using the container's default `3306` port (mapped to a random host port).
+Extra parameters supported by your driver can be appended to the URL as a query string, e.g. `?charset=utf8mb4`.
+
+<!--codeinclude-->
+[Get connection string](../../modules/mysql/tests/test_mysql.py) inside_block:connectionString
+<!--/codeinclude-->
+
diff --git a/docs/poetry.lock b/docs/poetry.lock
new file mode 100644
index 000000000..bb4f10cd7
--- /dev/null
+++ b/docs/poetry.lock
@@ -0,0 +1,829 @@
+# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
+
+[[package]]
+name = "babel"
+version = "2.17.0"
+description = "Internationalization utilities"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"},
+    {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"},
+]
+
+[package.extras]
+dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""]
+
+[[package]]
+name = "backrefs"
+version = "5.8"
+description = "A wrapper around re and regex that adds additional back references."
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "backrefs-5.8-py310-none-any.whl", hash = "sha256:c67f6638a34a5b8730812f5101376f9d41dc38c43f1fdc35cb54700f6ed4465d"},
+    {file = "backrefs-5.8-py311-none-any.whl", hash = "sha256:2e1c15e4af0e12e45c8701bd5da0902d326b2e200cafcd25e49d9f06d44bb61b"},
+    {file = "backrefs-5.8-py312-none-any.whl", hash = "sha256:bbef7169a33811080d67cdf1538c8289f76f0942ff971222a16034da88a73486"},
+    {file = "backrefs-5.8-py313-none-any.whl", hash = "sha256:e3a63b073867dbefd0536425f43db618578528e3896fb77be7141328642a1585"},
+    {file = "backrefs-5.8-py39-none-any.whl", hash = "sha256:a66851e4533fb5b371aa0628e1fee1af05135616b86140c9d787a2ffdf4b8fdc"},
+    {file = "backrefs-5.8.tar.gz", hash = "sha256:2cab642a205ce966af3dd4b38ee36009b31fa9502a35fd61d59ccc116e40a6bd"},
+]
+
+[package.extras]
+extras = ["regex"]
+
+[[package]]
+name = "bracex"
+version = "2.5.post1"
+description = "Bash style brace expander."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "bracex-2.5.post1-py3-none-any.whl", hash = "sha256:13e5732fec27828d6af308628285ad358047cec36801598368cb28bc631dbaf6"},
+    {file = "bracex-2.5.post1.tar.gz", hash = "sha256:12c50952415bfa773d2d9ccb8e79651b8cdb1f31a42f6091b804f6ba2b4a66b6"},
+]
+
+[[package]]
+name = "certifi"
+version = "2025.4.26"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
+    {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.2"
+description = "The Real First Universal Charset Detector.
Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = 
"charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = 
"charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = 
"charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, +] + +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version == \"3.9\"" +files = [ + {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, + {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, +] + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "markdown" +version = "3.8" +description = "Python implementation of John Gruber's Markdown." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc"}, + {file = "markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mkdocs" +version = "1.6.1" +description = "Project documentation with Markdown." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, + {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} +jinja2 = ">=2.11.1" +markdown = ">=3.3.6" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +mkdocs-get-deps = ">=0.2.0" +packaging = ">=20.5" +pathspec = ">=0.11.1" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4) ; platform_system == \"Windows\"", "ghp-import (==1.0)", "importlib-metadata (==4.4) ; python_version < \"3.10\"", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-codeinclude-plugin" +version = "0.2.1" +description = "A plugin to include code snippets into mkdocs pages" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "mkdocs-codeinclude-plugin-0.2.1.tar.gz", hash = "sha256:305387f67a885f0e36ec1cf977324fe1fe50d31301147194b63631d0864601b1"}, + {file = "mkdocs_codeinclude_plugin-0.2.1-py3-none-any.whl", hash = "sha256:172a917c9b257fa62850b669336151f85d3cd40312b2b52520cbcceab557ea6c"}, +] + +[package.dependencies] +mkdocs = ">=1.2" +pygments = ">=2.9.0" + +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} +mergedeep = ">=1.3.4" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" + +[[package]] +name = "mkdocs-include-markdown-plugin" +version = "7.1.5" +description = "Mkdocs Markdown includer plugin." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "mkdocs_include_markdown_plugin-7.1.5-py3-none-any.whl", hash = "sha256:d0b96edee45e7fda5eb189e63331cfaf1bf1fbdbebbd08371f1daa77045d3ae9"}, + {file = "mkdocs_include_markdown_plugin-7.1.5.tar.gz", hash = "sha256:a986967594da6789226798e3c41c70bc17130fadb92b4313f42bd3defdac0adc"}, +] + +[package.dependencies] +mkdocs = ">=1.4" +wcmatch = "*" + +[package.extras] +cache = ["platformdirs"] + +[[package]] +name = "mkdocs-markdownextradata-plugin" +version = "0.2.6" +description = "A MkDocs plugin that injects the mkdocs.yml extra variables into the markdown template" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "mkdocs_markdownextradata_plugin-0.2.6-py3-none-any.whl", hash = "sha256:34dd40870781784c75809596b2d8d879da783815b075336d541de1f150c94242"}, + {file = "mkdocs_markdownextradata_plugin-0.2.6.tar.gz", hash = "sha256:4aed9b43b8bec65b02598387426ca4809099ea5f5aa78bf114f3296fd46686b5"}, +] + +[package.dependencies] +mkdocs = "*" +pyyaml = "*" + +[[package]] +name = "mkdocs-material" +version = "9.6.13" +description = "Documentation that simply works" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_material-9.6.13-py3-none-any.whl", hash = "sha256:3730730314e065f422cc04eacbc8c6084530de90f4654a1482472283a38e30d3"}, + {file = "mkdocs_material-9.6.13.tar.gz", hash = "sha256:7bde7ebf33cfd687c1c86c08ed8f6470d9a5ba737bd89e7b3e5d9f94f8c72c16"}, +] + +[package.dependencies] +babel = ">=2.10,<3.0" +backrefs = ">=5.7.post1,<6.0" +colorama = ">=0.4,<1.0" +jinja2 = ">=3.1,<4.0" +markdown = ">=3.2,<4.0" +mkdocs = ">=1.6,<2.0" +mkdocs-material-extensions = ">=1.3,<2.0" +paginate = ">=0.5,<1.0" +pygments = ">=2.16,<3.0" +pymdown-extensions = ">=10.2,<11.0" +requests = ">=2.26,<3.0" + +[package.extras] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<3)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] +recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] + +[[package]] +name = "mkdocs-material-extensions" +version = "1.3.1" +description = "Extension pack for Python Markdown and MkDocs Material." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "paginate" +version = "0.5.7" +description = "Divides large result sets into pages for easier browsing" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591"}, + {file = "paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945"}, +] + +[package.extras] +dev = ["pytest", "tox"] +lint = ["black"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "pygments" +version = "2.19.1" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, + {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymdown-extensions" +version = "10.15" +description = "Extension pack for Python Markdown." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pymdown_extensions-10.15-py3-none-any.whl", hash = "sha256:46e99bb272612b0de3b7e7caf6da8dd5f4ca5212c0b273feb9304e236c484e5f"}, + {file = "pymdown_extensions-10.15.tar.gz", hash = "sha256:0e5994e32155f4b03504f939e501b981d306daf7ec2aa1cd2eb6bd300784f8f7"}, +] + +[package.dependencies] +markdown = ">=3.6" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.19.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = 
"PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "pyyaml-env-tag" +version = "1.0" +description = "A custom YAML tag for referencing environment variables in YAML files." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pyyaml_env_tag-1.0-py3-none-any.whl", hash = "sha256:37f081041b8dca44ed8eb931ce0056f97de17251450f0ed08773dc2bcaf9e683"}, + {file = "pyyaml_env_tag-1.0.tar.gz", hash = "sha256:bc952534a872b583f66f916e2dd83e7a7b9087847f4afca6d9c957c48b258ed2"}, +] + +[package.dependencies] +pyyaml = "*" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "watchdog" +version = "6.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = 
"watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[[package]] +name = "wcmatch" +version = "10.0" +description = "Wildcard/glob file name matcher." 
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "wcmatch-10.0-py3-none-any.whl", hash = "sha256:0dd927072d03c0a6527a20d2e6ad5ba8d0380e60870c383bc533b71744df7b7a"},
+    {file = "wcmatch-10.0.tar.gz", hash = "sha256:e72f0de09bba6a04e0de70937b0cf06e55f36f37b3deb422dfaf854b867b840a"},
+]
+
+[package.dependencies]
+bracex = ">=2.1.1"
+
+[[package]]
+name = "zipp"
+version = "3.21.0"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+markers = "python_version == \"3.9\""
+files = [
+    {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
+    {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+type = ["pytest-mypy"]
+
+[metadata]
+lock-version = "2.1"
+python-versions = ">=3.9,<4.0"
+content-hash = "3f78e6a27c59513925f871a6a742fd5dc51049c5624a5dfe1377235cc2b7bda1"
diff --git a/docs/pyproject.toml b/docs/pyproject.toml
new file mode 100644
index 000000000..565d77714
--- /dev/null
+++ b/docs/pyproject.toml
@@ -0,0 +1,25 @@
+[tool.poetry]
+name = "testcontainers-docs"
+version = "0.1.0"
+description = "Documentation site for testcontainers-python"
+authors = ["Sergey Pirogov "]
+maintainers = [
+    "Balint Bartha ",
+    "David Ankin ",
+    "Vemund Santi ",
+]
+
+[tool.poetry.dependencies]
+python = ">=3.9,<4.0"
+mkdocs = "^1.5.3"
+mkdocs-material = "^9.5.0"
+mkdocs-markdownextradata-plugin = "^0.2.6"
+mkdocs-codeinclude-plugin = "^0.2.1"
+mkdocs-include-markdown-plugin = "^7.1.5"
+
+[tool.poetry.requires-plugins]
+poetry-plugin-export = ">=1.8"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 000000000..a46ce6fff
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,36 @@
+babel==2.17.0 ; python_version >= "3.9" and python_version < "4.0"
+backrefs==5.8 ; python_version >= "3.9" and python_version < "4.0"
+bracex==2.5.post1 ; python_version >= "3.9" and python_version < "4.0"
+certifi==2025.4.26 ; python_version >= "3.9" and python_version < "4.0"
+charset-normalizer==3.4.2 ; python_version >= "3.9" and python_version < "4.0"
+click==8.1.8 ; python_version >= "3.9" and python_version < "4.0"
+colorama==0.4.6 ; python_version >= "3.9" and python_version < "4.0"
+ghp-import==2.1.0 ; python_version >= "3.9" and python_version < "4.0"
+idna==3.10 ; python_version >= "3.9" and python_version < "4.0"
+importlib-metadata==8.7.0 ; python_version == "3.9"
+jinja2==3.1.6 ; python_version >= "3.9" and python_version < "4.0"
+markdown==3.8 ; python_version >= "3.9" and python_version < "4.0"
+markupsafe==3.0.2 ; python_version >= "3.9" and python_version < "4.0"
+mergedeep==1.3.4 ; python_version >= "3.9" and python_version < "4.0"
+mkdocs-codeinclude-plugin==0.2.1 ; python_version >= "3.9" and python_version < "4.0"
+mkdocs-get-deps==0.2.0 ; python_version >= 
"3.9" and python_version < "4.0" +mkdocs-include-markdown-plugin==7.1.5 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-markdownextradata-plugin==0.2.6 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-material-extensions==1.3.1 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-material==9.6.13 ; python_version >= "3.9" and python_version < "4.0" +mkdocs==1.6.1 ; python_version >= "3.9" and python_version < "4.0" +packaging==25.0 ; python_version >= "3.9" and python_version < "4.0" +paginate==0.5.7 ; python_version >= "3.9" and python_version < "4.0" +pathspec==0.12.1 ; python_version >= "3.9" and python_version < "4.0" +platformdirs==4.3.8 ; python_version >= "3.9" and python_version < "4.0" +pygments==2.19.1 ; python_version >= "3.9" and python_version < "4.0" +pymdown-extensions==10.15 ; python_version >= "3.9" and python_version < "4.0" +python-dateutil==2.9.0.post0 ; python_version >= "3.9" and python_version < "4.0" +pyyaml-env-tag==1.0 ; python_version >= "3.9" and python_version < "4.0" +pyyaml==6.0.2 ; python_version >= "3.9" and python_version < "4.0" +requests==2.32.3 ; python_version >= "3.9" and python_version < "4.0" +six==1.17.0 ; python_version >= "3.9" and python_version < "4.0" +urllib3==2.4.0 ; python_version >= "3.9" and python_version < "4.0" +watchdog==6.0.0 ; python_version >= "3.9" and python_version < "4.0" +wcmatch==10.0 ; python_version >= "3.9" and python_version < "4.0" +zipp==3.21.0 ; python_version == "3.9" diff --git a/docs/testcontainers-logo.svg b/docs/testcontainers-logo.svg new file mode 100644 index 000000000..cc5fb6188 --- /dev/null +++ b/docs/testcontainers-logo.svg @@ -0,0 +1,22 @@ + + + Testcontainers + + + + + + + + + + + + + + + + + + + diff --git a/docs/theme/main.html b/docs/theme/main.html new file mode 100644 index 000000000..1c0823892 --- /dev/null +++ b/docs/theme/main.html @@ -0,0 +1,10 @@ +{% extends "base.html" %} + +{% block analytics %} + +{% endblock %} + +{% block extrahead %} + + +{% endblock %} diff --git a/docs/theme/partials/header.html b/docs/theme/partials/header.html new file mode 100644 index 000000000..2c59cbb3d --- /dev/null +++ b/docs/theme/partials/header.html @@ -0,0 +1,140 @@ + + + +{% set class = "md-header" %} {% if "navigation.tabs.sticky" in features %} {% +set class = class ~ " md-header--shadow md-header--lifted" %} {% elif +"navigation.tabs" not in features %} {% set class = class ~ " md-header--shadow" +%} {% endif %} {% include "partials/tc-header.html" %} + + +
    + + + + {% if "navigation.tabs.sticky" in features %} {% if "navigation.tabs" in + features %} {% include "partials/tabs.html" %} {% endif %} {% endif %} +
    diff --git a/docs/theme/partials/nav.html b/docs/theme/partials/nav.html new file mode 100644 index 000000000..90dcdc2ef --- /dev/null +++ b/docs/theme/partials/nav.html @@ -0,0 +1,79 @@ + + + +{% import "partials/nav-item.html" as item with context %} +{% set class = "md-nav md-nav--primary" %} +{% if "navigation.tabs" in features %} +{% set class = class ~ " md-nav--lifted" %} +{% endif %} +{% if "toc.integrate" in features %} +{% set class = class ~ " md-nav--integrated" %} +{% endif %} + + + diff --git a/docs/theme/partials/tc-header.html b/docs/theme/partials/tc-header.html new file mode 100644 index 000000000..246e9ff52 --- /dev/null +++ b/docs/theme/partials/tc-header.html @@ -0,0 +1,157 @@ +{% set header = ({ + "siteUrl": "https://testcontainers.com/", + "menuItems": [ + { + "label": "Desktop NEW", + "url": "https://testcontainers.com/desktop/" + }, + { + "label": "Cloud", + "url": "https://testcontainers.com/cloud/" + }, + { + "label": "Getting Started", + "url": "https://testcontainers.com/getting-started/" + }, + { + "label": "Guides", + "url": "https://testcontainers.com/guides/" + }, + { + "label": "Modules", + "url": "https://testcontainers.com/modules/" + }, + { + "label": "Docs", + "children": [ + { + "label": "Testcontainers for Java", + "url": "https://java.testcontainers.org/", + "image": "/language-logos/java.svg", + }, + { + "label": "Testcontainers for Go", + "url": "https://golang.testcontainers.org/", + "image": "/language-logos/go.svg", + }, + { + "label": "Testcontainers for .NET", + "url": "https://dotnet.testcontainers.org/", + "image": "/language-logos/dotnet.svg", + }, + { + "label": "Testcontainers for Node.js", + "url": "https://node.testcontainers.org/", + "image": "/language-logos/nodejs.svg", + }, + { + "label": "Testcontainers for Python", + "url": "https://testcontainers-python.readthedocs.io/en/latest/", + "image": "/language-logos/python.svg", + "external": true, + }, + { + "label": "Testcontainers for Rust", + "url": "https://docs.rs/testcontainers/latest/testcontainers/", + "image": "/language-logos/rust.svg", + "external": true, + }, + { + "label": "Testcontainers for Haskell", + "url": "https://github.com/testcontainers/testcontainers-hs", + "image": "/language-logos/haskell.svg", + "external": true, + }, + { + "label": "Testcontainers for Ruby", + "url": "https://github.com/testcontainers/testcontainers-ruby", + "image": "/language-logos/ruby.svg", + "external": true, + }, + ] + }, + { + "label": "Slack", + "url": "https://slack.testcontainers.org/", + "icon": "icon-slack", + }, + { + "label": "GitHub", + "url": "https://github.com/testcontainers", + "icon": "icon-github", + }, + ] +}) %} + + + + + + + + + + + diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..171274109 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,48 @@ +site_name: Testcontainers for Python +site_url: https://python.testcontainers.org +plugins: + - search + - codeinclude + - include-markdown + - markdownextradata +theme: + name: material + custom_dir: docs/theme + palette: + scheme: testcontainers + font: + text: Roboto + code: Roboto Mono + logo: logo.svg + favicon: favicon.ico +extra_css: + - "css/extra.css" + - "css/tc-header.css" +repo_name: "testcontainers-python" +repo_url: "https://github.com/testcontainers/testcontainers-python" +markdown_extensions: + - admonition + - codehilite: + linenums: false + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.snippets + - toc: + permalink: true + - attr_list + - 
pymdownx.emoji: + emoji_generator: !!python/name:material.extensions.emoji.to_svg + emoji_index: !!python/name:material.extensions.emoji.twemoji +nav: + - Home: index.md + - Features: + - features/creating_container.md + - Modules: + - modules/mysql.md + - Contributing: contributing.md + - Contributing Docs: contributing_docs.md + - Getting help: getting_help.md +edit_uri: edit/main/docs/ +extra: + latest_version: 4.10.0 diff --git a/poetry.lock b/poetry.lock index 653a13e2d..5a892b169 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. [[package]] name = "alabaster" diff --git a/pyproject.toml b/pyproject.toml index 219a1e171..360fe31b2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,12 +1,12 @@ [tool.poetry] name = "testcontainers" -version = "4.10.0" # auto-incremented by release-please +version = "4.10.0" # auto-incremented by release-please description = "Python library for throwaway instances of anything that can run in a Docker container" authors = ["Sergey Pirogov "] maintainers = [ "Balint Bartha ", "David Ankin ", - "Vemund Santi " + "Vemund Santi ", ] readme = "README.md" keywords = ["testing", "logging", "docker", "test automation"] @@ -29,7 +29,7 @@ classifiers = [ packages = [ { include = "testcontainers", from = "core" }, { include = "testcontainers", from = "modules/arangodb" }, - { include = "testcontainers", from = "modules/aws"}, + { include = "testcontainers", from = "modules/aws" }, { include = "testcontainers", from = "modules/azurite" }, { include = "testcontainers", from = "modules/cassandra" }, { include = "testcontainers", from = "modules/chroma" }, @@ -39,7 +39,7 @@ packages = [ { include = "testcontainers", from = "modules/db2" }, { include = "testcontainers", from = "modules/elasticsearch" }, { include = "testcontainers", from = "modules/generic" }, - { include = "testcontainers", from = "modules/test_module_import"}, + { include = "testcontainers", from = "modules/test_module_import" }, { include = "testcontainers", from = "modules/google" }, { include = "testcontainers", from = "modules/influxdb" }, { include = "testcontainers", from = "modules/k3s" }, @@ -79,9 +79,9 @@ packages = [ [tool.poetry.dependencies] python = ">=3.9,<4.0" -docker = "*" # ">=4.0" -urllib3 = "*" # "<2.0" -wrapt = "*" # "^1.16.0" +docker = "*" # ">=4.0" +urllib3 = "*" # "<2.0" +wrapt = "*" # "^1.16.0" typing-extensions = "*" python-dotenv = "*" @@ -130,7 +130,10 @@ cosmosdb = ["azure-cosmos"] cockroachdb = [] db2 = ["sqlalchemy", "ibm_db_sa"] elasticsearch = [] -generic = ["httpx", "redis"] # The advance doctests for ServerContainer require redis +generic = [ + "httpx", + "redis", +] # The advance doctests for ServerContainer require redis test_module_import = ["httpx"] google = ["google-cloud-pubsub", "google-cloud-datastore"] influxdb = ["influxdb", "influxdb-client"] @@ -204,19 +207,17 @@ addopts = "--tb=short --strict-markers" log_cli = true log_cli_level = "INFO" markers = [ - "inside_docker_check: mark test to be used to validate DinD/DooD is working as expected" + "inside_docker_check: mark test to be used to validate DinD/DooD is working as expected", ] [tool.coverage.run] branch = true -omit = [ - "oracle.py" -] +omit = ["oracle.py"] [tool.coverage.report] exclude_lines = [ "pass", - "raise NotImplementedError" # TODO: used in core/generic.py, not sure we need DbContainer + "raise 
NotImplementedError", # TODO: used in core/generic.py, not sure we need DbContainer ] [tool.ruff] @@ -254,7 +255,8 @@ select = [ # mccabe "C90", # pycodestyle - "E", "W", + "E", + "W", # pyflakes "F", # pygrep-hooks @@ -272,7 +274,7 @@ ignore = [ # the must-have __init__.py (we are using package namespaces) "INP001", # we do have some imports shadowing builtins - "A004" + "A004", ] [tool.ruff.lint.pyupgrade] @@ -293,47 +295,41 @@ strict = true modules = ["testcontainers.core"] mypy_path = [ "core", -# "modules/arangodb", -# "modules/azurite", -# "modules/cassandra", -# "modules/clickhouse", -# "modules/elasticsearch", -# "modules/google", -# "modules/k3s", -# "modules/kafka", -# "modules/keycloak", -# "modules/localstack", + # "modules/arangodb", + # "modules/azurite", + # "modules/cassandra", + # "modules/clickhouse", + # "modules/elasticsearch", + # "modules/google", + # "modules/k3s", + # "modules/kafka", + # "modules/keycloak", + # "modules/localstack", "modules/mailpit", -# "modules/minio", -# "modules/mongodb", -# "modules/mssql", -# "modules/mysql", -# "modules/neo4j", -# "modules/nginx", -# "modules/ollama", -# "modules/opensearch", -# "modules/oracle", -# "modules/postgres", -# "modules/rabbitmq", -# "modules/redis", -# "modules/selenium" + # "modules/minio", + # "modules/mongodb", + # "modules/mssql", + # "modules/mysql", + # "modules/neo4j", + # "modules/nginx", + # "modules/ollama", + # "modules/opensearch", + # "modules/oracle", + # "modules/postgres", + # "modules/rabbitmq", + # "modules/redis", + # "modules/selenium" "modules/sftp", -# "modules/vault" -# "modules/weaviate" -] -enable_error_code = [ - "ignore-without-code", - "redundant-expr", - "truthy-bool", + # "modules/vault" + # "modules/weaviate" ] +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] [[tool.mypy.overrides]] module = ['tests.*'] # in pytest we allow fixtures to be more relaxed, though we check the untyped functions check_untyped_defs = true -disable_error_code = [ - 'no-untyped-def' -] +disable_error_code = ['no-untyped-def'] [[tool.mypy.overrides]] module = ['docker.*'] From c148984fa04e093469d7bd8369abb40378c0f35c Mon Sep 17 00:00:00 2001 From: Terry Date: Wed, 14 May 2025 16:07:33 -0300 Subject: [PATCH 07/16] docs: generating module docs, fixing build settings, and working on the standard docs pages for python --- Makefile | 17 +- docs/contributing.md | 119 +++++++----- docs/contributing_docs.md | 109 ----------- docs/css/extra.css | 8 + docs/features/configuration.md | 29 +++ docs/features/creating_container.md | 75 +++++++- docs/getting_help.md | 6 +- docs/index.md | 26 ++- docs/modules/arangodb.md | 41 ++++ docs/modules/aws.md | 23 +++ docs/modules/azurite.md | 23 +++ docs/modules/cassandra.md | 23 +++ docs/modules/chroma.md | 43 +++++ docs/modules/clickhouse.md | 44 +++++ docs/modules/cockroachdb.md | 44 +++++ docs/modules/cosmosdb.md | 43 +++++ docs/modules/db2.md | 43 +++++ docs/modules/elasticsearch.md | 23 +++ docs/modules/generic.md | 23 +++ docs/modules/google.md | 23 +++ docs/modules/influxdb.md | 17 ++ docs/modules/k3s.md | 23 +++ docs/modules/kafka.md | 23 +++ docs/modules/keycloak.md | 23 +++ docs/modules/localstack.md | 23 +++ docs/modules/mailpit.md | 23 +++ docs/modules/memcached.md | 23 +++ docs/modules/milvus.md | 23 +++ docs/modules/minio.md | 23 +++ docs/modules/mongodb.md | 23 +++ docs/modules/mqtt.md | 23 +++ docs/modules/mssql.md | 23 +++ docs/modules/mssql/example_basic.py | 50 +++++ docs/modules/mysql.md | 80 +------- docs/modules/nats.md | 23 +++ 
docs/modules/neo4j.md | 23 +++ docs/modules/nginx.md | 23 +++ docs/modules/ollama.md | 23 +++ docs/modules/opensearch.md | 23 +++ docs/modules/oracle-free.md | 23 +++ docs/modules/postgres.md | 23 +++ docs/modules/qdrant.md | 23 +++ docs/modules/rabbitmq.md | 23 +++ docs/modules/redis.md | 23 +++ docs/modules/registry.md | 23 +++ docs/modules/scylla.md | 23 +++ docs/modules/selenium.md | 23 +++ docs/modules/sftp.md | 23 +++ docs/modules/test_module_import.md | 23 +++ docs/modules/trino.md | 23 +++ docs/modules/vault.md | 23 +++ docs/modules/weaviate.md | 23 +++ docs/quickstart.md | 116 ++++++++++++ docs/system_requirements/docker.md | 11 ++ docs/system_requirements/index.md | 183 ++++++++++++++++++ mkdocs.yml | 53 +++++- modules/arangodb/example_basic.py | 91 +++++++++ modules/aws/example_basic.py | 117 ++++++++++++ modules/azurite/example_basic.py | 73 ++++++++ modules/cassandra/example_basic.py | 153 +++++++++++++++ modules/chroma/example_basic.py | 65 +++++++ modules/clickhouse/example_basic.py | 76 ++++++++ modules/cockroachdb/example_basic.py | 90 +++++++++ modules/cosmosdb/example_basic.py | 75 ++++++++ modules/db2/example_basic.py | 89 +++++++++ modules/elasticsearch/example_basic.py | 105 +++++++++++ modules/generic/example_basic.py | 115 ++++++++++++ modules/google/example_basic.py | 127 +++++++++++++ modules/influxdb/example_basic.py | 192 +++++++++++++++++++ modules/k3s/example_basic.py | 179 ++++++++++++++++++ modules/kafka/example_basic.py | 80 ++++++++ modules/keycloak/example_basic.py | 171 +++++++++++++++++ modules/localstack/example_basic.py | 72 +++++++ modules/mailpit/example_basic.py | 62 ++++++ modules/memcached/example_basic.py | 135 +++++++++++++ modules/milvus/example_basic.py | 138 ++++++++++++++ modules/minio/example_basic.py | 120 ++++++++++++ modules/mongodb/example_basic.py | 85 +++++++++ modules/mqtt/example_basic.py | 51 +++++ modules/mssql/example_basic.py | 164 ++++++++++++++++ modules/mysql/example_basic.py | 16 ++ modules/nats/example_basic.py | 152 +++++++++++++++ modules/neo4j/example_basic.py | 198 ++++++++++++++++++++ modules/nginx/example_basic.py | 116 ++++++++++++ modules/ollama/example_basic.py | 50 +++++ modules/opensearch/example_basic.py | 0 modules/oracle-free/example_basic.py | 140 ++++++++++++++ modules/postgres/example_basic.py | 99 ++++++++++ modules/qdrant/example_basic.py | 149 +++++++++++++++ modules/rabbitmq/example_basic.py | 98 ++++++++++ modules/redis/example_basic.py | 84 +++++++++ modules/registry/example_basic.py | 92 +++++++++ modules/scylla/example_basic.py | 153 +++++++++++++++ modules/selenium/example_basic.py | 49 +++++ modules/sftp/example_basic.py | 137 ++++++++++++++ modules/test_module_import/example_basic.py | 144 ++++++++++++++ modules/trino/example_basic.py | 66 +++++++ modules/vault/example_basic.py | 75 ++++++++ modules/weaviate/example_basic.py | 143 ++++++++++++++ pyproject.toml | 3 + 100 files changed, 6292 insertions(+), 259 deletions(-) delete mode 100644 docs/contributing_docs.md create mode 100644 docs/features/configuration.md create mode 100644 docs/modules/arangodb.md create mode 100644 docs/modules/aws.md create mode 100644 docs/modules/azurite.md create mode 100644 docs/modules/cassandra.md create mode 100644 docs/modules/chroma.md create mode 100644 docs/modules/clickhouse.md create mode 100644 docs/modules/cockroachdb.md create mode 100644 docs/modules/cosmosdb.md create mode 100644 docs/modules/db2.md create mode 100644 docs/modules/elasticsearch.md create mode 100644 docs/modules/generic.md create 
mode 100644 docs/modules/google.md create mode 100644 docs/modules/influxdb.md create mode 100644 docs/modules/k3s.md create mode 100644 docs/modules/kafka.md create mode 100644 docs/modules/keycloak.md create mode 100644 docs/modules/localstack.md create mode 100644 docs/modules/mailpit.md create mode 100644 docs/modules/memcached.md create mode 100644 docs/modules/milvus.md create mode 100644 docs/modules/minio.md create mode 100644 docs/modules/mongodb.md create mode 100644 docs/modules/mqtt.md create mode 100644 docs/modules/mssql.md create mode 100644 docs/modules/mssql/example_basic.py create mode 100644 docs/modules/nats.md create mode 100644 docs/modules/neo4j.md create mode 100644 docs/modules/nginx.md create mode 100644 docs/modules/ollama.md create mode 100644 docs/modules/opensearch.md create mode 100644 docs/modules/oracle-free.md create mode 100644 docs/modules/postgres.md create mode 100644 docs/modules/qdrant.md create mode 100644 docs/modules/rabbitmq.md create mode 100644 docs/modules/redis.md create mode 100644 docs/modules/registry.md create mode 100644 docs/modules/scylla.md create mode 100644 docs/modules/selenium.md create mode 100644 docs/modules/sftp.md create mode 100644 docs/modules/test_module_import.md create mode 100644 docs/modules/trino.md create mode 100644 docs/modules/vault.md create mode 100644 docs/modules/weaviate.md create mode 100644 docs/quickstart.md create mode 100644 docs/system_requirements/docker.md create mode 100644 docs/system_requirements/index.md create mode 100644 modules/arangodb/example_basic.py create mode 100644 modules/aws/example_basic.py create mode 100644 modules/azurite/example_basic.py create mode 100644 modules/cassandra/example_basic.py create mode 100644 modules/chroma/example_basic.py create mode 100644 modules/clickhouse/example_basic.py create mode 100644 modules/cockroachdb/example_basic.py create mode 100644 modules/cosmosdb/example_basic.py create mode 100644 modules/db2/example_basic.py create mode 100644 modules/elasticsearch/example_basic.py create mode 100644 modules/generic/example_basic.py create mode 100644 modules/google/example_basic.py create mode 100644 modules/influxdb/example_basic.py create mode 100644 modules/k3s/example_basic.py create mode 100644 modules/kafka/example_basic.py create mode 100644 modules/keycloak/example_basic.py create mode 100644 modules/localstack/example_basic.py create mode 100644 modules/mailpit/example_basic.py create mode 100644 modules/memcached/example_basic.py create mode 100644 modules/milvus/example_basic.py create mode 100644 modules/minio/example_basic.py create mode 100644 modules/mongodb/example_basic.py create mode 100644 modules/mqtt/example_basic.py create mode 100644 modules/mssql/example_basic.py create mode 100644 modules/mysql/example_basic.py create mode 100644 modules/nats/example_basic.py create mode 100644 modules/neo4j/example_basic.py create mode 100644 modules/nginx/example_basic.py create mode 100644 modules/ollama/example_basic.py create mode 100644 modules/opensearch/example_basic.py create mode 100644 modules/oracle-free/example_basic.py create mode 100644 modules/postgres/example_basic.py create mode 100644 modules/qdrant/example_basic.py create mode 100644 modules/rabbitmq/example_basic.py create mode 100644 modules/redis/example_basic.py create mode 100644 modules/registry/example_basic.py create mode 100644 modules/scylla/example_basic.py create mode 100644 modules/selenium/example_basic.py create mode 100644 modules/sftp/example_basic.py create 
mode 100644 modules/test_module_import/example_basic.py
create mode 100644 modules/trino/example_basic.py
create mode 100644 modules/vault/example_basic.py
create mode 100644 modules/weaviate/example_basic.py
diff --git a/Makefile b/Makefile
index e4b0241ce..855a9d9c3 100644
--- a/Makefile
+++ b/Makefile
@@ -78,8 +78,12 @@ DOCS_DOCKERFILE := Dockerfile.docs
 .PHONY: clean-docs
 clean-docs:
 	@echo "Destroying docs"
-	docker rm -f $(DOCS_CONTAINER) || true
-	docker rmi $(DOCS_IMAGE) || true
+	@if docker ps -a --format '{{.Names}}' | grep -q '^$(DOCS_CONTAINER)$$'; then \
+		docker rm -f $(DOCS_CONTAINER); \
+	fi
+	@if docker images -q $(DOCS_IMAGE) | grep -q .; then \
+		docker rmi $(DOCS_IMAGE); \
+	fi

 .PHONY: docs-ensure-image
 docs-ensure-image:
@@ -96,15 +100,6 @@ serve-docs: docs-ensure-image
 	cd docs && poetry install --no-root && \
 	poetry run mkdocs serve -f ../mkdocs.yml -a 0.0.0.0:8000"

-.PHONY: watch-docs
-watch-docs: docs-ensure-image
-	docker run --rm --name $(DOCS_CONTAINER) -it -p 8000:8000 \
-	-v $(PWD):/testcontainers-go \
-	-w /testcontainers-go \
-	$(DOCS_IMAGE) bash -c "\
-	cd docs && poetry install --no-root && \
-	poetry run mkdocs serve -f ../mkdocs.yml -a 0.0.0.0:8000" --live-reload
-
 # Needed if dependencies are added to the docs site
 .PHONY: export-docs-deps
 export-docs-deps:
diff --git a/docs/contributing.md b/docs/contributing.md
index bf5a3d639..2e5a27c2a 100644
--- a/docs/contributing.md
+++ b/docs/contributing.md
@@ -1,71 +1,96 @@
-# Contributing
+# Contributing to `testcontainers-python`

-`Testcontainers for Go` is open source, and we love to receive contributions from our community — you!
+Welcome to the `testcontainers-python` community!
+This should give you an idea about how we build, test and release `testcontainers-python`!

-There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests, or writing code for the core library or for a technology module.
+We highly recommend reading this document thoroughly to understand what we're working on right now
+and what our priorities are before trying to contribute something.

-In any case, if you like the project, please star the project on [GitHub](https://github.com/testcontainers/testcontainers-go/stargazers) and help spread the word :)
-Also join our [Slack workspace](http://slack.testcontainers.org) to get help, share your ideas, and chat with the community.
+This will greatly increase your chances of getting prompt replies, as the maintainers are volunteers themselves.

-## Questions
+## Before you begin

-GitHub is reserved for bug reports and feature requests; it is not the place for general questions.
-If you have a question or an unconfirmed bug, please visit our [Slack workspace](https://testcontainers.slack.com/);
-feedback and ideas are always welcome.
+We recommend following these steps:

-## Code contributions
+1. Finish reading this document.
+2. Read the [recently updated issues](https://github.com/testcontainers/testcontainers-python/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc){:target="\_blank"}.
+3. Look for existing issues on the subject you are interested in - we do our best to label everything correctly.

-If you have a bug fix or new feature that you would like to contribute, please find or open an [issue](https://github.com/testcontainers/testcontainers-go/issues) first.
-It's important to talk about what you would like to do, as there may already be someone working on it,
-or there may be context to be aware of before implementing the change.
+## Local development

-Next would be to fork the repository and make your changes in a feature branch. **Please do not commit changes to the `main` branch**,
-otherwise we won't be able to contribute to your changes directly in the PR.
+### Pre-Requisites

-### Submitting your changes
+You need to have the following tools available to you:

-Please just be sure to:
+- `make` - You'll need GNU Make for common developer activities.
+- `poetry` - This is the primary package manager for the project.
+- `pyenv` **Recommended**: For installing Python versions for your system.
+  Poetry infers the current latest version from what it can find on the `PATH`, so you are still fine if you don't use `pyenv`.

-- follow the style, naming and structure conventions of the rest of the project.
-- make commits atomic and easy to merge.
-- use [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) for the PR title. This will help us to understand the nature of the changes, and to generate the changelog after all the commits in the PR are squashed.
-  - Please use the `feat!`, `chore!`, `fix!`... types for breaking changes, as these categories are considered as `breaking change` in the changelog. Please use the `!` to denote a breaking change.
-  - Please use the `security` type for security fixes, as these categories are considered as `security` in the changelog.
-  - Please use the `feat` type for new features, as these categories are considered as `feature` in the changelog.
-  - Please use the `fix` type for bug fixes, as these categories are considered as `bug` in the changelog.
-  - Please use the `docs` type for documentation updates, as these categories are considered as `documentation` in the changelog.
-  - Please use the `chore` type for housekeeping commits, including `build`, `ci`, `style`, `refactor`, `test`, `perf` and so on, as these categories are considered as `chore` in the changelog.
-  - Please use the `deps` type for dependency updates, as these categories are considered as `dependencies` in the changelog.
+### Build and test

-!!!important
-There is a GitHub Actions workflow that will check if your PR title follows the conventional commits convention. If not, it contributes a failed check to your PR.
-To know more about the conventions, please refer to the [workflow file](https://github.com/testcontainers/testcontainers-go/blob/main/.github/workflows/conventions.yml).
+- Run `make install` to get `poetry` to install all dependencies and set up `pre-commit`.
+  - **Recommended**: Run `make` or `make help` to see other commands available to you.
+- After this, you should have a working virtual environment and can proceed with writing code in your favorite IDE.
+- **TIP**: You can run `make core/tests` or `make modules/<module>/tests` to run the tests for a specific package and speed up feedback cycles.
+- You can also run `make lint` to run the `pre-commit` checks for the entire codebase.

-- use [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) for your commit messages, as it improves the readability of the commit history, and the review process. Please follow the above conventions for the PR title.
-- unless necessary, please try to **avoid pushing --force** to the published branch you submitted a PR from, as it makes it harder to review the changes from a given previous state.
-apply format running `make lint-all`. It will run `golangci-lint` for the core and modules with the configuration set in the root directory of the project. Please be aware that the lint stage on CI could fail if this is not done.
-  - For linting just the modules: `make -C modules lint-modules`
-  - For linting just the examples: `make -C examples lint-examples`
-  - For linting just the modulegen: `make -C modulegen lint`
-- verify all tests are passing. Build and test the project with `make test-all` to do this.
-  - For a given module or example, go to the module or example directory and run `make test`.
-  - If you find an `ld warning` message on MacOS, you can ignore it. It is indeed a warning: https://github.com/golang/go/issues/61229
+## Adding new modules

-  > === Errors
-  > ld: warning: '/private/var/folders/3y/8hbf585d4yl6f8j5yzqx6wz80000gn/T/go-link-2319589277/000018.o' has malformed LC_DYSYMTAB, expected 98 undefined symbols to start at index 1626, found 95 undefined symbols starting at index 1626
+We have an [issue template](https://github.com/testcontainers/testcontainers-python/blob/main/.github/ISSUE_TEMPLATE/new-container.md){:target="\_blank"} for adding new module containers, please refer to that for more information.
+Once you've talked to the maintainers (we do our best to reply!) then you can proceed with contributing the new container.

-- when updating the `go.mod` file, please run `make tidy-all` to ensure all modules are updated.
+!!!WARNING
+
+    Please raise an issue before you try to contribute a new container! It helps maintainers understand your use-case and motivation.
+    This way we can keep pull requests focused on the "how", not the "why"! :pray:
+    It also gives maintainers a chance to give you last-minute guidance on caveats or expectations, particularly with
+    new extra dependencies and how to manage them.
+
+### Module documentation
+
+Leave examples for others with your new module, such as `modules/<module>/basic_example.py`. You can create as many examples as you want.
+
+Create a new `docs/modules/<module>.md` describing the basic use of the new container. There is a [starter template provided here](https://raw.githubusercontent.com/testcontainers/testcontainers-python/blob/main/docs/modules/template.md){:target="\_blank"}.
+
+!!! important
+
+    Make sure to add your new module to the sidebar nav in `mkdocs.yml`.
+
+## Raising issues
+
+We have [Issue Templates](https://raw.githubusercontent.com/testcontainers/testcontainers-python/refs/heads/main/.github/ISSUE_TEMPLATE/new-container.md){:target="\_blank"} to cover most cases; please try to adhere to them, they will guide you through the process.
+Try to look through the existing issues before you raise a new one.
+
+## Releasing versions
+
+We have automated Semantic Versioning and release via [release-please](https://github.com/testcontainers/testcontainers-python/blob/main/.github/workflows/release-please.yml){:target="\_blank"}.
+This takes care of:
+
+- Detecting the next version, based on the commits that landed on `main`
+- When a Release PR has been merged
+  - Create a GitHub Release with the CHANGELOG included
+  - Update the [CHANGELOG](https://github.com/testcontainers/testcontainers-python/blob/main/CHANGELOG.md){:target="\_blank"}, similar to the GitHub Release
+  - Release to PyPI via a [trusted publisher](https://docs.pypi.org/trusted-publishers/using-a-publisher/){:target="\_blank"}
+  - Automatically script updates in files where it's needed instead of hand-crafting it (e.g. in `pyproject.toml`)
+
+!!!DANGER
+
+    Community modules are supported on a best-effort basis and, for maintenance reasons, any change to them
+    is only covered under minor and patch changes.
+    Changes to community modules DO NOT contribute to major version changes!
+    If your community module container was broken by a minor or patch version change, check out the change logs!

 ## Documentation contributions

-The _Testcontainers for Go_ documentation is a static site built with [MkDocs](https://www.mkdocs.org/).
-We use the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme, which offers a number of useful extensions to MkDocs.
+The _Testcontainers for Python_ documentation is a static site built with [MkDocs](https://www.mkdocs.org/){:target="\_blank"}.
+We use the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/){:target="\_blank"} theme, which offers a number of useful extensions to MkDocs.

 We publish our documentation using Netlify.

 ### Adding code snippets

-To include code snippets in the documentation, we use the [codeinclude plugin](https://github.com/rnorth/mkdocs-codeinclude-plugin), which uses the following syntax:
+To include code snippets in the documentation, we use the [codeinclude plugin](https://github.com/rnorth/mkdocs-codeinclude-plugin){:target="\_blank"}, which uses the following syntax:
    > [Human readable title for snippet](./relative_path_to_example_code.go) targeting_expression
    > [Human readable title for snippet](./relative_path_to_example_code.go) targeting_expression
    > <!--/codeinclude-->
    @@ -75,7 +100,7 @@ in the snippet, and each `targeting_expression` would be: - `block:someString` or - `inside_block:someString` -Please refer to the [codeinclude plugin documentation](https://github.com/rnorth/mkdocs-codeinclude-plugin) for more information. +Please refer to the [codeinclude plugin documentation](https://github.com/rnorth/mkdocs-codeinclude-plugin){:target="\_blank"} for more information. ### Previewing rendered content @@ -93,7 +118,7 @@ Once finished, you can destroy the container with the following command: make clean-docs ``` -### PR Preview deployments +### PR preview deployments Note that documentation for pull requests will automatically be published by Netlify as 'deploy previews'. These deployment previews can be accessed via the `deploy/netlify` check that appears for each pull request. diff --git a/docs/contributing_docs.md b/docs/contributing_docs.md deleted file mode 100644 index 343e683ff..000000000 --- a/docs/contributing_docs.md +++ /dev/null @@ -1,109 +0,0 @@ -# Contributing to documentation - -The Testcontainers for Java documentation is a static site built with [MkDocs](https://www.mkdocs.org/). -We use the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme, which offers a number of useful extensions to MkDocs. - -In addition we use a [custom plugin](https://github.com/rnorth/mkdocs-codeinclude-plugin) for inclusion of code snippets. - -We publish our documentation using Netlify. - -## Previewing rendered content - -### Using Docker locally - -The root of the project contains a `docker-compose.yml` file. Simply run `docker-compose up` and then access the docs at [http://localhost:8000](http://localhost:8000). - -### Using Python locally - -* Ensure that you have Python 3.8.0 or higher. -* Set up a virtualenv and run `pip install -r requirements.txt` in the `testcontainers-java` root directory. -* Once Python dependencies have been installed, run `mkdocs serve` to start a local auto-updating MkDocs server. - -### PR Preview deployments - -Note that documentation for pull requests will automatically be published by Netlify as 'deploy previews'. -These deployment previews can be accessed via the `deploy/netlify` check that appears for each pull request. - -## Codeincludes - -The Gradle project under `docs/examples` is intended to hold compilable, runnable example code that can be included as -snippets into the documentation at build-time. - -As a result, we can have more confidence that code samples shown in the documentation is valid. - -We use a custom plugin for MkDocs to include snippets into our docs. - -A codeinclude block will resemble a regular markdown link surrounded by a pair of XML comments, e.g.: - - - -
    <!--codeinclude-->
    -[Human readable title for snippet](./relative_path_to_example_code.java) targeting_expression
    -<!--/codeinclude-->
    -
    - -Where `targeting_expression` could be: - -* `block:someString` or -* `inside_block:someString` - -If these are provided, the macro will seek out any line containing the token `someString` and grab the next curly brace -delimited block that it finds. `block` will grab the starting line and closing brace, whereas `inside_block` will omit -these. - -e.g., given: -```java - -public class FooService { - - public void doFoo() { - foo.doSomething(); - } - - ... - -``` - -If we use `block:doFoo` as our targeting expression, we will have the following content included into our page: - -```java -public void doFoo() { - foo.doSomething(); -} -``` - -Whereas using `inside_block:doFoo` we would just have the inner content of the method included: - -```java -foo.doSomething(); -``` - -Note that: - -* Any code included will have its indentation reduced -* Every line in the source file will be searched for an instance of the token (e.g. `doFoo`). If more than one line - includes that token, then potentially more than one block could be targeted for inclusion. It is advisable to use a - specific, unique token to avoid unexpected behaviour. - -When we wish to include a section of code that does not naturally appear within braces, we can simply insert our token, -with matching braces, in a comment. -While a little ugly, this has the benefit of working in any context and is easy to understand. -For example: - -```java -public class FooService { - - public void boringMethod() { - doSomethingBoring(); - - // doFoo { - doTheThingThatWeActuallyWantToShow(); - // } - } - - -``` diff --git a/docs/css/extra.css b/docs/css/extra.css index 04eb018a0..4c700dac4 100644 --- a/docs/css/extra.css +++ b/docs/css/extra.css @@ -126,3 +126,11 @@ body .card-grid-item:focus { height: 1.75em; } } + +.md-typeset__table { + min-width: 100%; + } + + .md-typeset table:not([class]) { + display: table; +} diff --git a/docs/features/configuration.md b/docs/features/configuration.md new file mode 100644 index 000000000..b2a711dd0 --- /dev/null +++ b/docs/features/configuration.md @@ -0,0 +1,29 @@ +# Custom configuration + +..... + +## Docker host detection + +_Testcontainers for Go_ will attempt to detect the Docker environment and configure everything to work automatically. + +However, sometimes customization is required. _Testcontainers for Go_ will respect the following order: + +1. Read the **tc.host** property in the `~/.testcontainers.properties` file. E.g. `tc.host=tcp://my.docker.host:1234` + +2. Read the **DOCKER_HOST** environment variable. E.g. `DOCKER_HOST=unix:///var/run/docker.sock` + See [Docker environment variables](https://docs.docker.com/engine/reference/commandline/cli/#environment-variables) for more information. + +3. Read the Go context for the **DOCKER_HOST** key. E.g. `ctx.Value("DOCKER_HOST")`. This is used internally for the library to pass the Docker host to the resource reaper. + +4. Read the default Docker socket path, without the unix schema. E.g. `/var/run/docker.sock` + +5. Read the **docker.host** property in the `~/.testcontainers.properties` file. E.g. `docker.host=tcp://my.docker.host:1234` + +6. Read the rootless Docker socket path, checking in the following alternative locations: + + 1. `${XDG_RUNTIME_DIR}/.docker/run/docker.sock`. + 2. `${HOME}/.docker/run/docker.sock`. + 3. `${HOME}/.docker/desktop/docker.sock`. + 4. `/run/user/${UID}/docker.sock`, where `${UID}` is the user ID of the current user. + +7. 
The library raises an exception if none of the above are set, meaning that the Docker host could not be detected.
diff --git a/docs/features/creating_container.md b/docs/features/creating_container.md
index 45b983be3..09cb7bc5d 100644
--- a/docs/features/creating_container.md
+++ b/docs/features/creating_container.md
@@ -1 +1,74 @@
-hi
+# How to create a container
+
+Testcontainers-Python is a thin wrapper around Docker designed for use in tests. Anything you can run in Docker, you can spin up with Testcontainers-Python:
+
+- NoSQL databases or other data stores (e.g. Redis, ElasticSearch, MongoDB)
+- Web servers/proxies (e.g. NGINX, Apache)
+- Log services (e.g. Logstash, Kibana)
+- Other services developed by your team/organization which are already Dockerized
+
+## Run
+
+- Since Testcontainers-Python v3.10.0
+
+You can use the high-level `run` helper to start a container in one call, similar to Docker's `docker run`. Under the hood it builds a temporary network, mounts files or tmpfs, and waits for readiness for you.
+
+```python
+import io
+
+from docker import DockerClient
+from testcontainers.core.container import run
+from testcontainers.core.network import DockerNetwork
+from testcontainers.core.waiting_utils import wait_for_logs
+
+
+def test_nginx_run():
+    # Create an isolated network
+    network = DockerNetwork()
+    network.create()
+    try:
+        # File to mount into the container
+        host_file = io.BytesIO(b"Hello from file!")
+
+        # Run the container with various options
+        container = run(
+            image="nginx:alpine",
+            network=network.name,
+            files=[(host_file, "/tmp/file.txt")],
+            tmpfs={"/tmp": "rw"},
+            labels={"testcontainers.label": "true"},
+            environment={"TEST": "true"},
+            ports={"80/tcp": None},  # expose port 80
+            wait=wait_for_logs("Configuration complete; ready for start"),
+            startup_timeout=5,
+        )
+        try:
+            # Inspect runtime state
+            client = DockerClient.from_env()
+            info = client.containers.get(container.id).attrs
+
+            # Networks: the container is attached to our isolated network
+            assert network.name in info["NetworkSettings"]["Networks"]
+
+            # Environment
+            env = info["Config"]["Env"]
+            assert any(e.startswith("TEST=true") for e in env)
+
+            # Tmpfs
+            assert info["HostConfig"]["Tmpfs"].get("/tmp") == "rw"
+
+            # Labels
+            assert info["Config"]["Labels"]["testcontainers.label"] == "true"
+
+            # File copy
+            bits, _ = client.api.get_archive(container.id, "/tmp/file.txt")
+            archive = b"".join(bits)
+            # extract and verify...
+        finally:
+            container.stop()
+    finally:
+        network.remove()
+``` diff --git a/docs/getting_help.md b/docs/getting_help.md index e7fc05394..51a1227ac 100644 --- a/docs/getting_help.md +++ b/docs/getting_help.md @@ -5,6 +5,6 @@ However, sometimes things don't go the way we'd expect, and we'd like to try and To contact the Testcontainers team and other users you can: -* Join our [Slack team](https://slack.testcontainers.org) -* [Search our issues tracker](https://github.com/testcontainers/testcontainers-java/issues), or raise a new issue if you find any bugs or have suggested improvements -* [Search Stack Overflow](https://stackoverflow.com/questions/tagged/testcontainers), especially among posts tagged with `testcontainers` +- Join our [Slack team](https://slack.testcontainers.org) +- [Search our issues tracker](https://github.com/testcontainers/testcontainers-python/issues), or raise a new issue if you find any bugs or have suggested improvements +- [Search Stack Overflow](https://stackoverflow.com/questions/tagged/testcontainers), especially among posts tagged with `testcontainers` diff --git a/docs/index.md b/docs/index.md index 11593916d..6ad48f82a 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,11 +2,11 @@

    Not using Python? Here are other supported languages!

- Java
+ Java
Go
.NET
Node.js
- Python
+ Python
Rust
Haskell
Ruby
@@ -14,23 +14,29 @@

 ## About Testcontainers for Python

-_Testcontainers for Python_ is a Python library that supports JUnit tests, providing lightweight, throwaway instances of common databases, Selenium web browsers, or anything else that can run in a Docker container.
+_Testcontainers for Python_ is a Python library that makes it simple to create and clean up container-based dependencies for automated integration/smoke tests. The clean, easy-to-use API enables developers to programmatically define containers that should be run as part of a test and clean up those resources when the test is done.

-Testcontainers make the following kinds of tests easier:
+To start using _Testcontainers for Python_, please read our [quickstart guide](quickstart.md).

-- **Data access layer integration tests**: use a containerized instance of a MySQL, PostgreSQL or Oracle database to test your data access layer code for complete compatibility, but without requiring complex setup on developers' machines and safe in the knowledge that your tests will always start with a known DB state. Any other database type that can be containerized can also be used.
-- **Application integration tests**: for running your application in a short-lived test mode with dependencies, such as databases, message queues or web servers.
+## Code Comments

-## Prerequisites
+Inline documentation and docs that live alongside the code are crucial for us. Testcontainers Python follows [PEP 257](https://peps.python.org/pep-0257/){:target="\_blank"} comment conventions. The codebase previously supported Sphinx, so you may encounter comments not yet updated for the new documentation style.
+
+## Who is using Testcontainers Python?
+
+- [AWS](https://aws.amazon.com/)
+- [Google](https://google.com/)
+- [Grafana](https://grafana.com/)
+- [Timescale](https://www.timescale.com/)

 ## License

-See [LICENSE](https://raw.githubusercontent.com/testcontainers/testcontainers-python/main/LICENSE).
+See [LICENSE](https://raw.githubusercontent.com/testcontainers/testcontainers-python/refs/heads/main/LICENSE.txt){:target="\_blank"}.

 ## Attributions

 ## Copyright

-Copyright (c) 2015-2021 Richard North and other authors.
+Copyright (c) 2015-2021 Sergey Pirogov and other authors.

-See [AUTHORS](https://raw.githubusercontent.com/testcontainers/testcontainers-python/main/AUTHORS) for contributors.
+See [AUTHORS](https://github.com/testcontainers/testcontainers-python/graphs/contributors){:target="\_blank"} for contributors.
diff --git a/docs/modules/arangodb.md b/docs/modules/arangodb.md
new file mode 100644
index 000000000..97834f90b
--- /dev/null
+++ b/docs/modules/arangodb.md
@@ -0,0 +1,41 @@
+# ArangoDB
+
+Since testcontainers-python :material-tag: v4.6.0
+
+## Introduction
+
+The Testcontainers module for ArangoDB.
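+
+A minimal sketch of typical use, assuming the `python-arango` driver and the module's `get_connection_url()` accessor; the image tag and the `arango_root_password` value here are illustrative, not defaults to rely on:
+
+```python
+from arango import ArangoClient
+
+from testcontainers.arangodb import ArangoDbContainer
+
+with ArangoDbContainer("arangodb:3.11", arango_root_password="passwd") as arango:
+    # Connect to the mapped endpoint with the python-arango client
+    client = ArangoClient(hosts=arango.get_connection_url())
+    sys_db = client.db("_system", username="root", password="passwd")
+    print(sys_db.version())
+```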
+ +## Adding this module to your project dependencies + +Please run the following command to add the ArangoDB module to your python dependencies: + +```bash +pip install testcontainers[arangodb] +``` + +## Usage example + + + +[Creating an ArangoDB container](../../modules/arangodb/example_basic.py) + + + +## Features + +- Multi-model database support (key-value, document, graph) +- AQL (ArangoDB Query Language) for complex queries +- Built-in aggregation functions +- Collection management +- Document CRUD operations +- Bulk document import + +## Configuration + +The ArangoDB container can be configured with the following parameters: + +- `username`: Database username (default: "root") +- `password`: Database password (default: "test") +- `port`: Port to expose (default: 8529) +- `version`: ArangoDB version to use (default: "latest") diff --git a/docs/modules/aws.md b/docs/modules/aws.md new file mode 100644 index 000000000..dcb512444 --- /dev/null +++ b/docs/modules/aws.md @@ -0,0 +1,23 @@ +# AWS + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for AWS services, including S3, DynamoDB, and SQS. + +## Adding this module to your project dependencies + +Please run the following command to add the AWS module to your python dependencies: + +``` +pip install testcontainers[aws] +``` + +## Usage example + + + +[Creating an AWS container](../../modules/aws/example_basic.py) + + diff --git a/docs/modules/azurite.md b/docs/modules/azurite.md new file mode 100644 index 000000000..912a2f084 --- /dev/null +++ b/docs/modules/azurite.md @@ -0,0 +1,23 @@ +# Azurite + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Azurite. + +## Adding this module to your project dependencies + +Please run the following command to add the Azurite module to your python dependencies: + +```bash +pip install testcontainers[azurite] +``` + +## Usage example + + + +[Creating an Azurite container](../../modules/azurite/example_basic.py) + + diff --git a/docs/modules/cassandra.md b/docs/modules/cassandra.md new file mode 100644 index 000000000..68a4be83c --- /dev/null +++ b/docs/modules/cassandra.md @@ -0,0 +1,23 @@ +# Cassandra + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Cassandra. + +## Adding this module to your project dependencies + +Please run the following command to add the Cassandra module to your python dependencies: + +```bash +pip install testcontainers[cassandra] +``` + +## Usage example + + + +[Creating a Cassandra container](../../modules/cassandra/example_basic.py) + + diff --git a/docs/modules/chroma.md b/docs/modules/chroma.md new file mode 100644 index 000000000..1b7e80fcd --- /dev/null +++ b/docs/modules/chroma.md @@ -0,0 +1,43 @@ +# Chroma + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Chroma. 
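+
+A rough sketch of talking to the container from the `chromadb` client. The container port `8000` and the generic `get_container_host_ip()`/`get_exposed_port()` accessors from the core API are assumptions here, not guarantees of this module's interface:
+
+```python
+import chromadb
+
+from testcontainers.chroma import ChromaContainer
+
+with ChromaContainer() as chroma:
+    # The core accessors expose the host and the mapped HTTP port
+    client = chromadb.HttpClient(
+        host=chroma.get_container_host_ip(),
+        port=int(chroma.get_exposed_port(8000)),
+    )
+    collection = client.get_or_create_collection("docs")
+    collection.add(ids=["1"], documents=["hello testcontainers"])
+    print(collection.query(query_texts=["hello"], n_results=1)["documents"])
+```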
+ +## Adding this module to your project dependencies + +Please run the following command to add the Chroma module to your python dependencies: + +```bash +pip install testcontainers[chroma] +``` + +## Usage example + + + +[Creating a Chroma container](../../modules/chroma/example_basic.py) + + + +## Features + +- Vector similarity search +- Document storage and retrieval +- Metadata filtering +- Collection management +- Embedding storage +- Distance metrics +- Batch operations +- REST API support + +## Configuration + +The Chroma container can be configured with the following parameters: + +- `port`: Port to expose (default: 8000) +- `version`: Chroma version to use (default: "latest") +- `persist_directory`: Directory to persist data (default: None) +- `allow_reset`: Whether to allow collection reset (default: True) diff --git a/docs/modules/clickhouse.md b/docs/modules/clickhouse.md new file mode 100644 index 000000000..90f425955 --- /dev/null +++ b/docs/modules/clickhouse.md @@ -0,0 +1,44 @@ +# ClickHouse + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for ClickHouse. + +## Adding this module to your project dependencies + +Please run the following command to add the ClickHouse module to your python dependencies: + +```bash +pip install testcontainers[clickhouse] +``` + +## Usage example + + + +[Creating a ClickHouse container](../../modules/clickhouse/example_basic.py) + + + +## Features + +- Column-oriented storage +- High-performance analytics +- Real-time data processing +- SQL support +- Data compression +- Parallel processing +- Distributed queries +- Integration with pandas for data analysis + +## Configuration + +The ClickHouse container can be configured with the following parameters: + +- `port`: Port to expose (default: 9000) +- `version`: ClickHouse version to use (default: "latest") +- `user`: Database username (default: "default") +- `password`: Database password (default: "") +- `database`: Database name (default: "default") diff --git a/docs/modules/cockroachdb.md b/docs/modules/cockroachdb.md new file mode 100644 index 000000000..1a8018052 --- /dev/null +++ b/docs/modules/cockroachdb.md @@ -0,0 +1,44 @@ +# CockroachDB + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for CockroachDB. + +## Adding this module to your project dependencies + +Please run the following command to add the CockroachDB module to your python dependencies: + +```bash +pip install testcontainers[cockroachdb] +``` + +## Usage example + + + +[Creating a CockroachDB container](../../modules/cockroachdb/example_basic.py) + + + +## Features + +- Distributed SQL database +- ACID transactions +- Strong consistency +- Horizontal scaling +- Built-in replication +- Automatic sharding +- SQL compatibility +- Integration with pandas for data analysis + +## Configuration + +The CockroachDB container can be configured with the following parameters: + +- `username`: Database username (default: "root") +- `password`: Database password (default: "") +- `database`: Database name (default: "postgres") +- `port`: Port to expose (default: 26257) +- `version`: CockroachDB version to use (default: "latest") diff --git a/docs/modules/cosmosdb.md b/docs/modules/cosmosdb.md new file mode 100644 index 000000000..222209ace --- /dev/null +++ b/docs/modules/cosmosdb.md @@ -0,0 +1,43 @@ +# CosmosDB + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for CosmosDB. 
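+
+A minimal sketch mirroring the emulator tests in this repository (`partition_count` and `bind_ports` come from those tests; the printed URL is illustrative):
+
+```python
+from testcontainers.cosmosdb import CosmosDBNoSQLEndpointContainer
+
+with CosmosDBNoSQLEndpointContainer(partition_count=1, bind_ports=False) as emulator:
+    # The NoSQL endpoint listens on container port 8081
+    host = emulator.get_container_host_ip()
+    port = emulator.get_exposed_port(8081)
+    print(f"Emulator endpoint: https://{host}:{port}")
+```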
+ +## Adding this module to your project dependencies + +Please run the following command to add the CosmosDB module to your python dependencies: + +```bash +pip install testcontainers[cosmosdb] +``` + +## Usage example + + + +[Creating a CosmosDB container](../../modules/cosmosdb/example_basic.py) + + + +## Features + +- Multi-model database support (document, key-value, wide-column, graph) +- SQL-like query language +- Automatic indexing +- Partitioning support +- Global distribution +- Built-in aggregation functions +- Container management +- Document CRUD operations + +## Configuration + +The CosmosDB container can be configured with the following parameters: + +- `port`: Port to expose (default: 8081) +- `version`: CosmosDB Emulator version to use (default: "latest") +- `ssl_verify`: Whether to verify SSL certificates (default: False) +- `emulator_key`: Emulator key for authentication (default: "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") diff --git a/docs/modules/db2.md b/docs/modules/db2.md new file mode 100644 index 000000000..7143f7e3d --- /dev/null +++ b/docs/modules/db2.md @@ -0,0 +1,43 @@ +# DB2 + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for DB2. + +## Adding this module to your project dependencies + +Please run the following command to add the DB2 module to your python dependencies: + +```bash +pip install testcontainers[db2] +``` + +## Usage example + + + +[Creating a DB2 container](../../modules/db2/example_basic.py) + + + +## Features + +- Full SQL support +- Transaction management +- Stored procedures +- User-defined functions +- Advanced analytics +- JSON support +- Integration with pandas for data analysis + +## Configuration + +The DB2 container can be configured with the following parameters: + +- `username`: Database username (default: "db2inst1") +- `password`: Database password (default: "password") +- `database`: Database name (default: "testdb") +- `port`: Port to expose (default: 50000) +- `version`: DB2 version to use (default: "latest") diff --git a/docs/modules/elasticsearch.md b/docs/modules/elasticsearch.md new file mode 100644 index 000000000..b3faabcfe --- /dev/null +++ b/docs/modules/elasticsearch.md @@ -0,0 +1,23 @@ +# Elasticsearch + +Since testcontainers-python :material-tag: v4.4.0 + +## Introduction + +The Testcontainers module for Elasticsearch. + +## Adding this module to your project dependencies + +Please run the following command to add the Elasticsearch module to your python dependencies: + +```bash +pip install testcontainers[elasticsearch] +``` + +## Usage example + + + +[Creating an Elasticsearch container](../../modules/elasticsearch/example_basic.py) + + diff --git a/docs/modules/generic.md b/docs/modules/generic.md new file mode 100644 index 000000000..87d1209b7 --- /dev/null +++ b/docs/modules/generic.md @@ -0,0 +1,23 @@ +# Generic + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for running generic containers with various configurations and features. 
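+
+A rough sketch, assuming `ServerContainer(port, image)` as the module's entry point and using the generic core accessors; `nginx:alpine` is just a stand-in for any HTTP server image:
+
+```python
+import urllib.request
+
+from testcontainers.generic import ServerContainer
+
+with ServerContainer(port=80, image="nginx:alpine") as server:
+    host = server.get_container_host_ip()
+    port = server.get_exposed_port(80)
+    with urllib.request.urlopen(f"http://{host}:{port}") as resp:
+        # The server's default page should be reachable
+        print(resp.status)
+```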
+ +## Adding this module to your project dependencies + +Please run the following command to add the Generic module to your python dependencies: + +``` +pip install testcontainers[generic] +``` + +## Usage example + + + +[Creating a Generic container](../../modules/generic/example_basic.py) + + diff --git a/docs/modules/google.md b/docs/modules/google.md new file mode 100644 index 000000000..ff9dd3aef --- /dev/null +++ b/docs/modules/google.md @@ -0,0 +1,23 @@ +# Google + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for Google Cloud services, including Cloud Storage, Pub/Sub, BigQuery, and Datastore. + +## Adding this module to your project dependencies + +Please run the following command to add the Google module to your python dependencies: + +``` +pip install testcontainers[google] +``` + +## Usage example + + + +[Creating a Google Cloud container](../../modules/google/example_basic.py) + + diff --git a/docs/modules/influxdb.md b/docs/modules/influxdb.md new file mode 100644 index 000000000..dbdc01abf --- /dev/null +++ b/docs/modules/influxdb.md @@ -0,0 +1,17 @@ +# InfluxDB + +Since testcontainers-python :material-tag: v4.4.0 + +## Introduction + +The Testcontainers module for InfluxDB. + +## Adding this module to your project dependencies + +Please run the following command to add the InfluxDB module to your python dependencies: + +```bash +pip install testcontainers[influxdb] +``` + +## Usage example diff --git a/docs/modules/k3s.md b/docs/modules/k3s.md new file mode 100644 index 000000000..67c3772a0 --- /dev/null +++ b/docs/modules/k3s.md @@ -0,0 +1,23 @@ +# K3s + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for K3s. + +## Adding this module to your project dependencies + +Please run the following command to add the K3s module to your python dependencies: + +```bash +pip install testcontainers[k3s] +``` + +## Usage example + + + +[Creating a K3s container](../../modules/k3s/example_basic.py) + + diff --git a/docs/modules/kafka.md b/docs/modules/kafka.md new file mode 100644 index 000000000..3a0a6083e --- /dev/null +++ b/docs/modules/kafka.md @@ -0,0 +1,23 @@ +# Kafka + +Since testcontainers-python :material-tag: v4.4.0 + +## Introduction + +The Testcontainers module for Kafka. + +## Adding this module to your project dependencies + +Please run the following command to add the Kafka module to your python dependencies: + +```bash +pip install testcontainers[kafka] +``` + +## Usage example + + + +[Creating a Kafka container](../../modules/kafka/example_basic.py) + + diff --git a/docs/modules/keycloak.md b/docs/modules/keycloak.md new file mode 100644 index 000000000..9148badbf --- /dev/null +++ b/docs/modules/keycloak.md @@ -0,0 +1,23 @@ +# Keycloak + +Since testcontainers-python :material-tag: v4.8.2 + +## Introduction + +The Testcontainers module for Keycloak. + +## Adding this module to your project dependencies + +Please run the following command to add the Keycloak module to your python dependencies: + +```bash +pip install testcontainers[keycloak] +``` + +## Usage example + + + +[Creating a Keycloak container](../../modules/keycloak/example_basic.py) + + diff --git a/docs/modules/localstack.md b/docs/modules/localstack.md new file mode 100644 index 000000000..132682828 --- /dev/null +++ b/docs/modules/localstack.md @@ -0,0 +1,23 @@ +# LocalStack + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for LocalStack. 
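+
+A minimal sketch pairing the container with `boto3` (the image tag, region, and dummy credentials are illustrative; `get_url()` is assumed to return the emulator's edge endpoint):
+
+```python
+import boto3
+
+from testcontainers.localstack import LocalStackContainer
+
+with LocalStackContainer(image="localstack/localstack:3.0") as localstack:
+    # Point the AWS SDK at the emulated endpoint
+    s3 = boto3.client(
+        "s3",
+        endpoint_url=localstack.get_url(),
+        region_name="us-east-1",
+        aws_access_key_id="test",
+        aws_secret_access_key="test",
+    )
+    s3.create_bucket(Bucket="example-bucket")
+    print(s3.list_buckets()["Buckets"])
+```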
+ +## Adding this module to your project dependencies + +Please run the following command to add the LocalStack module to your python dependencies: + +```bash +pip install testcontainers[localstack] +``` + +## Usage example + + + +[Creating a LocalStack container](../../modules/localstack/example_basic.py) + + diff --git a/docs/modules/mailpit.md b/docs/modules/mailpit.md new file mode 100644 index 000000000..2a561d325 --- /dev/null +++ b/docs/modules/mailpit.md @@ -0,0 +1,23 @@ +# Mailpit + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Mailpit. + +## Adding this module to your project dependencies + +Please run the following command to add the Mailpit module to your python dependencies: + +```bash +pip install testcontainers[mailpit] +``` + +## Usage example + + + +[Creating a Mailpit container](../../modules/mailpit/example_basic.py) + + diff --git a/docs/modules/memcached.md b/docs/modules/memcached.md new file mode 100644 index 000000000..04e3b1cf8 --- /dev/null +++ b/docs/modules/memcached.md @@ -0,0 +1,23 @@ +# Memcached + +Since testcontainers-python :material-tag: v4.4.1 + +## Introduction + +The Testcontainers module for Memcached. + +## Adding this module to your project dependencies + +Please run the following command to add the Memcached module to your python dependencies: + +```bash +pip install testcontainers[memcached] +``` + +## Usage example + + + +[Creating a Memcached container](../../modules/memcached/example_basic.py) + + diff --git a/docs/modules/milvus.md b/docs/modules/milvus.md new file mode 100644 index 000000000..6e762362f --- /dev/null +++ b/docs/modules/milvus.md @@ -0,0 +1,23 @@ +# Milvus + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Milvus. + +## Adding this module to your project dependencies + +Please run the following command to add the Milvus module to your python dependencies: + +```bash +pip install testcontainers[milvus] +``` + +## Usage example + + + +[Creating a Milvus container](../../modules/milvus/example_basic.py) + + diff --git a/docs/modules/minio.md b/docs/modules/minio.md new file mode 100644 index 000000000..fd904191b --- /dev/null +++ b/docs/modules/minio.md @@ -0,0 +1,23 @@ +# MinIO + +Since testcontainers-python :material-tag: v4.4.0 + +## Introduction + +The Testcontainers module for MinIO. + +## Adding this module to your project dependencies + +Please run the following command to add the MinIO module to your python dependencies: + +```bash +pip install testcontainers[minio] +``` + +## Usage example + + + +[Creating a MinIO container](../../modules/minio/example_basic.py) + + diff --git a/docs/modules/mongodb.md b/docs/modules/mongodb.md new file mode 100644 index 000000000..6db2ece6b --- /dev/null +++ b/docs/modules/mongodb.md @@ -0,0 +1,23 @@ +# MongoDB + +Since testcontainers-python :material-tag: v4.3.1 + +## Introduction + +The Testcontainers module for MongoDB. 
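+
+A minimal sketch, assuming the module's `get_connection_client()` helper returns a `pymongo.MongoClient` bound to the mapped port (the image tag and sample document are illustrative):
+
+```python
+from testcontainers.mongodb import MongoDbContainer
+
+with MongoDbContainer("mongo:7.0") as mongo:
+    # get_connection_client() hands back a ready-to-use MongoClient
+    db = mongo.get_connection_client().test
+    db.restaurants.insert_one({"name": "Ithaca", "cuisine": "Greek"})
+    print(db.restaurants.count_documents({}))
+```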
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the MongoDB module to your python dependencies:
+
+```bash
+pip install testcontainers[mongodb]
+```
+
+## Usage example
+
+
+
+[Creating a MongoDB container](../../modules/mongodb/example_basic.py)
+
+
diff --git a/docs/modules/mqtt.md b/docs/modules/mqtt.md
new file mode 100644
index 000000000..6b2afefeb
--- /dev/null
+++ b/docs/modules/mqtt.md
@@ -0,0 +1,23 @@
+# MQTT
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for MQTT.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the MQTT module to your python dependencies:
+
+```bash
+pip install testcontainers[mqtt]
+```
+
+## Usage example
+
+
+
+[Creating an MQTT container](../../modules/mqtt/example_basic.py)
+
+
diff --git a/docs/modules/mssql.md b/docs/modules/mssql.md
new file mode 100644
index 000000000..cde204395
--- /dev/null
+++ b/docs/modules/mssql.md
@@ -0,0 +1,23 @@
+# MSSQL
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for MSSQL.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the MSSQL module to your python dependencies:
+
+```bash
+pip install testcontainers[mssql]
+```
+
+## Usage example
+
+
+
+[Creating an MSSQL container](../../modules/mssql/example_basic.py)
+
+
diff --git a/modules/mssql/example_basic.py b/modules/mssql/example_basic.py
new file mode 100644
index 000000000..78287bc70
--- /dev/null
+++ b/modules/mssql/example_basic.py
@@ -0,0 +1,50 @@
+import sqlalchemy
+
+from testcontainers.mssql import SqlServerContainer
+
+
+def basic_example():
+    # Create a SQL Server container with default settings
+    with SqlServerContainer() as mssql:
+        # Get the connection URL
+        connection_url = mssql.get_connection_url()
+        print(f"Connection URL: {connection_url}")
+
+        # Create a SQLAlchemy engine
+        engine = sqlalchemy.create_engine(connection_url)
+
+        # Create a test table and insert some data
+        with engine.begin() as connection:
+            # Create a test table
+            connection.execute(
+                sqlalchemy.text("""
+                CREATE TABLE test_table (
+                    id INT IDENTITY(1,1) PRIMARY KEY,
+                    name NVARCHAR(50),
+                    value INT
+                )
+            """)
+            )
+            print("Created test table")
+
+            # Insert some test data
+            connection.execute(
+                sqlalchemy.text("""
+                INSERT INTO test_table (name, value)
+                VALUES
+                    ('test1', 100),
+                    ('test2', 200),
+                    ('test3', 300)
+            """)
+            )
+            print("Inserted test data")
+
+            # Query the data
+            result = connection.execute(sqlalchemy.text("SELECT * FROM test_table"))
+            print("\nQuery results:")
+            for row in result:
+                print(f"id: {row[0]}, name: {row[1]}, value: {row[2]}")
+
+
+if __name__ == "__main__":
+    basic_example()
diff --git a/docs/modules/mysql.md b/docs/modules/mysql.md
index a02f15c90..3f24c6e1e 100644
--- a/docs/modules/mysql.md
+++ b/docs/modules/mysql.md
@@ -1,6 +1,6 @@
 # MySQL
 
-Since testcontainers-go :material-tag: v0.20.0
+Since testcontainers-python :material-tag: v4.4.1
 
 ## Introduction
 
@@ -8,86 +8,16 @@
 The Testcontainers module for MySQL.
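+
+A minimal sketch of connecting with SQLAlchemy (assuming `sqlalchemy` and a MySQL driver such as `pymysql` are installed; the maintained example is linked below):
+
+```python
+import sqlalchemy
+
+from testcontainers.mysql import MySqlContainer
+
+with MySqlContainer() as mysql:
+    # get_connection_url() points at the randomly mapped host port.
+    engine = sqlalchemy.create_engine(mysql.get_connection_url())
+    with engine.connect() as connection:
+        print(connection.execute(sqlalchemy.text("SELECT VERSION()")).scalar())
+```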
## Adding this module to your project dependencies -Please run the following command to add the MySQL module to your Go dependencies: +Please run the following command to add the MySQL module to your python dependencies: -``` -go get github.com/testcontainers/testcontainers-go/modules/mysql +```bash +pip install testcontainers[mysql] ``` ## Usage example -[Creating a MySQL container](../../modules/mysql/tests/test_mysql.py) inside_block:runMySQLContainer - - - -## Module Reference - -### Run function - -- Since testcontainers-go :material-tag: v0.32.0 - -!!!info -The `RunContainer(ctx, opts...)` function is deprecated and will be removed in the next major release of _Testcontainers for Go_. - -The MySQL module exposes one entrypoint function to create the container, and this function receives three parameters: - -```golang -func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustomizer) (*MySQLContainer, error) -``` - -- `context.Context`, the Go context. -- `string`, the Docker image to use. -- `testcontainers.ContainerCustomizer`, a variadic argument for passing options. - -### Container Options - -When starting the MySQL container, you can pass options in a variadic way to configure it. - -!!!tip - - You can find all the available configuration and environment variables for the MySQL Docker image on [Docker Hub](https://hub.docker.com/_/mysql). - -#### Image - -Use the second argument in the `Run` function to set a valid Docker image. -In example: `Run(context.Background(), "mysql:8.0.36")`. - -{% include "../index.md" %} - -#### Set username, password and database name - -If you need to set a different database, and its credentials, you can use `WithUsername`, `WithPassword`, `WithDatabase` -options. - -!!!info -The default values for the username is `root`, for password is `test` and for the default database name is `test`. - -#### Init Scripts - -If you would like to perform DDL or DML operations in the MySQL container, add one or more `*.sql`, `*.sql.gz`, or `*.sh` -scripts to the container request, using the `WithScripts(scriptPaths ...string)`. Those files will be copied under `/docker-entrypoint-initdb.d`. - - - -[Example of Init script](../../modules/mysql/tests/seeds/01-schema.sql) - - - -#### Custom configuration - -If you need to set a custom configuration, you can use `WithConfigFile` option to pass the path to a custom configuration file. - -### Container Methods - -#### ConnectionString - -This method returns the connection string to connect to the MySQL container, using the default `3306` port. -It's possible to pass extra parameters to the connection string, e.g. `tls=skip-verify` or `application_name=myapp`, in a variadic way. - - - -[Get connection string](../../modules/mysql/tests/test_mysql.py) inside_block:connectionString +[Creating a MySQL container](../../modules/mysql/example_basic.py) diff --git a/docs/modules/nats.md b/docs/modules/nats.md new file mode 100644 index 000000000..9800ff8f7 --- /dev/null +++ b/docs/modules/nats.md @@ -0,0 +1,23 @@ +# NATS + +Since testcontainers-python :material-tag: v4.4.0 + +## Introduction + +The Testcontainers module for NATS. 
+ +## Adding this module to your project dependencies + +Please run the following command to add the NATS module to your python dependencies: + +```bash +pip install testcontainers[nats] +``` + +## Usage example + + + +[Creating a NATS container](../../modules/nats/example_basic.py) + + diff --git a/docs/modules/neo4j.md b/docs/modules/neo4j.md new file mode 100644 index 000000000..3c56ba5d0 --- /dev/null +++ b/docs/modules/neo4j.md @@ -0,0 +1,23 @@ +# Neo4j + +Since testcontainers-python :material-tag: v4.4.0 + +## Introduction + +The Testcontainers module for Neo4j. + +## Adding this module to your project dependencies + +Please run the following command to add the Neo4j module to your python dependencies: + +```bash +pip install testcontainers[neo4j] +``` + +## Usage example + + + +[Creating a Neo4j container](../../modules/neo4j/example_basic.py) + + diff --git a/docs/modules/nginx.md b/docs/modules/nginx.md new file mode 100644 index 000000000..5dfafc0c5 --- /dev/null +++ b/docs/modules/nginx.md @@ -0,0 +1,23 @@ +# Nginx + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Nginx. + +## Adding this module to your project dependencies + +Please run the following command to add the Nginx module to your python dependencies: + +```bash +pip install testcontainers[nginx] +``` + +## Usage example + + + +[Creating an Nginx container](../../modules/nginx/example_basic.py) + + diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md new file mode 100644 index 000000000..e8ca5c573 --- /dev/null +++ b/docs/modules/ollama.md @@ -0,0 +1,23 @@ +# Ollama + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Ollama. + +## Adding this module to your project dependencies + +Please run the following command to add the Ollama module to your python dependencies: + +```bash +pip install testcontainers[ollama] +``` + +## Usage example + + + +[Creating an Ollama container](../../modules/ollama/example_basic.py) + + diff --git a/docs/modules/opensearch.md b/docs/modules/opensearch.md new file mode 100644 index 000000000..f0113920f --- /dev/null +++ b/docs/modules/opensearch.md @@ -0,0 +1,23 @@ +# OpenSearch + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for OpenSearch. + +## Adding this module to your project dependencies + +Please run the following command to add the OpenSearch module to your python dependencies: + +```bash +pip install testcontainers[opensearch] +``` + +## Usage example + + + +[Creating an OpenSearch container](../../modules/opensearch/example_basic.py) + + diff --git a/docs/modules/oracle-free.md b/docs/modules/oracle-free.md new file mode 100644 index 000000000..cd0cc7175 --- /dev/null +++ b/docs/modules/oracle-free.md @@ -0,0 +1,23 @@ +# Oracle Free + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Oracle Free. 
+ +## Adding this module to your project dependencies + +Please run the following command to add the Oracle Free module to your python dependencies: + +```bash +pip install testcontainers[oracle-free] +``` + +## Usage example + + + +[Creating an Oracle Free container](../../modules/oracle-free/example_basic.py) + + diff --git a/docs/modules/postgres.md b/docs/modules/postgres.md new file mode 100644 index 000000000..1b51257ce --- /dev/null +++ b/docs/modules/postgres.md @@ -0,0 +1,23 @@ +# PostgreSQL + +Since testcontainers-python :material-tag: v4.8.2 + +## Introduction + +The Testcontainers module for PostgreSQL. + +## Adding this module to your project dependencies + +Please run the following command to add the PostgreSQL module to your python dependencies: + +```bash +pip install testcontainers[postgres] +``` + +## Usage example + + + +[Creating a PostgreSQL container](../../modules/postgres/example_basic.py) + + diff --git a/docs/modules/qdrant.md b/docs/modules/qdrant.md new file mode 100644 index 000000000..87791de1f --- /dev/null +++ b/docs/modules/qdrant.md @@ -0,0 +1,23 @@ +# Qdrant + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Qdrant. + +## Adding this module to your project dependencies + +Please run the following command to add the Qdrant module to your python dependencies: + +```bash +pip install testcontainers[qdrant] +``` + +## Usage example + + + +[Creating a Qdrant container](../../modules/qdrant/example_basic.py) + + diff --git a/docs/modules/rabbitmq.md b/docs/modules/rabbitmq.md new file mode 100644 index 000000000..7616f6ebd --- /dev/null +++ b/docs/modules/rabbitmq.md @@ -0,0 +1,23 @@ +# RabbitMQ + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for RabbitMQ. + +## Adding this module to your project dependencies + +Please run the following command to add the RabbitMQ module to your python dependencies: + +```bash +pip install testcontainers[rabbitmq] +``` + +## Usage example + + + +[Creating a RabbitMQ container](../../modules/rabbitmq/example_basic.py) + + diff --git a/docs/modules/redis.md b/docs/modules/redis.md new file mode 100644 index 000000000..b5f8598ee --- /dev/null +++ b/docs/modules/redis.md @@ -0,0 +1,23 @@ +# Redis + +Since testcontainers-python :material-tag: v4.4.0 + +## Introduction + +The Testcontainers module for Redis. + +## Adding this module to your project dependencies + +Please run the following command to add the Redis module to your python dependencies: + +```bash +pip install testcontainers[redis] +``` + +## Usage example + + + +[Creating a Redis container](../../modules/redis/example_basic.py) + + diff --git a/docs/modules/registry.md b/docs/modules/registry.md new file mode 100644 index 000000000..a1a123156 --- /dev/null +++ b/docs/modules/registry.md @@ -0,0 +1,23 @@ +# Registry + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Registry. 
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the Registry module to your python dependencies:
+
+```bash
+pip install testcontainers[registry]
+```
+
+## Usage example
+
+
+
+[Creating a Registry container](../../modules/registry/example_basic.py)
+
+
diff --git a/docs/modules/scylla.md b/docs/modules/scylla.md
new file mode 100644
index 000000000..c1001a425
--- /dev/null
+++ b/docs/modules/scylla.md
@@ -0,0 +1,23 @@
+# Scylla
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for Scylla.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the Scylla module to your python dependencies:
+
+```bash
+pip install testcontainers[scylla]
+```
+
+## Usage example
+
+
+
+[Creating a Scylla container](../../modules/scylla/example_basic.py)
+
+
diff --git a/docs/modules/selenium.md b/docs/modules/selenium.md
new file mode 100644
index 000000000..793b23a27
--- /dev/null
+++ b/docs/modules/selenium.md
@@ -0,0 +1,23 @@
+# Selenium
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for Selenium.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the Selenium module to your python dependencies:
+
+```bash
+pip install testcontainers[selenium]
+```
+
+## Usage example
+
+
+
+[Creating a Selenium container](../../modules/selenium/example_basic.py)
+
+
diff --git a/docs/modules/sftp.md b/docs/modules/sftp.md
new file mode 100644
index 000000000..2606cb898
--- /dev/null
+++ b/docs/modules/sftp.md
@@ -0,0 +1,23 @@
+# SFTP
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for SFTP.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the SFTP module to your python dependencies:
+
+```bash
+pip install testcontainers[sftp]
+```
+
+## Usage example
+
+
+
+[Creating an SFTP container](../../modules/sftp/example_basic.py)
+
+
diff --git a/docs/modules/test_module_import.md b/docs/modules/test_module_import.md
new file mode 100644
index 000000000..fa01570fa
--- /dev/null
+++ b/docs/modules/test_module_import.md
@@ -0,0 +1,23 @@
+# Test Module Import
+
+Since testcontainers-python :material-tag: v4.7.1
+
+## Introduction
+
+The Testcontainers module for testing Python module imports and package management.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the Test Module Import module to your python dependencies:
+
+```bash
+pip install testcontainers[test_module_import]
+```
+
+## Usage example
+
+
+
+[Creating a Test Module Import container](../../modules/test_module_import/example_basic.py)
+
+
diff --git a/docs/modules/trino.md b/docs/modules/trino.md
new file mode 100644
index 000000000..17edcbb1d
--- /dev/null
+++ b/docs/modules/trino.md
@@ -0,0 +1,23 @@
+# Trino
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for Trino.
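+
+A minimal sketch of issuing a query (assuming the `trino` client package is installed and that the container maps Trino's default HTTP port 8080; the maintained example is linked below):
+
+```python
+import trino
+
+from testcontainers.trino import TrinoContainer
+
+with TrinoContainer() as tc:
+    conn = trino.dbapi.connect(
+        host=tc.get_container_host_ip(),
+        port=int(tc.get_exposed_port(8080)),  # 8080 is Trino's default HTTP port
+        user="test",
+    )
+    cur = conn.cursor()
+    cur.execute("SELECT 1")
+    print(cur.fetchall())
+```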
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the Trino module to your python dependencies:
+
+```bash
+pip install testcontainers[trino]
+```
+
+## Usage example
+
+
+
+[Creating a Trino container](../../modules/trino/example_basic.py)
+
+
diff --git a/docs/modules/vault.md b/docs/modules/vault.md
new file mode 100644
index 000000000..0266614c5
--- /dev/null
+++ b/docs/modules/vault.md
@@ -0,0 +1,23 @@
+# Vault
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for Vault.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the Vault module to your python dependencies:
+
+```bash
+pip install testcontainers[vault]
+```
+
+## Usage example
+
+
+
+[Creating a Vault container](../../modules/vault/example_basic.py)
+
+
diff --git a/docs/modules/weaviate.md b/docs/modules/weaviate.md
new file mode 100644
index 000000000..2c362769e
--- /dev/null
+++ b/docs/modules/weaviate.md
@@ -0,0 +1,23 @@
+# Weaviate
+
+Since testcontainers-python :material-tag: v4.8.0
+
+## Introduction
+
+The Testcontainers module for Weaviate.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the Weaviate module to your python dependencies:
+
+```bash
+pip install testcontainers[weaviate]
+```
+
+## Usage example
+
+
+
+[Creating a Weaviate container](../../modules/weaviate/example_basic.py)
+
+
diff --git a/docs/quickstart.md b/docs/quickstart.md
new file mode 100644
index 000000000..0bbf37b26
--- /dev/null
+++ b/docs/quickstart.md
@@ -0,0 +1,116 @@
+_Testcontainers for Python_ plays well with the native `pytest` framework.
+
+The ideal use case is for integration or end to end tests. It helps you to spin
+up and manage the dependencies life cycle via Docker.
+
+## 1. System requirements
+
+Please read the [system requirements](../system_requirements) page before you start.
+
+## 2. Install _Testcontainers for Python_
+
+We use [pip](https://pip.pypa.io/) and you can get it installed via:
+
+```bash
+pip install testcontainers
+```
+
+## 3. Spin up Redis
+
+```python
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_for_logs
+
+
+def test_with_redis():
+    with DockerContainer("redis:latest").with_exposed_ports(6379) as redis_container:
+        wait_for_logs(redis_container, "Ready to accept connections")
+```
+
+The `DockerContainer` describes how the Docker container will look.
+
+- The image (`redis:latest`) is the Docker image the container starts from.
+- `with_exposed_ports` lists the ports to be exposed from the container.
+- `wait_for_logs` is one way to validate when a container is ready. It is
+  important to get this set because it helps to know when the container is
+  ready to receive any traffic. In this case, we check for the logs we know come
+  from Redis, telling us that it is ready to accept requests.
+
+When you expose a port you have to imagine yourself using `docker run -p
+<port>`. When you do so, `dockerd` maps the selected `<port>` from inside the
+container to a random one available on your host.
+
+In the previous example, we expose `6379` for `tcp` traffic to the outside. This
+allows Redis to be reachable from your code that runs outside the container, but
+it also makes parallelization possible: if several tests each start their own
+Redis container, each of them will be exposed on a different random port.
+
+Entering the `with` block starts the container and blocks until it is up and
+running. If you need more control over the life cycle, you can call `start()`
+and `stop()` on the container yourself, leaving to you the decision about when
+to start it.
+
+All the containers must be removed at some point, otherwise they will run until
+the host is overloaded. Using the container as a context manager, as above,
+takes care of this automatically, even when the test body raises an error.
+
+!!!tip
+
+    Look at the [configuration](features/configuration.md) page for the settings
+    that control how left-over resources are cleaned up.
+
+## 4. Make your code talk to the container
+
+This is just an example, but usually Python applications that rely on Redis are
+using the [redis-py](https://github.com/redis/redis-py) client. This code gets
+the host and port from the container we just started, and it configures the
+client.
+
+```python
+import redis
+
+client = redis.Redis(
+    host=redis_container.get_container_host_ip(),
+    port=int(redis_container.get_exposed_port(6379)),
+)
+
+client.ping()
+```
+
+We expose only one port, so `get_exposed_port` only needs the container port we
+declared earlier: `get_container_host_ip()` usually resolves to `localhost`, and
+`get_exposed_port` returns the random host port mapped to it.
+
+!!!tip
+
+    If you expose more than one port, call `get_exposed_port` once per container
+    port to find out where each one landed on the host.
+
+## 5. Run the test
+
+You can run the test via `pytest`
+
+## 6. Want to go deeper with Redis?
+
+You can find a more elaborated Redis example in our examples section. Please check it out [here](./modules/redis.md).
diff --git a/docs/system_requirements/docker.md b/docs/system_requirements/docker.md
new file mode 100644
index 000000000..7bc3c55de
--- /dev/null
+++ b/docs/system_requirements/docker.md
@@ -0,0 +1,11 @@
+# General Docker requirements
+
+Testcontainers requires a Docker-API compatible container runtime.
+During development, Testcontainers is actively tested against recent versions of Docker on Linux, as well as against Docker Desktop on Mac and Windows.
+These Docker environments are automatically detected and used by Testcontainers without any additional configuration being necessary.
+
+It is possible to configure Testcontainers to work for other Docker setups, such as a remote Docker host or Docker alternatives.
+However, these are not actively tested in the main development workflow, so not all Testcontainers features might be available and additional manual configuration might be necessary. Please see the [Docker host detection](../features/configuration.md#docker-host-detection) section for more information.
+
+If you have further questions about configuration details for your setup or whether it supports running Testcontainers-based tests,
+please contact the Testcontainers team and other users from the Testcontainers community on [Slack](https://slack.testcontainers.org/).
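+
+As a quick smoke test that your container runtime is reachable, you can start a throwaway container (a minimal sketch; the image and the log line waited for are arbitrary choices):
+
+```python
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_for_logs
+
+# If this succeeds, Testcontainers detected and reached your Docker environment.
+with DockerContainer("alpine:3.19").with_command("sh -c 'echo ready && sleep 5'") as container:
+    wait_for_logs(container, "ready")
+```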
diff --git a/docs/system_requirements/index.md b/docs/system_requirements/index.md new file mode 100644 index 000000000..74e0464da --- /dev/null +++ b/docs/system_requirements/index.md @@ -0,0 +1,183 @@ +# Python versions + +The library supports Python >= 3.9, < 4.0. + +## Updating your Python version + +There are several common approaches for managing and isolating your Python environment when using Testcontainers (or any Python project). Each has its own trade-offs in terms of reproducibility, ease of use, and integration with tooling: + +### venv (built-in virtual environments) + +#### What it is + +Python’s built-in way to create lightweight environments. + +#### How to use + +```bash +python3 -m venv .venv # create an env in “.venv” +source .venv/bin/activate # on Unix/macOS +.venv\Scripts\activate # on Windows +pip install -r requirements.txt +``` + +| Pros | Cons | +| ----------------------------------------- | -------------------------------------------------- | +| No extra dependencies | You still manage `requirements.txt` by hand | +| Very lightweight | Doesn’t provide lockfiles or dependency resolution | +| Works everywhere Python 3.3+ is installed | | + +### virtualenv (stand-alone) + +#### What it is + +A more mature alternative to venv, sometimes faster and with more features. + +#### How to use + +```bash +pip install virtualenv +virtualenv .env +source .env/bin/activate +pip install -r requirements.txt +``` + +| Pros | Cons | +| --------------------------------------------------------------------------- | ---------------------------------------------------- | +| Slightly more flexible than `venv` (e.g. can target different interpreters) | Still manual management of versions and dependencies | + +### pipenv + +#### What it is + +A higher-level tool combining environment creation with Pipfile dependency management. + +#### How to use + +```bash +pip install pipenv +pipenv install --dev testcontainers +pipenv shell +``` + +Dependencies live in Pipfile; exact versions locked in Pipfile.lock. + +| Pros | Cons | +| ----------------------------------------- | --------------------------------------------------- | +| Automatic creation of a virtualenv | Can be slower, historically some performance quirks | +| Lockfile for reproducible installs | | +| `pipenv run …` to avoid activating shells | | + +### poetry + +#### What it is + +A modern dependency manager and packaging tool, with built-in virtualenv support. + +#### How to use + +```bash +curl -sSL https://install.python-poetry.org | python3 - +poetry init # walk you through pyproject.toml creation +poetry add --dev testcontainers +poetry shell +``` + +Your Python version constraints and dependencies are in pyproject.toml; lockfile is poetry.lock. + +| Pros | Cons | +| --------------------------------------------------- | ----------------------------------------------------- | +| Elegant TOML-based config | A bit of a learning curve if you’re used to plain Pip | +| Creates truly reproducible environments | | +| Supports publishing packages to PyPI out of the box | | + +### conda / mamba + +#### What it is + +Cross-language environment and package manager (Python/R/C++). 
+
+#### How to use
+
+```bash
+conda create -n tc-env python=3.10
+conda activate tc-env
+conda install pip
+pip install testcontainers
+```
+
+Or with Mamba for faster solves:
+
+```bash
+mamba create -n tc-env python=3.10
+mamba activate tc-env
+mamba install pip
+pip install testcontainers
+```
+
+| Pros | Cons |
+| --------------------------------------------------------------- | --------------------------- |
+| Manages non-Python dependencies easily (e.g., system libraries) | Larger disk footprint |
+| Reproducible YAML environment files (`environment.yml`) | Less “pure” Python workflow |
+
+### Docker-based environments
+
+#### What it is
+
+Run your tests inside a Docker image, so everything (even Python itself) is containerized.
+
+#### How to use
+
+```dockerfile
+FROM python:3.10-slim
+WORKDIR /app
+COPY pyproject.toml poetry.lock ./
+RUN pip install poetry && poetry install --no-root
+COPY . .
+CMD ["pytest", "--maxfail=1", "--disable-warnings", "-q"]
+```
+
+| Pros | Cons |
+| ---------------------------------------------------- | --------------------------------------------------- |
+| True isolation from host machine (including OS libs) | Slower startup/testing cycle |
+| Easy to share exact environment via Dockerfile | Extra complexity if you’re not already Docker-savvy |
+
+### tox for multi-env testing
+
+#### What it is
+
+A tool to automate testing across multiple Python versions/environments.
+
+#### How to use
+
+```ini
+# tox.ini
+[tox]
+envlist = py39,py310,py311
+
+[testenv]
+deps =
+    pytest
+    testcontainers
+commands = pytest
+```
+
+| Pros | Cons |
+| --------------------------------------------------------- | ---------------------------- |
+| Ensures compatibility across multiple Python interpreters | Adds another layer of config |
+| Isolates each test run in its own venv | |
+
+## Choosing the Right Tool
+
+| Tool | Lockfile? | Built-in Env | Cross-Platform | Non-Python Deps | Reproducibility |
+| ------------ | --------- | ------------ | -------------- | --------------- | --------------- |
+| `venv` | No | Yes | Yes | No | Low |
+| `virtualenv` | No | Yes | Yes | No | Low |
+| `pipenv` | Yes | Yes | Yes | No | Medium |
+| `poetry` | Yes | Yes | Yes | No | High |
+| `conda` | Yes (YML) | Yes | Yes | Yes | High |
+| Docker | – | Container | Yes | Yes | Very High |
+
+## Next Steps
+
+With any of these, once your environment is set up you can simply `pip install testcontainers` (or use Poetry’s `poetry add --dev testcontainers`) and begin writing your container-backed tests in Python.
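+
+Whichever tool you pick, a first container-backed test looks the same. The sketch below uses the core API plus `pytest` (the image and the log line waited for are arbitrary choices):
+
+```python
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_for_logs
+
+
+def test_nginx_starts():
+    # Start a container, wait until it logs readiness, then inspect the mapped port.
+    with DockerContainer("nginx:alpine").with_exposed_ports(80) as nginx:
+        wait_for_logs(nginx, "start worker processes")
+        assert nginx.get_exposed_port(80) is not None
+```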
+ +See the [General Docker Requirements](docker.md) to continue diff --git a/mkdocs.yml b/mkdocs.yml index 171274109..7efc7d524 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -36,13 +36,60 @@ markdown_extensions: emoji_index: !!python/name:material.extensions.emoji.twemoji nav: - Home: index.md + - Quickstart: quickstart.md - Features: - features/creating_container.md + - features/configuration.md - Modules: - - modules/mysql.md + - Databases: + - modules/arangodb.md + - modules/cassandra.md + - modules/chroma.md + - modules/clickhouse.md + - modules/cockroachdb.md + - modules/cosmosdb.md + - modules/db2.md + - modules/elasticsearch.md + - modules/influxdb.md + - modules/mongodb.md + - modules/mssql.md + - modules/mysql.md + - modules/neo4j.md + - modules/opensearch.md + - modules/oracle-free.md + - modules/postgres.md + - modules/qdrant.md + - modules/redis.md + - modules/scylla.md + - modules/trino.md + - modules/weaviate.md + - modules/aws.md + - modules/azurite.md + - modules/generic.md + - modules/google.md + - modules/k3s.md + - modules/keycloak.md + - modules/kafka.md + - modules/localstack.md + - modules/mailpit.md + - modules/memcached.md + - modules/milvus.md + - modules/minio.md + - modules/mqtt.md + - modules/nats.md + - modules/nginx.md + - modules/ollama.md + - modules/rabbitmq.md + - modules/registry.md + - modules/selenium.md + - modules/sftp.md + - modules/test_module_import.md + - modules/vault.md + - System Requirements: + - system_requirements/index.md + - system_requirements/docker.md - Contributing: contributing.md - - Contributing Docs: contributing_docs.md - - Getting help: getting_help.md + - Getting Help: getting_help.md edit_uri: edit/main/docs/ extra: latest_version: 4.10.0 diff --git a/modules/arangodb/example_basic.py b/modules/arangodb/example_basic.py new file mode 100644 index 000000000..e75467610 --- /dev/null +++ b/modules/arangodb/example_basic.py @@ -0,0 +1,91 @@ +import json + +from arango import ArangoClient + +from testcontainers.arangodb import ArangoDbContainer + + +def basic_example(): + with ArangoDbContainer() as arango: + # Get connection parameters + host = arango.get_container_host_ip() + port = arango.get_exposed_port(arango.port) + username = arango.username + password = arango.password + + # Create ArangoDB client + client = ArangoClient(hosts=f"http://{host}:{port}") + db = client.db("_system", username=username, password=password) + print("Connected to ArangoDB") + + # Create a test database + db_name = "test_db" + if not db.has_database(db_name): + db.create_database(db_name) + print(f"Created database: {db_name}") + + # Switch to test database + test_db = client.db(db_name, username=username, password=password) + + # Create a test collection + collection_name = "test_collection" + if not test_db.has_collection(collection_name): + test_db.create_collection(collection_name) + print(f"Created collection: {collection_name}") + + collection = test_db.collection(collection_name) + + # Insert test documents + test_docs = [ + {"_key": "1", "name": "test1", "value": 100, "category": "A"}, + {"_key": "2", "name": "test2", "value": 200, "category": "B"}, + {"_key": "3", "name": "test3", "value": 300, "category": "A"}, + ] + + collection.import_bulk(test_docs) + print("Inserted test documents") + + # Query documents + cursor = test_db.aql.execute(""" + FOR doc IN test_collection + FILTER doc.category == "A" + RETURN doc + """) + + print("\nQuery results:") + for doc in cursor: + print(json.dumps(doc, indent=2)) + + # Execute a more complex query + 
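+        # COLLECT groups documents by the given expression, and AGGREGATE computes
+        # one value per group (COUNT/AVG/MIN/MAX here), similar to SQL GROUP BY.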
cursor = test_db.aql.execute(""" + FOR doc IN test_collection + COLLECT category = doc.category + AGGREGATE + count = COUNT(1), + avg_value = AVG(doc.value), + min_value = MIN(doc.value), + max_value = MAX(doc.value) + RETURN { + category: category, + count: count, + avg_value: avg_value, + min_value: min_value, + max_value: max_value + } + """) + + print("\nAggregation results:") + for result in cursor: + print(json.dumps(result, indent=2)) + + # Get collection info + collection_info = collection.properties() + print("\nCollection properties:") + print(f"Name: {collection_info['name']}") + print(f"Type: {collection_info['type']}") + print(f"Status: {collection_info['status']}") + print(f"Count: {collection.count()}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/aws/example_basic.py b/modules/aws/example_basic.py new file mode 100644 index 000000000..64410ed23 --- /dev/null +++ b/modules/aws/example_basic.py @@ -0,0 +1,117 @@ +import json +from datetime import datetime + +import boto3 + +from testcontainers.aws import AwsContainer + + +def basic_example(): + with AwsContainer() as aws: + # Get connection parameters + host = aws.get_container_host_ip() + port = aws.get_exposed_port(aws.port) + access_key = aws.access_key + secret_key = aws.secret_key + region = aws.region + + # Initialize AWS clients + s3 = boto3.client( + "s3", + endpoint_url=f"http://{host}:{port}", + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + region_name=region, + ) + + dynamodb = boto3.resource( + "dynamodb", + endpoint_url=f"http://{host}:{port}", + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + region_name=region, + ) + + sqs = boto3.client( + "sqs", + endpoint_url=f"http://{host}:{port}", + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + region_name=region, + ) + + print("Connected to AWS services") + + # Test S3 + bucket_name = f"test-bucket-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}" + s3.create_bucket(Bucket=bucket_name) + print(f"\nCreated S3 bucket: {bucket_name}") + + # Upload a file + s3.put_object(Bucket=bucket_name, Key="test.txt", Body="Hello, S3!") + print("Uploaded test file") + + # List objects + objects = s3.list_objects(Bucket=bucket_name) + print("\nObjects in bucket:") + for obj in objects.get("Contents", []): + print(f"- {obj['Key']}") + + # Test DynamoDB + table_name = "test_table" + table = dynamodb.create_table( + TableName=table_name, + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + print(f"\nCreated DynamoDB table: {table_name}") + + # Wait for table to be created + table.meta.client.get_waiter("table_exists").wait(TableName=table_name) + + # Insert items + table.put_item(Item={"id": "1", "name": "Test Item", "value": 42, "timestamp": datetime.utcnow().isoformat()}) + print("Inserted test item") + + # Query items + response = table.scan() + print("\nDynamoDB items:") + for item in response["Items"]: + print(json.dumps(item, indent=2)) + + # Test SQS + queue_name = "test-queue" + queue = sqs.create_queue(QueueName=queue_name) + queue_url = queue["QueueUrl"] + print(f"\nCreated SQS queue: {queue_name}") + + # Send message + response = sqs.send_message(QueueUrl=queue_url, MessageBody="Hello, SQS!") + print(f"Sent message: {response['MessageId']}") + + # Receive message + messages = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=1) + 
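+        # receive_message returns at most MaxNumberOfMessages; a received message
+        # stays in flight (hidden from other consumers) until it is deleted.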
print("\nReceived messages:") + for message in messages.get("Messages", []): + print(json.dumps(message, indent=2)) + + # Clean up + # Delete S3 bucket and its contents + objects = s3.list_objects(Bucket=bucket_name) + for obj in objects.get("Contents", []): + s3.delete_object(Bucket=bucket_name, Key=obj["Key"]) + s3.delete_bucket(Bucket=bucket_name) + print("\nDeleted S3 bucket") + + # Delete DynamoDB table + table.delete() + print("Deleted DynamoDB table") + + # Delete SQS queue + sqs.delete_queue(QueueUrl=queue_url) + print("Deleted SQS queue") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/azurite/example_basic.py b/modules/azurite/example_basic.py new file mode 100644 index 000000000..872046e97 --- /dev/null +++ b/modules/azurite/example_basic.py @@ -0,0 +1,73 @@ +import json + +from azure.storage.blob import BlobServiceClient +from azure.storage.queue import QueueServiceClient + +from testcontainers.azurite import AzuriteContainer + + +def basic_example(): + with AzuriteContainer() as azurite: + # Get connection string + connection_string = azurite.get_connection_string() + + # Create BlobServiceClient + blob_service_client = BlobServiceClient.from_connection_string(connection_string) + + # Create QueueServiceClient + queue_service_client = QueueServiceClient.from_connection_string(connection_string) + + # Create a test container + container_name = "test-container" + container_client = blob_service_client.create_container(container_name) + print(f"Created container: {container_name}") + + # Upload test blobs + test_data = [ + {"name": "test1", "value": 100, "category": "A"}, + {"name": "test2", "value": 200, "category": "B"}, + {"name": "test3", "value": 300, "category": "A"}, + ] + + for i, data in enumerate(test_data, 1): + blob_name = f"test{i}.json" + blob_client = container_client.get_blob_client(blob_name) + blob_client.upload_blob(json.dumps(data), overwrite=True) + print(f"Uploaded blob: {blob_name}") + + # List blobs + print("\nBlobs in container:") + for blob in container_client.list_blobs(): + print(f"Name: {blob.name}, Size: {blob.size} bytes") + + # Download and read a blob + blob_client = container_client.get_blob_client("test1.json") + blob_data = blob_client.download_blob() + content = json.loads(blob_data.readall()) + print("\nBlob content:") + print(json.dumps(content, indent=2)) + + # Create a test queue + queue_name = "test-queue" + queue_client = queue_service_client.create_queue(queue_name) + print(f"\nCreated queue: {queue_name}") + + # Send test messages + test_messages = ["Hello Azurite!", "This is a test message", "Queue is working!"] + + for msg in test_messages: + queue_client.send_message(msg) + print(f"Sent message: {msg}") + + # Receive messages + print("\nReceived messages:") + for _ in range(len(test_messages)): + message = queue_client.receive_message() + if message: + print(f"Message: {message.content}") + queue_client.delete_message(message.id, message.pop_receipt) + print("Deleted message") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/cassandra/example_basic.py b/modules/cassandra/example_basic.py new file mode 100644 index 000000000..54cee6f64 --- /dev/null +++ b/modules/cassandra/example_basic.py @@ -0,0 +1,153 @@ +import json +from datetime import datetime + +from cassandra.auth import PlainTextAuthProvider +from cassandra.cluster import Cluster + +from testcontainers.cassandra import CassandraContainer + + +def basic_example(): + with CassandraContainer() as cassandra: + # Get connection 
parameters + host = cassandra.get_container_host_ip() + port = cassandra.get_exposed_port(cassandra.port) + username = cassandra.username + password = cassandra.password + + # Create Cassandra client + auth_provider = PlainTextAuthProvider(username=username, password=password) + cluster = Cluster([host], port=port, auth_provider=auth_provider) + session = cluster.connect() + print("Connected to Cassandra") + + # Create keyspace + session.execute(""" + CREATE KEYSPACE IF NOT EXISTS test_keyspace + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} + """) + print("Created keyspace") + + # Use keyspace + session.set_keyspace("test_keyspace") + + # Create table + session.execute(""" + CREATE TABLE IF NOT EXISTS test_table ( + id UUID PRIMARY KEY, + name text, + value int, + category text, + created_at timestamp + ) + """) + print("Created table") + + # Insert test data + test_data = [ + { + "id": "550e8400-e29b-41d4-a716-446655440000", + "name": "test1", + "value": 100, + "category": "A", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440001", + "name": "test2", + "value": 200, + "category": "B", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440002", + "name": "test3", + "value": 300, + "category": "A", + "created_at": datetime.utcnow(), + }, + ] + + insert_stmt = session.prepare(""" + INSERT INTO test_table (id, name, value, category, created_at) + VALUES (uuid(), ?, ?, ?, ?) + """) + + for data in test_data: + session.execute(insert_stmt, (data["name"], data["value"], data["category"], data["created_at"])) + print("Inserted test data") + + # Query data + print("\nQuery results:") + rows = session.execute("SELECT * FROM test_table WHERE category = 'A' ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create materialized view + session.execute(""" + CREATE MATERIALIZED VIEW IF NOT EXISTS test_view AS + SELECT category, name, value, created_at + FROM test_table + WHERE category IS NOT NULL AND name IS NOT NULL + PRIMARY KEY (category, name) + """) + print("\nCreated materialized view") + + # Query materialized view + print("\nMaterialized view results:") + rows = session.execute("SELECT * FROM test_view WHERE category = 'A'") + for row in rows: + print( + json.dumps( + { + "category": row.category, + "name": row.name, + "value": row.value, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create secondary index + session.execute("CREATE INDEX IF NOT EXISTS ON test_table (value)") + print("\nCreated secondary index") + + # Query using secondary index + print("\nQuery using secondary index:") + rows = session.execute("SELECT * FROM test_table WHERE value > 150 ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Get table metadata + table_meta = session.cluster.metadata.keyspaces["test_keyspace"].tables["test_table"] + print("\nTable metadata:") + print(f"Columns: {[col.name for col in table_meta.columns.values()]}") + print(f"Partition key: {[col.name for col in table_meta.partition_key]}") + print(f"Clustering key: {[col.name for col in table_meta.clustering_key]}") + + +if __name__ == "__main__": + basic_example() diff --git 
a/modules/chroma/example_basic.py b/modules/chroma/example_basic.py new file mode 100644 index 000000000..3d22c01c7 --- /dev/null +++ b/modules/chroma/example_basic.py @@ -0,0 +1,65 @@ +import chromadb +from chromadb.config import Settings + +from testcontainers.chroma import ChromaContainer + + +def basic_example(): + with ChromaContainer() as chroma: + # Get connection URL + connection_url = chroma.get_connection_url() + + # Create Chroma client + client = chromadb.HttpClient(host=connection_url, settings=Settings(allow_reset=True)) + + # Create a collection + collection_name = "test_collection" + collection = client.create_collection(name=collection_name) + print(f"Created collection: {collection_name}") + + # Add documents and embeddings + documents = [ + "This is a test document about AI", + "Machine learning is a subset of AI", + "Deep learning uses neural networks", + ] + + embeddings = [ + [0.1, 0.2, 0.3], # Simple example embeddings + [0.2, 0.3, 0.4], + [0.3, 0.4, 0.5], + ] + + ids = ["doc1", "doc2", "doc3"] + metadatas = [ + {"source": "test1", "category": "AI"}, + {"source": "test2", "category": "ML"}, + {"source": "test3", "category": "DL"}, + ] + + collection.add(documents=documents, embeddings=embeddings, ids=ids, metadatas=metadatas) + print("Added documents to collection") + + # Query the collection + results = collection.query(query_embeddings=[[0.1, 0.2, 0.3]], n_results=2) + + print("\nQuery results:") + print(f"Documents: {results['documents'][0]}") + print(f"Distances: {results['distances'][0]}") + print(f"Metadatas: {results['metadatas'][0]}") + + # Get collection info + collection_info = client.get_collection(collection_name) + print("\nCollection info:") + print(f"Name: {collection_info.name}") + print(f"Count: {collection_info.count()}") + + # List all collections + collections = client.list_collections() + print("\nAvailable collections:") + for coll in collections: + print(f"- {coll.name}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/clickhouse/example_basic.py b/modules/clickhouse/example_basic.py new file mode 100644 index 000000000..1b4eb5c8d --- /dev/null +++ b/modules/clickhouse/example_basic.py @@ -0,0 +1,76 @@ +from datetime import datetime, timedelta + +import pandas as pd +from clickhouse_driver import Client + +from testcontainers.clickhouse import ClickHouseContainer + + +def basic_example(): + with ClickHouseContainer() as clickhouse: + # Get connection parameters + host = clickhouse.get_container_host_ip() + port = clickhouse.get_exposed_port(clickhouse.port) + + # Create ClickHouse client + client = Client(host=host, port=port) + + # Create a test table + client.execute(""" + CREATE TABLE IF NOT EXISTS test_table ( + id UInt32, + name String, + value Float64, + timestamp DateTime + ) ENGINE = MergeTree() + ORDER BY (id, timestamp) + """) + print("Created test table") + + # Generate test data + now = datetime.now() + data = [ + (1, "test1", 100.0, now), + (2, "test2", 200.0, now + timedelta(hours=1)), + (3, "test3", 300.0, now + timedelta(hours=2)), + ] + + # Insert data + client.execute("INSERT INTO test_table (id, name, value, timestamp) VALUES", data) + print("Inserted test data") + + # Query data + result = client.execute(""" + SELECT * + FROM test_table + ORDER BY id + """) + + print("\nQuery results:") + for row in result: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}, Timestamp: {row[3]}") + + # Execute a more complex query + result = client.execute(""" + SELECT + name, + avg(value) as avg_value, + 
min(value) as min_value, + max(value) as max_value + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + + print("\nAggregation results:") + for row in result: + print(f"Name: {row[0]}, Avg: {row[1]:.2f}, Min: {row[2]:.2f}, Max: {row[3]:.2f}") + + # Convert to pandas DataFrame + df = pd.DataFrame(result, columns=["name", "avg_value", "min_value", "max_value"]) + print("\nDataFrame:") + print(df) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/cockroachdb/example_basic.py b/modules/cockroachdb/example_basic.py new file mode 100644 index 000000000..9da3f219c --- /dev/null +++ b/modules/cockroachdb/example_basic.py @@ -0,0 +1,90 @@ +import pandas as pd +import sqlalchemy +from sqlalchemy import text + +from testcontainers.cockroachdb import CockroachContainer + + +def basic_example(): + with CockroachContainer() as cockroach: + # Get connection URL + connection_url = cockroach.get_connection_url() + + # Create SQLAlchemy engine + engine = sqlalchemy.create_engine(connection_url) + + # Create a test table + with engine.begin() as conn: + conn.execute( + text(""" + CREATE TABLE IF NOT EXISTS test_table ( + id SERIAL PRIMARY KEY, + name VARCHAR(50), + value DECIMAL(10,2), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """) + ) + print("Created test table") + + # Insert test data + test_data = [(1, "test1", 100.0), (2, "test2", 200.0), (3, "test3", 300.0)] + + conn.execute( + text(""" + INSERT INTO test_table (id, name, value) + VALUES (:id, :name, :value) + """), + [{"id": item_id, "name": name, "value": value} for item_id, name, value in test_data], + ) + print("Inserted test data") + + # Query data + with engine.connect() as conn: + result = conn.execute( + text(""" + SELECT * + FROM test_table + ORDER BY id + """) + ) + + print("\nQuery results:") + for row in result: + print(f"ID: {row.id}, Name: {row.name}, Value: {row.value}, Created: {row.created_at}") + + # Execute a more complex query + with engine.connect() as conn: + result = conn.execute( + text(""" + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count, + MIN(created_at) as first_created, + MAX(created_at) as last_created + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + ) + + print("\nAggregation results:") + for row in result: + print( + f"Name: {row.name}, " + f"Avg: {row.avg_value:.2f}, " + f"Count: {row.count}, " + f"First: {row.first_created}, " + f"Last: {row.last_created}" + ) + + # Convert to pandas DataFrame + with engine.connect() as conn: + df = pd.read_sql("SELECT * FROM test_table ORDER BY id", conn) + print("\nDataFrame:") + print(df) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/cosmosdb/example_basic.py b/modules/cosmosdb/example_basic.py new file mode 100644 index 000000000..c836a1409 --- /dev/null +++ b/modules/cosmosdb/example_basic.py @@ -0,0 +1,75 @@ +import json + +from azure.cosmos import CosmosClient, PartitionKey + +from testcontainers.cosmosdb import CosmosDbContainer + + +def basic_example(): + with CosmosDbContainer() as cosmos: + # Get connection parameters + endpoint = cosmos.get_connection_url() + key = cosmos.get_primary_key() + + # Create CosmosDB client + client = CosmosClient(endpoint, key) + + # Create a database + database_name = "test_database" + database = client.create_database_if_not_exists(id=database_name) + print(f"Created database: {database_name}") + + # Create a container + container_name = "test_container" + container = database.create_container_if_not_exists( + id=container_name, 
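+            # The partition key controls how items are distributed across partitions;
+            # queries that filter on it (like the category query below) stay single-partition.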
partition_key=PartitionKey(path="/category") + ) + print(f"Created container: {container_name}") + + # Insert test items + test_items = [ + {"id": "1", "category": "test1", "name": "Item 1", "value": 100}, + {"id": "2", "category": "test2", "name": "Item 2", "value": 200}, + {"id": "3", "category": "test1", "name": "Item 3", "value": 300}, + ] + + for item in test_items: + container.create_item(body=item) + print("Inserted test items") + + # Query items + query = "SELECT * FROM c WHERE c.category = 'test1'" + items = list(container.query_items(query=query, enable_cross_partition_query=True)) + + print("\nQuery results:") + for item in items: + print(json.dumps(item, indent=2)) + + # Execute a more complex query + query = """ + SELECT + c.category, + COUNT(1) as count, + AVG(c.value) as avg_value, + MIN(c.value) as min_value, + MAX(c.value) as max_value + FROM c + GROUP BY c.category + """ + + results = list(container.query_items(query=query, enable_cross_partition_query=True)) + + print("\nAggregation results:") + for result in results: + print(json.dumps(result, indent=2)) + + # Get container info + container_properties = container.read() + print("\nContainer properties:") + print(f"ID: {container_properties['id']}") + print(f"Partition Key: {container_properties['partitionKey']}") + print(f"Indexing Policy: {json.dumps(container_properties['indexingPolicy'], indent=2)}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/db2/example_basic.py b/modules/db2/example_basic.py new file mode 100644 index 000000000..97b5d65f5 --- /dev/null +++ b/modules/db2/example_basic.py @@ -0,0 +1,89 @@ +import ibm_db +import ibm_db_dbi +import pandas as pd + +from testcontainers.db2 import Db2Container + + +def basic_example(): + with Db2Container() as db2: + # Get connection parameters + host = db2.get_container_host_ip() + port = db2.get_exposed_port(db2.port) + database = db2.database + username = db2.username + password = db2.password + + # Create connection string + conn_str = f"DATABASE={database};HOSTNAME={host};PORT={port};PROTOCOL=TCPIP;UID={username};PWD={password}" + + # Connect to DB2 + conn = ibm_db.connect(conn_str, "", "") + print("Connected to DB2") + + # Create a test table + create_table_sql = """ + CREATE TABLE test_table ( + id INTEGER NOT NULL PRIMARY KEY, + name VARCHAR(50), + value DECIMAL(10,2), + created_at TIMESTAMP DEFAULT CURRENT TIMESTAMP + ) + """ + + try: + ibm_db.exec_immediate(conn, create_table_sql) + print("Created test table") + except Exception as e: + print(f"Table might already exist: {e}") + + # Insert test data + test_data = [(1, "test1", 100.0), (2, "test2", 200.0), (3, "test3", 300.0)] + + insert_sql = "INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)" + stmt = ibm_db.prepare(conn, insert_sql) + + for row in test_data: + ibm_db.execute(stmt, row) + print("Inserted test data") + + # Query data using ibm_db_dbi + conn_dbi = ibm_db_dbi.Connection(conn) + cursor = conn_dbi.cursor() + + cursor.execute("SELECT * FROM test_table ORDER BY id") + rows = cursor.fetchall() + + print("\nQuery results:") + for row in rows: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}, Created: {row[3]}") + + # Execute a more complex query + cursor.execute(""" + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count, + MIN(created_at) as first_created, + MAX(created_at) as last_created + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + + print("\nAggregation results:") + for row in cursor.fetchall(): + print(f"Name: {row[0]}, Avg: 
{row[1]:.2f}, Count: {row[2]}, First: {row[3]}, Last: {row[4]}")
+
+        # Convert to pandas DataFrame
+        df = pd.read_sql("SELECT * FROM test_table ORDER BY id", conn_dbi)
+        print("\nDataFrame:")
+        print(df)
+
+        # Clean up
+        cursor.close()
+        ibm_db.close(conn)
+
+
+if __name__ == "__main__":
+    basic_example()
diff --git a/modules/elasticsearch/example_basic.py b/modules/elasticsearch/example_basic.py
new file mode 100644
index 000000000..1b3ed4077
--- /dev/null
+++ b/modules/elasticsearch/example_basic.py
@@ -0,0 +1,105 @@
+import json
+from datetime import datetime
+
+from elasticsearch import Elasticsearch
+
+from testcontainers.elasticsearch import ElasticSearchContainer
+
+
+def basic_example():
+    with ElasticSearchContainer() as elasticsearch:
+        # Get connection parameters
+        host = elasticsearch.get_container_host_ip()
+        port = elasticsearch.get_exposed_port(elasticsearch.port)
+
+        # Create Elasticsearch client
+        es = Elasticsearch(f"http://{host}:{port}")
+        print("Connected to Elasticsearch")
+
+        # Create index
+        index_name = "test_index"
+        index_settings = {
+            "settings": {"number_of_shards": 1, "number_of_replicas": 0},
+            "mappings": {
+                "properties": {
+                    "name": {"type": "text"},
+                    "value": {"type": "integer"},
+                    "category": {"type": "keyword"},
+                    "created_at": {"type": "date"},
+                }
+            },
+        }
+
+        if not es.indices.exists(index=index_name):
+            es.indices.create(index=index_name, body=index_settings)
+            print(f"Created index: {index_name}")
+
+        # Insert test documents
+        test_docs = [
+            {"name": "test1", "value": 100, "category": "A", "created_at": datetime.utcnow()},
+            {"name": "test2", "value": 200, "category": "B", "created_at": datetime.utcnow()},
+            {"name": "test3", "value": 300, "category": "A", "created_at": datetime.utcnow()},
+        ]
+
+        for i, doc in enumerate(test_docs, 1):
+            es.index(index=index_name, id=i, document=doc)
+        print("Inserted test documents")
+
+        # Refresh index
+        es.indices.refresh(index=index_name)
+
+        # Search documents
+        search_query = {"query": {"bool": {"must": [{"term": {"category": "A"}}]}}}
+
+        print("\nSearch results:")
+        response = es.search(index=index_name, body=search_query)
+        for hit in response["hits"]["hits"]:
+            print(json.dumps(hit["_source"], default=str, indent=2))
+
+        # Execute aggregation
+        agg_query = {
+            "size": 0,
+            "aggs": {
+                "categories": {
+                    "terms": {"field": "category"},
+                    "aggs": {
+                        "avg_value": {"avg": {"field": "value"}},
+                        "min_value": {"min": {"field": "value"}},
+                        "max_value": {"max": {"field": "value"}},
+                    },
+                }
+            },
+        }
+
+        print("\nAggregation results:")
+        response = es.search(index=index_name, body=agg_query)
+        for bucket in response["aggregations"]["categories"]["buckets"]:
+            print(f"\nCategory: {bucket['key']}")
+            print(f"Count: {bucket['doc_count']}")
+            print(f"Avg value: {bucket['avg_value']['value']:.2f}")
+            print(f"Min value: {bucket['min_value']['value']}")
+            print(f"Max value: {bucket['max_value']['value']}")
+
+        # Update document
+        update_body = {"doc": {"value": 150, "updated_at": datetime.utcnow()}}
+        es.update(index=index_name, id=1, body=update_body)
+        print("\nUpdated document")
+
+        # Get document
+        doc = es.get(index=index_name, id=1)
+        print("\nUpdated document:")
+        print(json.dumps(doc["_source"], default=str, indent=2))
+
+        # Delete document
+        es.delete(index=index_name, id=2)
+        print("\nDeleted document")
+
+        # Get index stats
+        stats = es.indices.stats(index=index_name)
+        print("\nIndex stats:")
+        print(f"Documents: {stats['indices'][index_name]['total']['docs']['count']}")
+        print(f"Size: 
{stats['indices'][index_name]['total']['store']['size_in_bytes']} bytes") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/generic/example_basic.py b/modules/generic/example_basic.py new file mode 100644 index 000000000..107bcc7c2 --- /dev/null +++ b/modules/generic/example_basic.py @@ -0,0 +1,115 @@ +import requests + +from testcontainers.generic import GenericContainer + + +def basic_example(): + # Example 1: Nginx container + with GenericContainer("nginx:latest") as nginx: + # Get connection parameters + host = nginx.get_container_host_ip() + port = nginx.get_exposed_port(80) + + # Test Nginx + response = requests.get(f"http://{host}:{port}") + print("\nNginx response:") + print(f"Status code: {response.status_code}") + print(f"Content type: {response.headers.get('content-type')}") + + # Example 2: Redis container with custom configuration + with GenericContainer("redis:latest") as redis: + # Get connection parameters + host = redis.get_container_host_ip() + port = redis.get_exposed_port(6379) + + # Test Redis + import redis + + r = redis.Redis(host=host, port=port) + r.set("test_key", "Hello, Redis!") + value = r.get("test_key") + print("\nRedis test:") + print(f"Retrieved value: {value.decode()}") + + # Example 3: PostgreSQL container with environment variables + with GenericContainer( + "postgres:latest", + environment={"POSTGRES_USER": "testuser", "POSTGRES_PASSWORD": "testpass", "POSTGRES_DB": "testdb"}, + ) as postgres: + # Get connection parameters + host = postgres.get_container_host_ip() + port = postgres.get_exposed_port(5432) + + # Test PostgreSQL + import psycopg2 + + conn = psycopg2.connect(host=host, port=port, user="testuser", password="testpass", database="testdb") + cur = conn.cursor() + cur.execute("SELECT version();") + version = cur.fetchone() + print("\nPostgreSQL test:") + print(f"Version: {version[0]}") + cur.close() + conn.close() + + # Example 4: Custom container with volume mounting + with GenericContainer("python:3.9-slim", volumes={"/tmp/test": {"bind": "/app", "mode": "rw"}}) as python: + # Get container ID + container_id = python.get_container_id() + print(f"\nPython container ID: {container_id}") + + # Execute command in container + exit_code, output = python.exec_run("python -c 'print(\"Hello from container!\")'") + print(f"Command output: {output.decode()}") + + # Example 5: Container with health check + with GenericContainer( + "mongo:latest", + healthcheck={ + "test": ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"], + "interval": 1000000000, # 1 second + "timeout": 3000000000, # 3 seconds + "retries": 3, + }, + ) as mongo: + # Get connection parameters + host = mongo.get_container_host_ip() + port = mongo.get_exposed_port(27017) + + # Test MongoDB + from pymongo import MongoClient + + client = MongoClient(f"mongodb://{host}:{port}") + db = client.test_db + collection = db.test_collection + collection.insert_one({"test": "Hello, MongoDB!"}) + result = collection.find_one() + print("\nMongoDB test:") + print(f"Retrieved document: {result}") + + # Example 6: Container with network + with GenericContainer("nginx:latest", network="test_network") as nginx_network: + # Get network info + network_info = nginx_network.get_network_info() + print("\nNetwork test:") + print(f"Network name: {network_info['Name']}") + print(f"Network ID: {network_info['Id']}") + + # Example 7: Container with resource limits + with GenericContainer("nginx:latest", mem_limit="512m", cpu_period=100000, cpu_quota=50000) as nginx_limits: + # Get container 
stats + stats = nginx_limits.get_stats() + print("\nResource limits test:") + print(f"Memory limit: {stats['memory_stats']['limit']}") + print(f"CPU usage: {stats['cpu_stats']['cpu_usage']['total_usage']}") + + # Example 8: Container with custom command + with GenericContainer("python:3.9-slim", command=["python", "-c", "print('Custom command test')"]) as python_cmd: + # Get logs + logs = python_cmd.get_logs() + print("\nCustom command test:") + print(f"Container logs: {logs.decode()}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/google/example_basic.py b/modules/google/example_basic.py new file mode 100644 index 000000000..323b25817 --- /dev/null +++ b/modules/google/example_basic.py @@ -0,0 +1,127 @@ +import json +from datetime import datetime + +from google.cloud import bigquery, datastore, pubsub, storage + +from testcontainers.google import GoogleContainer + + +def basic_example(): + with GoogleContainer() as google: + # Get connection parameters + project_id = google.project_id + + # Initialize clients + storage_client = storage.Client(project=project_id) + pubsub_client = pubsub.PublisherClient() + bigquery_client = bigquery.Client(project=project_id) + datastore_client = datastore.Client(project=project_id) + + print("Connected to Google Cloud services") + + # Test Cloud Storage + bucket_name = f"test-bucket-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}" + bucket = storage_client.create_bucket(bucket_name) + print(f"\nCreated bucket: {bucket_name}") + + # Upload a file + blob = bucket.blob("test.txt") + blob.upload_from_string("Hello, Google Cloud Storage!") + print("Uploaded test file") + + # List files + blobs = list(bucket.list_blobs()) + print("\nFiles in bucket:") + for blob in blobs: + print(f"- {blob.name}") + + # Test Pub/Sub + topic_name = f"projects/{project_id}/topics/test-topic" + pubsub_client.create_topic(name=topic_name) + print(f"\nCreated topic: {topic_name}") + + # Create subscription + subscription_name = f"projects/{project_id}/subscriptions/test-subscription" + pubsub_client.create_subscription(name=subscription_name, topic=topic_name) + print(f"Created subscription: {subscription_name}") + + # Publish message + message = "Hello, Pub/Sub!" 
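+        # publish() is asynchronous: it returns a future, and calling result()
+        # blocks until the message is acknowledged and its server-assigned ID is returned.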
+ future = pubsub_client.publish(topic_name, message.encode()) + message_id = future.result() + print(f"Published message: {message_id}") + + # Test BigQuery + dataset_id = "test_dataset" + bigquery_client.create_dataset(dataset_id) + print(f"\nCreated dataset: {dataset_id}") + + # Create table + table_id = f"{project_id}.{dataset_id}.test_table" + schema = [ + bigquery.SchemaField("name", "STRING"), + bigquery.SchemaField("age", "INTEGER"), + bigquery.SchemaField("city", "STRING"), + ] + table = bigquery_client.create_table(bigquery.Table(table_id, schema=schema)) + print(f"Created table: {table_id}") + + # Insert data + rows_to_insert = [ + {"name": "John", "age": 30, "city": "New York"}, + {"name": "Jane", "age": 25, "city": "Los Angeles"}, + {"name": "Bob", "age": 35, "city": "Chicago"}, + ] + errors = bigquery_client.insert_rows_json(table, rows_to_insert) + if not errors: + print("Inserted test data") + else: + print(f"Encountered errors: {errors}") + + # Query data + query = f"SELECT * FROM `{table_id}` WHERE age > 30" + query_job = bigquery_client.query(query) + results = query_job.result() + print("\nQuery results:") + for row in results: + print(json.dumps(dict(row), indent=2)) + + # Test Datastore + kind = "test_entity" + key = datastore_client.key(kind) + entity = datastore.Entity(key=key) + entity.update({"name": "Test Entity", "value": 42, "timestamp": datetime.utcnow()}) + datastore_client.put(entity) + print(f"\nCreated {kind} entity") + + # Query entities + query = datastore_client.query(kind=kind) + results = list(query.fetch()) + print("\nDatastore entities:") + for entity in results: + print(json.dumps(dict(entity), indent=2)) + + # Clean up + # Delete bucket and its contents + bucket.delete(force=True) + print("\nDeleted bucket") + + # Delete topic and subscription + pubsub_client.delete_subscription(subscription_name) + pubsub_client.delete_topic(topic_name) + print("Deleted Pub/Sub topic and subscription") + + # Delete BigQuery dataset and table + bigquery_client.delete_table(table_id) + bigquery_client.delete_dataset(dataset_id, delete_contents=True) + print("Deleted BigQuery dataset and table") + + # Delete Datastore entities + query = datastore_client.query(kind=kind) + keys = [entity.key for entity in query.fetch()] + datastore_client.delete_multi(keys) + print("Deleted Datastore entities") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/influxdb/example_basic.py b/modules/influxdb/example_basic.py new file mode 100644 index 000000000..580c4aa53 --- /dev/null +++ b/modules/influxdb/example_basic.py @@ -0,0 +1,192 @@ +import json +from datetime import datetime, timedelta + +from influxdb_client import InfluxDBClient, Point +from influxdb_client.client.write_api import SYNCHRONOUS + +from testcontainers.influxdb import InfluxDBContainer + + +def basic_example(): + with InfluxDBContainer() as influxdb: + # Get connection parameters + host = influxdb.get_container_host_ip() + port = influxdb.get_exposed_port(influxdb.port) + token = influxdb.token + org = influxdb.org + bucket = influxdb.bucket + + # Create InfluxDB client + client = InfluxDBClient(url=f"http://{host}:{port}", token=token, org=org) + print("Connected to InfluxDB") + + # Create write API + write_api = client.write_api(write_options=SYNCHRONOUS) + + # Create test data points + points = [] + for i in range(3): + point = ( + Point("test_measurement") + .tag("location", f"location_{i}") + .tag("device", f"device_{i}") + .field("temperature", 20 + i) + .field("humidity", 50 + i) + 
.time(datetime.utcnow() + timedelta(minutes=i)) + ) + points.append(point) + + # Write points + write_api.write(bucket=bucket, record=points) + print("Wrote test data points") + + # Create query API + query_api = client.query_api() + + # Query data + query = f''' + from(bucket: "{bucket}") + |> range(start: -1h) + |> filter(fn: (r) => r["_measurement"] == "test_measurement") + ''' + + result = query_api.query(query) + print("\nQuery results:") + for table in result: + for record in table.records: + print( + json.dumps( + { + "measurement": record.get_measurement(), + "time": record.get_time().isoformat(), + "location": record.values.get("location"), + "device": record.values.get("device"), + "field": record.get_field(), + "value": record.get_value(), + }, + indent=2, + ) + ) + + # Create aggregation query + agg_query = f''' + from(bucket: "{bucket}") + |> range(start: -1h) + |> filter(fn: (r) => r["_measurement"] == "test_measurement") + |> group(columns: ["location"]) + |> mean() + ''' + + agg_result = query_api.query(agg_query) + print("\nAggregation results:") + for table in agg_result: + for record in table.records: + print( + json.dumps( + { + "location": record.values.get("location"), + "field": record.get_field(), + "mean": record.get_value(), + }, + indent=2, + ) + ) + + # Create window query + window_query = f''' + from(bucket: "{bucket}") + |> range(start: -1h) + |> filter(fn: (r) => r["_measurement"] == "test_measurement") + |> window(every: 5m) + |> mean() + ''' + + window_result = query_api.query(window_query) + print("\nWindow results:") + for table in window_result: + for record in table.records: + print( + json.dumps( + { + "window_start": record.get_start().isoformat(), + "window_stop": record.get_stop().isoformat(), + "field": record.get_field(), + "mean": record.get_value(), + }, + indent=2, + ) + ) + + # Create task + task_flux = f''' + option task = {{ + name: "test_task", + every: 1h + }} + + from(bucket: "{bucket}") + |> range(start: -1h) + |> filter(fn: (r) => r["_measurement"] == "test_measurement") + |> mean() + |> to(bucket: "{bucket}", measurement: "test_measurement_agg") + ''' + + tasks_api = client.tasks_api() + task = tasks_api.create_task(name="test_task", flux=task_flux, org=org) + print("\nCreated task") + + # Get task info + task_info = tasks_api.find_task_by_id(task.id) + print("\nTask info:") + print( + json.dumps( + {"id": task_info.id, "name": task_info.name, "status": task_info.status, "every": task_info.every}, + indent=2, + ) + ) + + # Create dashboard + dashboards_api = client.dashboards_api() + dashboard = dashboards_api.create_dashboard(name="test_dashboard", org=org) + print("\nCreated dashboard") + + # Add cell to dashboard + dashboards_api.create_dashboard_cell( + dashboard_id=dashboard.id, name="test_cell", x=0, y=0, w=6, h=4, query=query + ) + print("Added cell to dashboard") + + # Get dashboard info + dashboard_info = dashboards_api.find_dashboard_by_id(dashboard.id) + print("\nDashboard info:") + print( + json.dumps( + {"id": dashboard_info.id, "name": dashboard_info.name, "cells": len(dashboard_info.cells)}, indent=2 + ) + ) + + # Create bucket + buckets_api = client.buckets_api() + new_bucket = buckets_api.create_bucket(bucket_name="test_bucket_2", org=org) + print("\nCreated new bucket") + + # Get bucket info + bucket_info = buckets_api.find_bucket_by_id(new_bucket.id) + print("\nBucket info:") + print(json.dumps({"id": bucket_info.id, "name": bucket_info.name, "org_id": bucket_info.org_id}, indent=2)) + + # Clean up + 
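+        # Remove everything the example created (task, dashboard, extra bucket)
+        # so repeated runs against the same container start from a clean slate.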
tasks_api.delete_task(task.id) + print("\nDeleted task") + + dashboards_api.delete_dashboard(dashboard.id) + print("Deleted dashboard") + + buckets_api.delete_bucket(new_bucket.id) + print("Deleted bucket") + + client.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/k3s/example_basic.py b/modules/k3s/example_basic.py new file mode 100644 index 000000000..75550f0b6 --- /dev/null +++ b/modules/k3s/example_basic.py @@ -0,0 +1,179 @@ +import json +import time + +import yaml +from kubernetes import client, config +from kubernetes.client.rest import ApiException + +from testcontainers.k3s import K3sContainer + + +def basic_example(): + with K3sContainer() as k3s: + # Get kubeconfig + kubeconfig = k3s.get_kubeconfig() + + # Load kubeconfig + config.load_kube_config_from_dict(yaml.safe_load(kubeconfig)) + print("Loaded kubeconfig") + + # Create API clients + v1 = client.CoreV1Api() + apps_v1 = client.AppsV1Api() + + # Create namespace + namespace = "test-namespace" + try: + v1.create_namespace(client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace))) + print(f"Created namespace: {namespace}") + except ApiException as e: + if e.status == 409: # Already exists + print(f"Namespace {namespace} already exists") + else: + raise + + # Create ConfigMap + configmap = client.V1ConfigMap( + metadata=client.V1ObjectMeta(name="test-config", namespace=namespace), data={"config.yaml": "key: value"} + ) + v1.create_namespaced_config_map(namespace=namespace, body=configmap) + print("Created ConfigMap") + + # Create Secret + secret = client.V1Secret( + metadata=client.V1ObjectMeta(name="test-secret", namespace=namespace), + type="Opaque", + data={"username": "dGVzdA==", "password": "cGFzc3dvcmQ="}, # base64 encoded + ) + v1.create_namespaced_secret(namespace=namespace, body=secret) + print("Created Secret") + + # Create Deployment + deployment = client.V1Deployment( + metadata=client.V1ObjectMeta(name="test-deployment", namespace=namespace), + spec=client.V1DeploymentSpec( + replicas=2, + selector=client.V1LabelSelector(match_labels={"app": "test-app"}), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta(labels={"app": "test-app"}), + spec=client.V1PodSpec( + containers=[ + client.V1Container( + name="nginx", image="nginx:latest", ports=[client.V1ContainerPort(container_port=80)] + ) + ] + ), + ), + ), + ) + apps_v1.create_namespaced_deployment(namespace=namespace, body=deployment) + print("Created Deployment") + + # Create Service + service = client.V1Service( + metadata=client.V1ObjectMeta(name="test-service", namespace=namespace), + spec=client.V1ServiceSpec( + selector={"app": "test-app"}, ports=[client.V1ServicePort(port=80, target_port=80)], type="ClusterIP" + ), + ) + v1.create_namespaced_service(namespace=namespace, body=service) + print("Created Service") + + # Wait for pods to be ready + print("\nWaiting for pods to be ready...") + time.sleep(10) # Give some time for pods to start + + # List pods + pods = v1.list_namespaced_pod(namespace=namespace) + print("\nPods:") + for pod in pods.items: + print(json.dumps({"name": pod.metadata.name, "phase": pod.status.phase, "ip": pod.status.pod_ip}, indent=2)) + + # Get deployment status + deployment_status = apps_v1.read_namespaced_deployment_status(name="test-deployment", namespace=namespace) + print("\nDeployment status:") + print( + json.dumps( + { + "name": deployment_status.metadata.name, + "replicas": deployment_status.spec.replicas, + "available_replicas": deployment_status.status.available_replicas, + 
"ready_replicas": deployment_status.status.ready_replicas, + }, + indent=2, + ) + ) + + # Get service details + service_details = v1.read_namespaced_service(name="test-service", namespace=namespace) + print("\nService details:") + print( + json.dumps( + { + "name": service_details.metadata.name, + "type": service_details.spec.type, + "cluster_ip": service_details.spec.cluster_ip, + "ports": [{"port": p.port, "target_port": p.target_port} for p in service_details.spec.ports], + }, + indent=2, + ) + ) + + # Create Ingress + ingress = client.V1Ingress( + metadata=client.V1ObjectMeta( + name="test-ingress", + namespace=namespace, + annotations={"nginx.ingress.kubernetes.io/rewrite-target": "/"}, + ), + spec=client.V1IngressSpec( + rules=[ + client.V1IngressRule( + host="test.local", + http=client.V1HTTPIngressRuleValue( + paths=[ + client.V1HTTPIngressPath( + path="/", + path_type="Prefix", + backend=client.V1IngressBackend( + service=client.V1IngressServiceBackend( + name="test-service", port=client.V1ServiceBackendPort(number=80) + ) + ), + ) + ] + ), + ) + ] + ), + ) + networking_v1 = client.NetworkingV1Api() + networking_v1.create_namespaced_ingress(namespace=namespace, body=ingress) + print("\nCreated Ingress") + + # Get ingress details + ingress_details = networking_v1.read_namespaced_ingress(name="test-ingress", namespace=namespace) + print("\nIngress details:") + print( + json.dumps( + { + "name": ingress_details.metadata.name, + "host": ingress_details.spec.rules[0].host, + "path": ingress_details.spec.rules[0].http.paths[0].path, + }, + indent=2, + ) + ) + + # Clean up + print("\nCleaning up resources...") + networking_v1.delete_namespaced_ingress(name="test-ingress", namespace=namespace) + v1.delete_namespaced_service(name="test-service", namespace=namespace) + apps_v1.delete_namespaced_deployment(name="test-deployment", namespace=namespace) + v1.delete_namespaced_secret(name="test-secret", namespace=namespace) + v1.delete_namespaced_config_map(name="test-config", namespace=namespace) + v1.delete_namespace(name=namespace) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/kafka/example_basic.py b/modules/kafka/example_basic.py new file mode 100644 index 000000000..37b9a32d0 --- /dev/null +++ b/modules/kafka/example_basic.py @@ -0,0 +1,80 @@ +import json +import time +from datetime import datetime +from threading import Thread + +from kafka import KafkaConsumer, KafkaProducer + +from testcontainers.kafka import KafkaContainer + + +def basic_example(): + with KafkaContainer() as kafka: + # Get connection parameters + bootstrap_servers = kafka.get_bootstrap_server() + + # Create Kafka producer + producer = KafkaProducer( + bootstrap_servers=bootstrap_servers, value_serializer=lambda v: json.dumps(v).encode("utf-8") + ) + print("Created Kafka producer") + + # Create Kafka consumer + consumer = KafkaConsumer( + bootstrap_servers=bootstrap_servers, + value_deserializer=lambda v: json.loads(v.decode("utf-8")), + auto_offset_reset="earliest", + group_id="test_group", + ) + print("Created Kafka consumer") + + # Define topics + topics = ["test_topic1", "test_topic2"] + + # Subscribe to topics + consumer.subscribe(topics) + print(f"Subscribed to topics: {topics}") + + # Start consuming in a separate thread + def consume_messages(): + for message in consumer: + print(f"\nReceived message from {message.topic}:") + print(json.dumps(message.value, indent=2)) + + consumer_thread = Thread(target=consume_messages) + consumer_thread.daemon = True + consumer_thread.start() + + # 
Produce test messages
+        test_messages = [
+            {
+                "topic": "test_topic1",
+                "message": {"id": 1, "content": "Message for topic 1", "timestamp": datetime.utcnow().isoformat()},
+            },
+            {
+                "topic": "test_topic2",
+                "message": {"id": 2, "content": "Message for topic 2", "timestamp": datetime.utcnow().isoformat()},
+            },
+        ]
+
+        for msg in test_messages:
+            producer.send(msg["topic"], msg["message"])
+            print(f"Sent message to {msg['topic']}")
+
+        # Wait for messages to be processed
+        time.sleep(2)
+
+        # Get topic information
+        print("\nTopic information:")
+        for topic in topics:
+            partitions = consumer.partitions_for_topic(topic)
+            print(f"{topic}:")
+            print(f"  Partitions: {partitions}")
+
+        # Clean up
+        producer.close()
+        consumer.close()
+
+
+if __name__ == "__main__":
+    basic_example()
diff --git a/modules/keycloak/example_basic.py b/modules/keycloak/example_basic.py
new file mode 100644
index 000000000..f4299f989
--- /dev/null
+++ b/modules/keycloak/example_basic.py
@@ -0,0 +1,171 @@
+import json
+
+from keycloak import KeycloakAdmin, KeycloakOpenID
+
+from testcontainers.keycloak import KeycloakContainer
+
+
+def basic_example():
+    with KeycloakContainer() as keycloak:
+        # Get connection parameters
+        host = keycloak.get_container_host_ip()
+        port = keycloak.get_exposed_port(keycloak.port)
+        admin_username = keycloak.admin_username
+        admin_password = keycloak.admin_password
+
+        # Create admin client
+        admin = KeycloakAdmin(
+            server_url=f"http://{host}:{port}/",
+            username=admin_username,
+            password=admin_password,
+            realm_name="master",
+            verify=False,
+        )
+        print("Connected to Keycloak as admin")
+
+        # Create realm
+        realm_name = "test-realm"
+        admin.create_realm(payload={"realm": realm_name, "enabled": True})
+        print(f"\nCreated realm: {realm_name}")
+
+        # Switch to new realm
+        admin.realm_name = realm_name
+
+        # Create client
+        client_id = "test-client"
+        admin.create_client(
+            payload={
+                "clientId": client_id,
+                "publicClient": True,
+                "redirectUris": ["http://localhost:8080/*"],
+                "webOrigins": ["http://localhost:8080"],
+            }
+        )
+        print(f"Created client: {client_id}")
+
+        # Get client details (get_client expects Keycloak's internal id, so resolve it first)
+        internal_client_id = admin.get_client_id(client_id)
+        client = admin.get_client(client_id=internal_client_id)
+        print("\nClient details:")
+        print(
+            json.dumps(
+                {
+                    "client_id": client["clientId"],
+                    "public_client": client["publicClient"],
+                    "redirect_uris": client["redirectUris"],
+                },
+                indent=2,
+            )
+        )
+
+        # Create user
+        username = "testuser"
+        admin.create_user(
+            payload={
+                "username": username,
+                "email": "test@example.com",
+                "enabled": True,
+                "credentials": [{"type": "password", "value": "password", "temporary": False}],
+            }
+        )
+        print(f"\nCreated user: {username}")
+
+        # Get user details (get_user expects the user's id, so resolve it from the username first)
+        user_id = admin.get_user_id(username)
+        user = admin.get_user(user_id=user_id)
+        print("\nUser details:")
+        print(json.dumps({"username": user["username"], "email": user["email"], "enabled": user["enabled"]}, indent=2))
+
+        # Create role
+        role_name = "test-role"
+        admin.create_realm_role(payload={"name": role_name, "description": "Test role"})
+        print(f"\nCreated role: {role_name}")
+
+        # Assign role to user
+        role = admin.get_realm_role(role_name=role_name)
+        admin.assign_realm_roles(user_id=user["id"], roles=[role])
+        print(f"Assigned role {role_name} to user {username}")
+
+        # Create group
+        group_name = "test-group"
+        admin.create_group(payload={"name": group_name})
+        print(f"\nCreated group: {group_name}")
+
+        # Add user to group
+        group = admin.get_group_by_path(path=f"/{group_name}")
+        admin.group_user_add(user_id=user["id"], group_id=group["id"])
+        print(f"Added user {username} to 
group {group_name}") + + # Create OpenID client + openid = KeycloakOpenID( + server_url=f"http://{host}:{port}/", client_id=client_id, realm_name=realm_name, verify=False + ) + + # Get token + token = openid.token(username=username, password="password") + print("\nToken details:") + print( + json.dumps( + { + "access_token": token["access_token"][:20] + "...", + "refresh_token": token["refresh_token"][:20] + "...", + "expires_in": token["expires_in"], + }, + indent=2, + ) + ) + + # Get user info + userinfo = openid.userinfo(token["access_token"]) + print("\nUser info:") + print(json.dumps(userinfo, indent=2)) + + # Get realm roles + roles = admin.get_realm_roles() + print("\nRealm roles:") + for role in roles: + print(f"- {role['name']}") + + # Get user roles + user_roles = admin.get_realm_roles_of_user(user_id=user["id"]) + print("\nUser roles:") + for role in user_roles: + print(f"- {role['name']}") + + # Get groups + groups = admin.get_groups() + print("\nGroups:") + for group in groups: + print(f"- {group['name']}") + + # Get group members + group_members = admin.get_group_members(group_id=group["id"]) + print("\nGroup members:") + for member in group_members: + print(f"- {member['username']}") + + # Update user + admin.update_user(user_id=user["id"], payload={"firstName": "Test", "lastName": "User"}) + print("\nUpdated user") + + # Update client + admin.update_client(client_id=client["id"], payload={"description": "Updated test client"}) + print("Updated client") + + # Clean up + admin.delete_user(user_id=user["id"]) + print(f"\nDeleted user: {username}") + + admin.delete_client(client_id=client["id"]) + print(f"Deleted client: {client_id}") + + admin.delete_realm_role(role_name=role_name) + print(f"Deleted role: {role_name}") + + admin.delete_group(group_id=group["id"]) + print(f"Deleted group: {group_name}") + + admin.delete_realm(realm_name=realm_name) + print(f"Deleted realm: {realm_name}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/localstack/example_basic.py b/modules/localstack/example_basic.py new file mode 100644 index 000000000..8c622f223 --- /dev/null +++ b/modules/localstack/example_basic.py @@ -0,0 +1,72 @@ +import json + +import boto3 + +from testcontainers.localstack import LocalStackContainer + + +def basic_example(): + with LocalStackContainer() as localstack: + # Get endpoint URL + endpoint_url = localstack.get_endpoint_url() + + # Create S3 client + s3 = boto3.client( + "s3", + endpoint_url=endpoint_url, + aws_access_key_id="test", + aws_secret_access_key="test", + region_name="us-east-1", + ) + + # Create SQS client + sqs = boto3.client( + "sqs", + endpoint_url=endpoint_url, + aws_access_key_id="test", + aws_secret_access_key="test", + region_name="us-east-1", + ) + + # Create S3 bucket + bucket_name = "test-bucket" + s3.create_bucket(Bucket=bucket_name) + print(f"Created S3 bucket: {bucket_name}") + + # Upload file to S3 + test_data = {"message": "Hello from LocalStack!", "timestamp": "2024-01-01"} + s3.put_object(Bucket=bucket_name, Key="test.json", Body=json.dumps(test_data)) + print("Uploaded test.json to S3") + + # Create SQS queue + queue_name = "test-queue" + queue = sqs.create_queue(QueueName=queue_name) + queue_url = queue["QueueUrl"] + print(f"Created SQS queue: {queue_name}") + + # Send message to SQS + message = {"message": "Test message", "number": 42} + sqs.send_message(QueueUrl=queue_url, MessageBody=json.dumps(message)) + print("Sent message to SQS") + + # Receive message from SQS + response = 
sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=1) + + if "Messages" in response: + received_message = json.loads(response["Messages"][0]["Body"]) + print("\nReceived message from SQS:") + print(json.dumps(received_message, indent=2)) + + # Delete message + sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=response["Messages"][0]["ReceiptHandle"]) + print("Deleted message from queue") + + # List S3 objects + objects = s3.list_objects(Bucket=bucket_name) + print("\nS3 bucket contents:") + for obj in objects.get("Contents", []): + print(f"Key: {obj['Key']}, Size: {obj['Size']} bytes") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mailpit/example_basic.py b/modules/mailpit/example_basic.py new file mode 100644 index 000000000..ef97ab906 --- /dev/null +++ b/modules/mailpit/example_basic.py @@ -0,0 +1,62 @@ +import smtplib +import time +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +import requests + +from testcontainers.mailpit import MailpitContainer + + +def basic_example(): + with MailpitContainer() as mailpit: + # Get SMTP and API endpoints + smtp_host = mailpit.get_container_host_ip() + smtp_port = mailpit.get_exposed_smtp_port() + api_url = mailpit.get_base_api_url() + + # Create email message + msg = MIMEMultipart() + msg["From"] = "sender@example.com" + msg["To"] = "recipient@example.com" + msg["Subject"] = "Test Email" + + body = "This is a test email sent to Mailpit." + msg.attach(MIMEText(body, "plain")) + + # Send email using SMTP + with smtplib.SMTP(smtp_host, smtp_port) as server: + server.send_message(msg) + print("Email sent successfully") + + # Wait for email to be processed + time.sleep(1) + + # Check received emails using API + response = requests.get(f"{api_url}/api/v1/messages") + messages = response.json() + + print("\nReceived emails:") + for message in messages["messages"]: + print(f"From: {message['From']['Address']}") + print(f"To: {message['To'][0]['Address']}") + print(f"Subject: {message['Subject']}") + print(f"Body: {message['Text']}") + print("---") + + # Get specific email details + if messages["messages"]: + first_message = messages["messages"][0] + message_id = first_message["ID"] + + response = requests.get(f"{api_url}/api/v1/messages/{message_id}") + message_details = response.json() + + print("\nDetailed message info:") + print(f"Size: {message_details['Size']} bytes") + print(f"Created: {message_details['Created']}") + print(f"Attachments: {len(message_details['Attachments'])}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/memcached/example_basic.py b/modules/memcached/example_basic.py new file mode 100644 index 000000000..01e52dea8 --- /dev/null +++ b/modules/memcached/example_basic.py @@ -0,0 +1,135 @@ +import json +import pickle + +import memcache + +from testcontainers.memcached import MemcachedContainer + + +def basic_example(): + with MemcachedContainer() as memcached: + # Get connection parameters + host = memcached.get_container_host_ip() + port = memcached.get_exposed_port(memcached.port) + + # Create Memcached client + client = memcache.Client([f"{host}:{port}"]) + print("Connected to Memcached") + + # Store simple values + client.set("string_key", "Hello from Memcached") + client.set("int_key", 42) + client.set("float_key", 3.14) + print("Stored simple values") + + # Store complex data + complex_data = {"name": "test", "values": [1, 2, 3], "nested": {"key": "value"}} + client.set("complex_key", json.dumps(complex_data)) + print("Stored 
complex data") + + # Store with expiration + client.set("expiring_key", "This will expire", time=5) + print("Stored value with expiration") + + # Store with pickle + class TestObject: + def __init__(self, name, value): + self.name = name + self.value = value + + test_obj = TestObject("test", 123) + client.set("object_key", pickle.dumps(test_obj)) + print("Stored pickled object") + + # Retrieve values + print("\nRetrieved values:") + print(f"string_key: {client.get('string_key')}") + print(f"int_key: {client.get('int_key')}") + print(f"float_key: {client.get('float_key')}") + + # Retrieve complex data + complex_value = json.loads(client.get("complex_key")) + print("\nComplex data:") + print(json.dumps(complex_value, indent=2)) + + # Retrieve pickled object + obj_data = pickle.loads(client.get("object_key")) + print("\nPickled object:") + print(f"name: {obj_data.name}") + print(f"value: {obj_data.value}") + + # Check expiration + print("\nChecking expiring key:") + print(f"expiring_key: {client.get('expiring_key')}") + print("Waiting for key to expire...") + import time + + time.sleep(6) + print(f"expiring_key after expiration: {client.get('expiring_key')}") + + # Store multiple values + multi_data = {"key1": "value1", "key2": "value2", "key3": "value3"} + client.set_multi(multi_data) + print("\nStored multiple values") + + # Retrieve multiple values + multi_keys = ["key1", "key2", "key3"] + multi_values = client.get_multi(multi_keys) + print("\nMultiple values:") + print(json.dumps(multi_values, indent=2)) + + # Increment and decrement + client.set("counter", 0) + client.incr("counter") + client.incr("counter", 2) + print("\nCounter after increment:") + print(f"counter: {client.get('counter')}") + + client.decr("counter") + print("Counter after decrement:") + print(f"counter: {client.get('counter')}") + + # Store with flags + client.set("flagged_key", "value", flags=1) + print("\nStored value with flags") + + # Get stats + stats = client.get_stats() + print("\nMemcached stats:") + for server, server_stats in stats: + print(f"\nServer: {server}") + print(json.dumps(dict(server_stats), indent=2)) + + # Delete values + client.delete("string_key") + client.delete_multi(["key1", "key2", "key3"]) + print("\nDeleted values") + + # Check deleted values + print("\nChecking deleted values:") + print(f"string_key: {client.get('string_key')}") + print(f"key1: {client.get('key1')}") + + # Store with CAS + client.set("cas_key", "initial") + cas_value = client.gets("cas_key") + print("\nCAS value:") + print(f"value: {cas_value}") + + # Update with CAS + success = client.cas("cas_key", "updated", cas_value[1]) + print(f"CAS update success: {success}") + print(f"Updated value: {client.get('cas_key')}") + + # Try to update with invalid CAS + success = client.cas("cas_key", "failed", 0) + print(f"Invalid CAS update success: {success}") + print(f"Value after failed update: {client.get('cas_key')}") + + # Clean up + client.flush_all() + print("\nFlushed all values") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/milvus/example_basic.py b/modules/milvus/example_basic.py new file mode 100644 index 000000000..776aa11b3 --- /dev/null +++ b/modules/milvus/example_basic.py @@ -0,0 +1,138 @@ +import json +from datetime import datetime + +import numpy as np +from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections, utility + +from testcontainers.milvus import MilvusContainer + + +def basic_example(): + with MilvusContainer() as milvus: + # Get connection parameters + 
host = milvus.get_container_host_ip() + port = milvus.get_exposed_port(milvus.port) + + # Connect to Milvus + connections.connect(alias="default", host=host, port=port) + print("Connected to Milvus") + + # Create collection + collection_name = "test_collection" + dim = 128 + + fields = [ + FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True), + FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=dim), + FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=500), + FieldSchema(name="category", dtype=DataType.VARCHAR, max_length=100), + FieldSchema(name="tags", dtype=DataType.JSON), + FieldSchema(name="timestamp", dtype=DataType.VARCHAR, max_length=50), + ] + + schema = CollectionSchema(fields=fields, description="Test collection") + collection = Collection(name=collection_name, schema=schema) + print(f"Created collection: {collection_name}") + + # Create index + index_params = {"metric_type": "COSINE", "index_type": "IVF_FLAT", "params": {"nlist": 1024}} + collection.create_index(field_name="vector", index_params=index_params) + print("Created index on vector field") + + # Generate test data + num_entities = 5 + vectors = np.random.rand(num_entities, dim).tolist() + + texts = [ + "AI and machine learning are transforming industries", + "New study reveals benefits of meditation", + "Global warming reaches critical levels", + "Stock market shows strong growth", + "New restaurant opens in downtown", + ] + + categories = ["Technology", "Health", "Environment", "Finance", "Food"] + + tags = [ + ["AI", "ML", "innovation"], + ["wellness", "mental health"], + ["climate", "sustainability"], + ["investing", "markets"], + ["dining", "local"], + ] + + timestamps = [datetime.utcnow().isoformat() for _ in range(num_entities)] + + # Insert data + entities = [vectors, texts, categories, tags, timestamps] + + collection.insert(entities) + print("Inserted test data") + + # Flush collection + collection.flush() + print("Flushed collection") + + # Load collection + collection.load() + print("Loaded collection") + + # Search vectors + search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}} + + results = collection.search( + data=[vectors[0]], + anns_field="vector", + param=search_params, + limit=3, + output_fields=["text", "category", "tags"], + ) + + print("\nSearch results:") + for hits in results: + for hit in hits: + print(json.dumps({"id": hit.id, "distance": hit.distance, "entity": hit.entity}, indent=2)) + + # Query with filter + filter_expr = 'category == "Technology"' + query_results = collection.query(expr=filter_expr, output_fields=["text", "category", "tags"]) + + print("\nQuery results with filter:") + print(json.dumps(query_results, indent=2)) + + # Get collection stats + stats = collection.get_statistics() + print("\nCollection statistics:") + print(json.dumps(stats, indent=2)) + + # Create partition + partition_name = "test_partition" + collection.create_partition(partition_name) + print(f"\nCreated partition: {partition_name}") + + # List partitions + partitions = collection.partitions + print("\nPartitions:") + for partition in partitions: + print( + json.dumps( + {"name": partition.name, "is_empty": partition.is_empty, "num_entities": partition.num_entities}, + indent=2, + ) + ) + + # Delete partition + collection.drop_partition(partition_name) + print(f"Deleted partition: {partition_name}") + + # Clean up + utility.drop_collection(collection_name) + print("\nDropped collection") + + # Disconnect + connections.disconnect("default") + 
print("Disconnected from Milvus") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/minio/example_basic.py b/modules/minio/example_basic.py new file mode 100644 index 000000000..5318679be --- /dev/null +++ b/modules/minio/example_basic.py @@ -0,0 +1,120 @@ +import io +import json +from datetime import timedelta + +from minio import Minio + +from testcontainers.minio import MinioContainer + + +def basic_example(): + with MinioContainer() as minio: + # Get connection parameters + host = minio.get_container_host_ip() + port = minio.get_exposed_port(minio.port) + access_key = minio.access_key + secret_key = minio.secret_key + + # Create MinIO client + client = Minio(f"{host}:{port}", access_key=access_key, secret_key=secret_key, secure=False) + print("Connected to MinIO") + + # Create bucket + bucket_name = "test-bucket" + client.make_bucket(bucket_name) + print(f"Created bucket: {bucket_name}") + + # List buckets + buckets = client.list_buckets() + print("\nBuckets:") + for bucket in buckets: + print(f"- {bucket.name} (created: {bucket.creation_date})") + + # Upload test files + test_files = {"test1.txt": "Hello from test1", "test2.txt": "Hello from test2", "test3.txt": "Hello from test3"} + + for filename, content in test_files.items(): + data = io.BytesIO(content.encode()) + client.put_object(bucket_name, filename, data, len(content.encode()), content_type="text/plain") + print(f"Uploaded {filename}") + + # List objects + objects = client.list_objects(bucket_name) + print("\nObjects in bucket:") + for obj in objects: + print(f"- {obj.object_name} (size: {obj.size} bytes)") + + # Get object + print("\nObject contents:") + for filename in test_files: + response = client.get_object(bucket_name, filename) + content = response.read().decode() + print(f"{filename}: {content}") + + # Create directory structure + client.put_object( + bucket_name, "folder1/test4.txt", io.BytesIO(b"Hello from test4"), 15, content_type="text/plain" + ) + print("\nCreated directory structure") + + # List objects with prefix + objects = client.list_objects(bucket_name, prefix="folder1/") + print("\nObjects in folder1:") + for obj in objects: + print(f"- {obj.object_name}") + + # Copy object + client.copy_object(bucket_name, "test1.txt", f"{bucket_name}/folder1/test1_copy.txt") + print("\nCopied object") + + # Get object metadata + stat = client.stat_object(bucket_name, "test1.txt") + print("\nObject metadata:") + print( + json.dumps( + { + "name": stat.object_name, + "size": stat.size, + "content_type": stat.content_type, + "last_modified": stat.last_modified.isoformat(), + }, + indent=2, + ) + ) + + # Generate presigned URL + url = client.presigned_get_object(bucket_name, "test1.txt", expires=timedelta(hours=1)) + print(f"\nPresigned URL: {url}") + + # Set bucket policy + policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": ["s3:GetObject"], + "Resource": [f"arn:aws:s3:::{bucket_name}/*"], + } + ], + } + client.set_bucket_policy(bucket_name, json.dumps(policy)) + print("\nSet bucket policy") + + # Get bucket policy + policy = client.get_bucket_policy(bucket_name) + print("\nBucket policy:") + print(json.dumps(json.loads(policy), indent=2)) + + # Remove objects + for filename in test_files: + client.remove_object(bucket_name, filename) + print(f"Removed {filename}") + + # Remove bucket + client.remove_bucket(bucket_name) + print(f"\nRemoved bucket: {bucket_name}") + + +if __name__ == "__main__": + basic_example() diff --git 
a/modules/mongodb/example_basic.py b/modules/mongodb/example_basic.py new file mode 100644 index 000000000..8fde30c65 --- /dev/null +++ b/modules/mongodb/example_basic.py @@ -0,0 +1,85 @@ +import json +from datetime import datetime + +from pymongo import MongoClient + +from testcontainers.mongodb import MongoDbContainer + + +def basic_example(): + with MongoDbContainer() as mongodb: + # Get connection URL + connection_url = mongodb.get_connection_url() + + # Create MongoDB client + client = MongoClient(connection_url) + print("Connected to MongoDB") + + # Get database and collection + db = client.test_db + collection = db.test_collection + + # Insert test documents + test_docs = [ + {"name": "test1", "value": 100, "category": "A", "created_at": datetime.utcnow()}, + {"name": "test2", "value": 200, "category": "B", "created_at": datetime.utcnow()}, + {"name": "test3", "value": 300, "category": "A", "created_at": datetime.utcnow()}, + ] + + result = collection.insert_many(test_docs) + print(f"Inserted {len(result.inserted_ids)} documents") + + # Query documents + print("\nQuery results:") + for doc in collection.find({"category": "A"}): + print(json.dumps(doc, default=str, indent=2)) + + # Execute aggregation pipeline + pipeline = [ + { + "$group": { + "_id": "$category", + "avg_value": {"$avg": "$value"}, + "count": {"$sum": 1}, + "min_value": {"$min": "$value"}, + "max_value": {"$max": "$value"}, + } + }, + {"$sort": {"avg_value": -1}}, + ] + + print("\nAggregation results:") + for result in collection.aggregate(pipeline): + print(json.dumps(result, default=str, indent=2)) + + # Create indexes + collection.create_index("name") + collection.create_index([("category", 1), ("value", -1)]) + print("\nCreated indexes") + + # List indexes + print("\nIndexes:") + for index in collection.list_indexes(): + print(json.dumps(index, default=str, indent=2)) + + # Update documents + result = collection.update_many({"category": "A"}, {"$set": {"updated": True}}) + print(f"\nUpdated {result.modified_count} documents") + + # Find updated documents + print("\nUpdated documents:") + for doc in collection.find({"updated": True}): + print(json.dumps(doc, default=str, indent=2)) + + # Delete documents + result = collection.delete_many({"category": "B"}) + print(f"\nDeleted {result.deleted_count} documents") + + # Get collection stats + stats = db.command("collstats", "test_collection") + print("\nCollection stats:") + print(json.dumps(stats, default=str, indent=2)) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mqtt/example_basic.py b/modules/mqtt/example_basic.py new file mode 100644 index 000000000..dc6de9fe3 --- /dev/null +++ b/modules/mqtt/example_basic.py @@ -0,0 +1,51 @@ +import time + +import paho.mqtt.client as mqtt + +from testcontainers.mqtt import MqttContainer + + +def basic_example(): + with MqttContainer() as mqtt_container: + # Get connection parameters + host = mqtt_container.get_container_host_ip() + port = mqtt_container.get_exposed_port(mqtt_container.port) + + # Create MQTT client + client = mqtt.Client() + + # Define callback functions + def on_connect(client, userdata, flags, rc): + print(f"Connected with result code {rc}") + # Subscribe to topics + client.subscribe("test/topic") + + def on_message(client, userdata, msg): + print(f"Received message on topic {msg.topic}: {msg.payload.decode()}") + + # Set callbacks + client.on_connect = on_connect + client.on_message = on_message + + # Connect to broker + client.connect(host, port) + client.loop_start() + + # Publish 
test messages + test_messages = ["Hello MQTT!", "This is a test message", "MQTT is working!"] + + for msg in test_messages: + client.publish("test/topic", msg) + print(f"Published message: {msg}") + time.sleep(1) # Wait a bit between messages + + # Wait for messages to be processed + time.sleep(2) + + # Clean up + client.loop_stop() + client.disconnect() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mssql/example_basic.py b/modules/mssql/example_basic.py new file mode 100644 index 000000000..bd06205c5 --- /dev/null +++ b/modules/mssql/example_basic.py @@ -0,0 +1,164 @@ +import pyodbc + +from testcontainers.mssql import MsSqlContainer + + +def basic_example(): + with MsSqlContainer() as mssql: + # Get connection parameters + host = mssql.get_container_host_ip() + port = mssql.get_exposed_port(mssql.port) + username = mssql.username + password = mssql.password + database = mssql.database + + # Create connection string + conn_str = f"DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={host},{port};DATABASE={database};UID={username};PWD={password}" + + # Connect to MSSQL + connection = pyodbc.connect(conn_str) + print("Connected to MSSQL") + + # Create cursor + cursor = connection.cursor() + + # Create test table + cursor.execute(""" + CREATE TABLE test_table ( + id INT IDENTITY(1,1) PRIMARY KEY, + name NVARCHAR(50), + value INT, + category NVARCHAR(10), + created_at DATETIME2 DEFAULT GETDATE() + ) + """) + print("Created test table") + + # Insert test data + test_data = [("test1", 100, "A"), ("test2", 200, "B"), ("test3", 300, "A")] + + cursor.executemany( + """ + INSERT INTO test_table (name, value, category) + VALUES (?, ?, ?) + """, + test_data, + ) + print("Inserted test data") + + # Commit changes + connection.commit() + + # Query data + print("\nQuery results:") + cursor.execute("SELECT * FROM test_table WHERE category = 'A'") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Create view + cursor.execute(""" + CREATE OR ALTER VIEW test_view AS + SELECT category, COUNT(*) as count, AVG(value) as avg_value + FROM test_table + GROUP BY category + """) + print("\nCreated view") + + # Query view + print("\nView results:") + cursor.execute("SELECT * FROM test_view") + for row in cursor: + print({"category": row[0], "count": row[1], "avg_value": float(row[2])}) + + # Create index + cursor.execute("CREATE INDEX test_idx ON test_table (value)") + print("\nCreated index") + + # Query using index + print("\nQuery using index:") + cursor.execute("SELECT * FROM test_table WHERE value > 150") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Get table metadata + cursor.execute(""" + SELECT + c.name as column_name, + t.name as data_type, + c.max_length, + c.is_nullable + FROM sys.columns c + JOIN sys.types t ON c.user_type_id = t.user_type_id + WHERE OBJECT_ID = OBJECT_ID('test_table') + ORDER BY c.column_id + """) + print("\nTable metadata:") + for row in cursor: + print({"column": row[0], "type": row[1], "length": row[2], "nullable": row[3]}) + + # Create stored procedure + cursor.execute(""" + CREATE OR ALTER PROCEDURE test_proc + @category NVARCHAR(10), + @count INT OUTPUT + AS + BEGIN + SELECT @count = COUNT(*) + FROM test_table + WHERE category = @category + END + """) + print("\nCreated stored procedure") + + # Execute stored procedure + cursor.execute(""" + DECLARE @count INT + EXEC test_proc 
@category = 'A', @count = @count OUTPUT + SELECT @count as count + """) + count = cursor.fetchone()[0] + print(f"Count for category A: {count}") + + # Create function + cursor.execute(""" + CREATE OR ALTER FUNCTION test_func(@category NVARCHAR(10)) + RETURNS TABLE + AS + RETURN + ( + SELECT name, value + FROM test_table + WHERE category = @category + ) + """) + print("\nCreated function") + + # Use function + print("\nFunction results:") + cursor.execute("SELECT * FROM test_func('A')") + for row in cursor: + print({"name": row[0], "value": row[1]}) + + # Create trigger + cursor.execute(""" + CREATE OR ALTER TRIGGER test_trigger + ON test_table + AFTER INSERT + AS + BEGIN + PRINT 'New row inserted' + END + """) + print("\nCreated trigger") + + # Test trigger + cursor.execute("INSERT INTO test_table (name, value, category) VALUES ('test4', 400, 'B')") + connection.commit() + + # Clean up + cursor.close() + connection.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mysql/example_basic.py b/modules/mysql/example_basic.py new file mode 100644 index 000000000..ba3418b28 --- /dev/null +++ b/modules/mysql/example_basic.py @@ -0,0 +1,16 @@ +import sqlalchemy + +from testcontainers.mysql import MySqlContainer + + +def basic_example(): + config = MySqlContainer("mysql:8.3.0", dialect="pymysql") + + with config as mysql: + connection_url = mysql.get_connection_url() + + engine = sqlalchemy.create_engine(connection_url) + with engine.begin() as connection: + result = connection.execute(sqlalchemy.text("select version()")) + for row in result: + print(f"MySQL version: {row[0]}") diff --git a/modules/nats/example_basic.py b/modules/nats/example_basic.py new file mode 100644 index 000000000..9e941bf9b --- /dev/null +++ b/modules/nats/example_basic.py @@ -0,0 +1,152 @@ +import asyncio +import json + +from nats.aio.client import Client as NATS +from nats.aio.msg import Msg + +from testcontainers.nats import NatsContainer + + +async def message_handler(msg: Msg): + subject = msg.subject + data = msg.data.decode() + print(f"Received message on {subject}: {data}") + + +async def basic_example(): + with NatsContainer() as nats_container: + # Get connection parameters + host = nats_container.get_container_host_ip() + port = nats_container.get_exposed_port(nats_container.port) + + # Create NATS client + nc = NATS() + await nc.connect(f"nats://{host}:{port}") + print("Connected to NATS") + + # Create JetStream context + js = nc.jetstream() + + # Create stream + stream = await js.add_stream(name="test-stream", subjects=["test.>"]) + print(f"\nCreated stream: {stream.config.name}") + + # Create consumer + consumer = await js.add_consumer(stream_name="test-stream", durable_name="test-consumer") + print(f"Created consumer: {consumer.name}") + + # Subscribe to subjects + subjects = ["test.1", "test.2", "test.3"] + for subject in subjects: + await nc.subscribe(subject, cb=message_handler) + print(f"Subscribed to {subject}") + + # Publish messages + messages = {"test.1": "Hello from test.1", "test.2": "Hello from test.2", "test.3": "Hello from test.3"} + + for subject, message in messages.items(): + await nc.publish(subject, message.encode()) + print(f"Published to {subject}") + + # Publish with headers + headers = {"header1": "value1", "header2": "value2"} + await nc.publish("test.headers", b"Message with headers", headers=headers) + print("\nPublished message with headers") + + # Publish with reply + reply_subject = "test.reply" + await nc.subscribe(reply_subject, cb=message_handler) + 
print(f"Subscribed to {reply_subject}") + + response = await nc.request("test.request", b"Request message", timeout=1) + print(f"Received reply: {response.data.decode()}") + + # Publish to JetStream + for subject, message in messages.items(): + ack = await js.publish(subject, message.encode()) + print(f"Published to JetStream {subject}: {ack.stream}") + + # Get stream info + stream_info = await js.stream_info("test-stream") + print("\nStream info:") + print( + json.dumps( + { + "name": stream_info.config.name, + "subjects": stream_info.config.subjects, + "messages": stream_info.state.messages, + "bytes": stream_info.state.bytes, + }, + indent=2, + ) + ) + + # Get consumer info + consumer_info = await js.consumer_info("test-stream", "test-consumer") + print("\nConsumer info:") + print( + json.dumps( + { + "name": consumer_info.name, + "stream_name": consumer_info.stream_name, + "delivered": consumer_info.delivered.stream_seq, + "ack_floor": consumer_info.ack_floor.stream_seq, + }, + indent=2, + ) + ) + + # Create key-value store + kv = await js.create_key_value(bucket="test-kv", history=5, ttl=3600) + print("\nCreated key-value store") + + # Put values + await kv.put("key1", b"value1") + await kv.put("key2", b"value2") + print("Put values in key-value store") + + # Get values + entry = await kv.get("key1") + print(f"Got value: {entry.value.decode()}") + + # List keys + keys = await kv.keys() + print("\nKeys in store:") + for key in keys: + print(f"- {key}") + + # Delete key + await kv.delete("key1") + print("Deleted key1") + + # Create object store + os = await js.create_object_store(bucket="test-os", ttl=3600) + print("\nCreated object store") + + # Put object + await os.put("test.txt", b"Hello from object store") + print("Put object in store") + + # Get object + obj = await os.get("test.txt") + print(f"Got object: {obj.data.decode()}") + + # List objects + objects = await os.list() + print("\nObjects in store:") + for obj in objects: + print(f"- {obj.name}") + + # Delete object + await os.delete("test.txt") + print("Deleted object") + + # Clean up + await js.delete_stream("test-stream") + print("\nDeleted stream") + + await nc.close() + + +if __name__ == "__main__": + asyncio.run(basic_example()) diff --git a/modules/neo4j/example_basic.py b/modules/neo4j/example_basic.py new file mode 100644 index 000000000..c6114bc70 --- /dev/null +++ b/modules/neo4j/example_basic.py @@ -0,0 +1,198 @@ +import json + +from neo4j import GraphDatabase + +from testcontainers.neo4j import Neo4jContainer + + +def basic_example(): + with Neo4jContainer() as neo4j: + # Get connection parameters + host = neo4j.get_container_host_ip() + port = neo4j.get_exposed_port(neo4j.port) + username = neo4j.username + password = neo4j.password + + # Create Neo4j driver + driver = GraphDatabase.driver(f"bolt://{host}:{port}", auth=(username, password)) + print("Connected to Neo4j") + + # Create session + with driver.session() as session: + # Create nodes + create_nodes_query = """ + CREATE (p1:Person {name: 'Alice', age: 30}) + CREATE (p2:Person {name: 'Bob', age: 35}) + CREATE (p3:Person {name: 'Charlie', age: 25}) + CREATE (c1:Company {name: 'Tech Corp', founded: 2000}) + CREATE (c2:Company {name: 'Data Inc', founded: 2010}) + """ + session.run(create_nodes_query) + print("Created nodes") + + # Create relationships + create_rels_query = """ + MATCH (p1:Person {name: 'Alice'}), (c1:Company {name: 'Tech Corp'}) + CREATE (p1)-[:WORKS_AT {since: 2015}]->(c1) + + MATCH (p2:Person {name: 'Bob'}), (c1:Company {name: 'Tech Corp'}) 
+ CREATE (p2)-[:WORKS_AT {since: 2018}]->(c1) + + MATCH (p3:Person {name: 'Charlie'}), (c2:Company {name: 'Data Inc'}) + CREATE (p3)-[:WORKS_AT {since: 2020}]->(c2) + + MATCH (p1:Person {name: 'Alice'}), (p2:Person {name: 'Bob'}) + CREATE (p1)-[:KNOWS {since: 2016}]->(p2) + """ + session.run(create_rels_query) + print("Created relationships") + + # Query nodes + query_nodes = """ + MATCH (n) + RETURN n + """ + result = session.run(query_nodes) + print("\nAll nodes:") + for record in result: + node = record["n"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Query relationships + query_rels = """ + MATCH (n)-[r]->(m) + RETURN n, r, m + """ + result = session.run(query_rels) + print("\nAll relationships:") + for record in result: + print( + json.dumps( + { + "from": {"labels": list(record["n"].labels), "properties": dict(record["n"])}, + "relationship": {"type": record["r"].type, "properties": dict(record["r"])}, + "to": {"labels": list(record["m"].labels), "properties": dict(record["m"])}, + }, + indent=2, + ) + ) + + # Create index + create_index = """ + CREATE INDEX person_name IF NOT EXISTS + FOR (p:Person) + ON (p.name) + """ + session.run(create_index) + print("\nCreated index on Person.name") + + # Query using index + query_indexed = """ + MATCH (p:Person) + WHERE p.name = 'Alice' + RETURN p + """ + result = session.run(query_indexed) + print("\nQuery using index:") + for record in result: + node = record["p"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Create constraint + create_constraint = """ + CREATE CONSTRAINT company_name IF NOT EXISTS + FOR (c:Company) + REQUIRE c.name IS UNIQUE + """ + session.run(create_constraint) + print("\nCreated constraint on Company.name") + + # Create full-text index + create_ft_index = """ + CALL db.index.fulltext.createNodeIndex( + "personSearch", + ["Person"], + ["name"] + ) + """ + session.run(create_ft_index) + print("Created full-text index") + + # Query using full-text index + query_ft = """ + CALL db.index.fulltext.queryNodes( + "personSearch", + "Alice" + ) + YIELD node + RETURN node + """ + result = session.run(query_ft) + print("\nFull-text search results:") + for record in result: + node = record["node"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Create stored procedure + create_proc = """ + CALL apoc.custom.asProcedure( + 'getCompanyEmployees', + 'MATCH (p:Person)-[:WORKS_AT]->(c:Company {name: $companyName}) + RETURN p', + 'READ', + [['p', 'NODE']], + [['companyName', 'STRING']] + ) + """ + session.run(create_proc) + print("\nCreated stored procedure") + + # Call stored procedure + call_proc = """ + CALL custom.getCompanyEmployees('Tech Corp') + YIELD p + RETURN p + """ + result = session.run(call_proc) + print("\nStored procedure results:") + for record in result: + node = record["p"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Create trigger + create_trigger = """ + CALL apoc.trigger.add( + 'setTimestamp', + 'UNWIND apoc.trigger.nodesByLabel($assignedLabels, "Person") AS n + SET n.updated_at = datetime()', + {phase: 'after'} + ) + """ + session.run(create_trigger) + print("\nCreated trigger") + + # Test trigger + test_trigger = """ + MATCH (p:Person {name: 'Alice'}) + SET p.age = 31 + RETURN p + """ + result = session.run(test_trigger) + print("\nTrigger test results:") + for record in result: + node = record["p"] + print(json.dumps({"labels": 
list(node.labels), "properties": dict(node)}, indent=2)) + + # Clean up + cleanup = """ + MATCH (n) + DETACH DELETE n + """ + session.run(cleanup) + print("\nCleaned up database") + + driver.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/nginx/example_basic.py b/modules/nginx/example_basic.py new file mode 100644 index 000000000..d7aaec122 --- /dev/null +++ b/modules/nginx/example_basic.py @@ -0,0 +1,116 @@ +import json +import os +from pathlib import Path + +import requests + +from testcontainers.nginx import NginxContainer + + +def basic_example(): + with NginxContainer() as nginx: + # Get connection parameters + host = nginx.get_container_host_ip() + port = nginx.get_exposed_port(nginx.port) + nginx_url = f"http://{host}:{port}" + print(f"Nginx URL: {nginx_url}") + + # Create test HTML file + test_html = """ + + + + Test Page + + +

+        Hello from Nginx!
+
+        This is a test page.
    + + + """ + + # Create test directory and file + test_dir = Path("/tmp/nginx_test") + test_dir.mkdir(exist_ok=True) + test_file = test_dir / "index.html" + test_file.write_text(test_html) + + # Copy test file to container + nginx.get_container().copy_to_container(test_file, "/usr/share/nginx/html/") + print("Copied test file to container") + + # Test basic HTTP request + response = requests.get(nginx_url) + print(f"\nBasic request status: {response.status_code}") + print(f"Content type: {response.headers.get('content-type')}") + print(f"Content length: {response.headers.get('content-length')}") + + # Test HEAD request + head_response = requests.head(nginx_url) + print("\nHEAD request headers:") + print(json.dumps(dict(head_response.headers), indent=2)) + + # Create test configuration + test_config = """ + server { + listen 80; + server_name test.local; + + location /test { + return 200 'Test location'; + } + + location /redirect { + return 301 /test; + } + + location /error { + return 404 'Not Found'; + } + } + """ + + # Write and copy configuration + config_file = test_dir / "test.conf" + config_file.write_text(test_config) + nginx.get_container().copy_to_container(config_file, "/etc/nginx/conf.d/") + print("\nCopied test configuration") + + # Reload Nginx configuration + nginx.get_container().exec_run("nginx -s reload") + print("Reloaded Nginx configuration") + + # Test custom location + test_response = requests.get(f"{nginx_url}/test") + print(f"\nTest location response: {test_response.text}") + + # Test redirect + redirect_response = requests.get(f"{nginx_url}/redirect", allow_redirects=False) + print(f"\nRedirect status: {redirect_response.status_code}") + print(f"Redirect location: {redirect_response.headers.get('location')}") + + # Test error + error_response = requests.get(f"{nginx_url}/error") + print(f"\nError status: {error_response.status_code}") + print(f"Error response: {error_response.text}") + + # Get Nginx version + version_response = requests.get(nginx_url) + server = version_response.headers.get("server") + print(f"\nNginx version: {server}") + + # Test with different HTTP methods + methods = ["GET", "POST", "PUT", "DELETE", "OPTIONS"] + print("\nHTTP method tests:") + for method in methods: + response = requests.request(method, nginx_url) + print(f"{method}: {response.status_code}") + + # Clean up + os.remove(test_file) + os.remove(config_file) + os.rmdir(test_dir) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/ollama/example_basic.py b/modules/ollama/example_basic.py new file mode 100644 index 000000000..134b636f5 --- /dev/null +++ b/modules/ollama/example_basic.py @@ -0,0 +1,50 @@ +import requests + +from testcontainers.ollama import OllamaContainer + + +def basic_example(): + with OllamaContainer() as ollama: + # Get API endpoint + api_url = ollama.get_api_url() + + # Pull a model + model_name = "llama2" + print(f"Pulling model: {model_name}") + response = requests.post(f"{api_url}/api/pull", json={"name": model_name}) + print(f"Pull response: {response.json()}") + + # Generate text + prompt = "Write a short poem about programming." 
+ print(f"\nGenerating text for prompt: {prompt}") + + response = requests.post( + f"{api_url}/api/generate", json={"model": model_name, "prompt": prompt, "stream": False} + ) + + result = response.json() + print("\nGenerated text:") + print(result["response"]) + + # Embed text + text_to_embed = "The quick brown fox jumps over the lazy dog" + print(f"\nGenerating embedding for: {text_to_embed}") + + response = requests.post(f"{api_url}/api/embeddings", json={"model": model_name, "prompt": text_to_embed}) + + embedding = response.json() + print("\nEmbedding:") + print(f"Length: {len(embedding['embedding'])}") + print(f"First 5 values: {embedding['embedding'][:5]}") + + # List available models + response = requests.get(f"{api_url}/api/tags") + models = response.json() + + print("\nAvailable models:") + for model in models["models"]: + print(f"Name: {model['name']}, Size: {model['size']}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/opensearch/example_basic.py b/modules/opensearch/example_basic.py new file mode 100644 index 000000000..e69de29bb diff --git a/modules/oracle-free/example_basic.py b/modules/oracle-free/example_basic.py new file mode 100644 index 000000000..8abad4d01 --- /dev/null +++ b/modules/oracle-free/example_basic.py @@ -0,0 +1,140 @@ +import oracledb + +from testcontainers.oracle_free import OracleFreeContainer + + +def basic_example(): + with OracleFreeContainer() as oracle: + # Get connection parameters + host = oracle.get_container_host_ip() + port = oracle.get_exposed_port(oracle.port) + username = oracle.username + password = oracle.password + service_name = oracle.service_name + + # Create connection string + dsn = f"{host}:{port}/{service_name}" + + # Connect to Oracle + connection = oracledb.connect(user=username, password=password, dsn=dsn) + print("Connected to Oracle") + + # Create cursor + cursor = connection.cursor() + + # Create test table + cursor.execute(""" + CREATE TABLE test_table ( + id NUMBER GENERATED ALWAYS AS IDENTITY, + name VARCHAR2(50), + value NUMBER, + category VARCHAR2(10), + created_at TIMESTAMP DEFAULT SYSTIMESTAMP + ) + """) + print("Created test table") + + # Insert test data + test_data = [("test1", 100, "A"), ("test2", 200, "B"), ("test3", 300, "A")] + + cursor.executemany( + """ + INSERT INTO test_table (name, value, category) + VALUES (:1, :2, :3) + """, + test_data, + ) + print("Inserted test data") + + # Commit changes + connection.commit() + + # Query data + print("\nQuery results:") + cursor.execute("SELECT * FROM test_table WHERE category = 'A'") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Create view + cursor.execute(""" + CREATE OR REPLACE VIEW test_view AS + SELECT category, COUNT(*) as count, AVG(value) as avg_value + FROM test_table + GROUP BY category + """) + print("\nCreated view") + + # Query view + print("\nView results:") + cursor.execute("SELECT * FROM test_view") + for row in cursor: + print({"category": row[0], "count": row[1], "avg_value": float(row[2])}) + + # Create index + cursor.execute("CREATE INDEX test_idx ON test_table (value)") + print("\nCreated index") + + # Query using index + print("\nQuery using index:") + cursor.execute("SELECT * FROM test_table WHERE value > 150") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Get table metadata + cursor.execute(""" + SELECT column_name, data_type, 
data_length, nullable + FROM user_tab_columns + WHERE table_name = 'TEST_TABLE' + ORDER BY column_id + """) + print("\nTable metadata:") + for row in cursor: + print({"column": row[0], "type": row[1], "length": row[2], "nullable": row[3]}) + + # Create sequence + cursor.execute(""" + CREATE SEQUENCE test_seq + START WITH 1 + INCREMENT BY 1 + NOCACHE + NOCYCLE + """) + print("\nCreated sequence") + + # Use sequence + cursor.execute("SELECT test_seq.NEXTVAL FROM DUAL") + next_val = cursor.fetchone()[0] + print(f"Next sequence value: {next_val}") + + # Create procedure + cursor.execute(""" + CREATE OR REPLACE PROCEDURE test_proc ( + p_category IN VARCHAR2, + p_count OUT NUMBER + ) AS + BEGIN + SELECT COUNT(*) + INTO p_count + FROM test_table + WHERE category = p_category; + END; + """) + print("\nCreated procedure") + + # Execute procedure + cursor.execute(""" + DECLARE + v_count NUMBER; + BEGIN + test_proc('A', v_count); + DBMS_OUTPUT.PUT_LINE('Count for category A: ' || v_count); + END; + """) + + # Clean up + cursor.close() + connection.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/postgres/example_basic.py b/modules/postgres/example_basic.py new file mode 100644 index 000000000..611081023 --- /dev/null +++ b/modules/postgres/example_basic.py @@ -0,0 +1,99 @@ +import pandas as pd +import sqlalchemy +from sqlalchemy import text + +from testcontainers.postgres import PostgresContainer + + +def basic_example(): + with PostgresContainer() as postgres: + # Get connection URL + connection_url = postgres.get_connection_url() + + # Create SQLAlchemy engine + engine = sqlalchemy.create_engine(connection_url) + print("Connected to PostgreSQL") + + # Create a test table + create_table_sql = """ + CREATE TABLE test_table ( + id SERIAL PRIMARY KEY, + name VARCHAR(50), + value DECIMAL(10,2), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + + with engine.begin() as connection: + connection.execute(text(create_table_sql)) + print("Created test table") + + # Insert test data + test_data = [ + {"name": "test1", "value": 100.0}, + {"name": "test2", "value": 200.0}, + {"name": "test3", "value": 300.0}, + ] + + with engine.begin() as connection: + for data in test_data: + connection.execute(text("INSERT INTO test_table (name, value) VALUES (:name, :value)"), data) + print("Inserted test data") + + # Query data + with engine.connect() as connection: + result = connection.execute(text("SELECT * FROM test_table ORDER BY id")) + rows = result.fetchall() + + print("\nQuery results:") + for row in rows: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}, Created: {row[3]}") + + # Execute a more complex query + with engine.connect() as connection: + result = connection.execute( + text(""" + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count, + MIN(created_at) as first_created, + MAX(created_at) as last_created + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + ) + + print("\nAggregation results:") + for row in result: + print(f"Name: {row[0]}, Avg: {row[1]:.2f}, Count: {row[2]}, First: {row[3]}, Last: {row[4]}") + + # Convert to pandas DataFrame + df = pd.read_sql("SELECT * FROM test_table ORDER BY id", engine) + print("\nDataFrame:") + print(df) + + # Create and query a view + create_view_sql = """ + CREATE OR REPLACE VIEW test_view AS + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count + FROM test_table + GROUP BY name + """ + + with engine.begin() as connection: + connection.execute(text(create_view_sql)) + print("\nCreated 
view") + + result = connection.execute(text("SELECT * FROM test_view")) + print("\nView results:") + for row in result: + print(f"Name: {row[0]}, Avg: {row[1]:.2f}, Count: {row[2]}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/qdrant/example_basic.py b/modules/qdrant/example_basic.py new file mode 100644 index 000000000..589735e1e --- /dev/null +++ b/modules/qdrant/example_basic.py @@ -0,0 +1,149 @@ +import json +from datetime import datetime + +import numpy as np +from qdrant_client import QdrantClient +from qdrant_client.http import models + +from testcontainers.qdrant import QdrantContainer + + +def basic_example(): + with QdrantContainer() as qdrant: + # Get connection parameters + host = qdrant.get_container_host_ip() + port = qdrant.get_exposed_port(qdrant.port) + + # Create Qdrant client + client = QdrantClient(host=host, port=port) + print("Connected to Qdrant") + + # Create collection + collection_name = "test_collection" + vector_size = 128 + + client.create_collection( + collection_name=collection_name, + vectors_config=models.VectorParams(size=vector_size, distance=models.Distance.COSINE), + ) + print(f"Created collection: {collection_name}") + + # Generate test vectors and payloads + num_vectors = 5 + vectors = np.random.rand(num_vectors, vector_size).tolist() + + payloads = [ + { + "text": "AI and machine learning are transforming industries", + "category": "Technology", + "tags": ["AI", "ML", "innovation"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "New study reveals benefits of meditation", + "category": "Health", + "tags": ["wellness", "mental health"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "Global warming reaches critical levels", + "category": "Environment", + "tags": ["climate", "sustainability"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "Stock market shows strong growth", + "category": "Finance", + "tags": ["investing", "markets"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "New restaurant opens in downtown", + "category": "Food", + "tags": ["dining", "local"], + "timestamp": datetime.utcnow().isoformat(), + }, + ] + + # Upload vectors with payloads + client.upsert( + collection_name=collection_name, + points=models.Batch(ids=list(range(num_vectors)), vectors=vectors, payloads=payloads), + ) + print("Uploaded vectors with payloads") + + # Search vectors + search_result = client.search(collection_name=collection_name, query_vector=vectors[0], limit=3) + print("\nSearch results:") + for scored_point in search_result: + print( + json.dumps( + {"id": scored_point.id, "score": scored_point.score, "payload": scored_point.payload}, indent=2 + ) + ) + + # Filtered search + filter_result = client.search( + collection_name=collection_name, + query_vector=vectors[0], + query_filter=models.Filter( + must=[models.FieldCondition(key="category", match=models.MatchValue(value="Technology"))] + ), + limit=2, + ) + print("\nFiltered search results:") + for scored_point in filter_result: + print( + json.dumps( + {"id": scored_point.id, "score": scored_point.score, "payload": scored_point.payload}, indent=2 + ) + ) + + # Create payload index + client.create_payload_index( + collection_name=collection_name, field_name="category", field_schema=models.PayloadFieldSchema.KEYWORD + ) + print("\nCreated payload index on category field") + + # Create vector index + client.create_payload_index( + collection_name=collection_name, field_name="tags", 
field_schema=models.PayloadFieldSchema.KEYWORD + ) + print("Created payload index on tags field") + + # Scroll through collection + scroll_result = client.scroll(collection_name=collection_name, limit=10, with_payload=True, with_vectors=True) + print("\nScrolled through collection:") + for point in scroll_result[0]: + print(json.dumps({"id": point.id, "payload": point.payload}, indent=2)) + + # Get collection info + collection_info = client.get_collection(collection_name) + print("\nCollection info:") + print( + json.dumps( + { + "name": collection_info.name, + "vectors_count": collection_info.vectors_count, + "points_count": collection_info.points_count, + "status": collection_info.status, + }, + indent=2, + ) + ) + + # Update payload + client.set_payload(collection_name=collection_name, payload={"new_field": "updated value"}, points=[0, 1]) + print("\nUpdated payload for points 0 and 1") + + # Delete points + client.delete(collection_name=collection_name, points_selector=models.PointIdsList(points=[4])) + print("Deleted point with id 4") + + # Clean up + client.delete_collection(collection_name) + print("\nDeleted collection") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/rabbitmq/example_basic.py b/modules/rabbitmq/example_basic.py new file mode 100644 index 000000000..906a0e24f --- /dev/null +++ b/modules/rabbitmq/example_basic.py @@ -0,0 +1,98 @@ +import json +import time +from threading import Thread + +import pika + +from testcontainers.rabbitmq import RabbitMQContainer + + +def basic_example(): + with RabbitMQContainer() as rabbitmq: + # Get connection parameters + host = rabbitmq.get_container_host_ip() + port = rabbitmq.get_exposed_port(rabbitmq.port) + username = rabbitmq.username + password = rabbitmq.password + + # Create connection + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, credentials=credentials) + connection = pika.BlockingConnection(parameters) + channel = connection.channel() + print("Connected to RabbitMQ") + + # Declare exchange + exchange_name = "test_exchange" + channel.exchange_declare(exchange=exchange_name, exchange_type="direct", durable=True) + print(f"Declared exchange: {exchange_name}") + + # Declare queues + queues = {"queue1": "routing_key1", "queue2": "routing_key2"} + + for queue_name, routing_key in queues.items(): + channel.queue_declare(queue=queue_name, durable=True) + channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=routing_key) + print(f"Declared and bound queue: {queue_name}") + + # Define message handler + def message_handler(ch, method, properties, body): + message = json.loads(body) + print(f"\nReceived message on {method.routing_key}:") + print(json.dumps(message, indent=2)) + ch.basic_ack(delivery_tag=method.delivery_tag) + + # Start consuming in a separate thread + def consume_messages(): + channel.basic_qos(prefetch_count=1) + for queue_name in queues: + channel.basic_consume(queue=queue_name, on_message_callback=message_handler) + channel.start_consuming() + + consumer_thread = Thread(target=consume_messages) + consumer_thread.daemon = True + consumer_thread.start() + + # Publish messages + test_messages = [ + { + "queue": "queue1", + "routing_key": "routing_key1", + "message": {"id": 1, "content": "Message for queue 1", "timestamp": time.time()}, + }, + { + "queue": "queue2", + "routing_key": "routing_key2", + "message": {"id": 2, "content": "Message for queue 2", "timestamp": time.time()}, + }, + ] + + for msg in 
test_messages: + channel.basic_publish( + exchange=exchange_name, + routing_key=msg["routing_key"], + body=json.dumps(msg["message"]), + properties=pika.BasicProperties( + delivery_mode=2, # make message persistent + content_type="application/json", + ), + ) + print(f"Published message to {msg['queue']}") + + # Wait for messages to be processed + time.sleep(2) + + # Get queue information + print("\nQueue information:") + for queue_name in queues: + queue = channel.queue_declare(queue=queue_name, passive=True) + print(f"{queue_name}:") + print(f" Messages: {queue.method.message_count}") + print(f" Consumers: {queue.method.consumer_count}") + + # Clean up + connection.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/redis/example_basic.py b/modules/redis/example_basic.py new file mode 100644 index 000000000..5fce0a7b7 --- /dev/null +++ b/modules/redis/example_basic.py @@ -0,0 +1,84 @@ +from datetime import timedelta + +import redis + +from testcontainers.redis import RedisContainer + + +def basic_example(): + with RedisContainer() as redis_container: + # Get connection parameters + host = redis_container.get_container_host_ip() + port = redis_container.get_exposed_port(redis_container.port) + + # Create Redis client + client = redis.Redis(host=host, port=port, decode_responses=True) + print("Connected to Redis") + + # String operations + client.set("greeting", "Hello, Redis!") + value = client.get("greeting") + print(f"\nString value: {value}") + + # List operations + client.lpush("tasks", "task1", "task2", "task3") + tasks = client.lrange("tasks", 0, -1) + print("\nTasks list:") + for task in tasks: + print(f"- {task}") + + # Set operations + client.sadd("tags", "python", "redis", "docker", "testing") + tags = client.smembers("tags") + print("\nTags set:") + for tag in tags: + print(f"- {tag}") + + # Hash operations + user_data = {"name": "John Doe", "email": "john@example.com", "age": "30"} + client.hset("user:1", mapping=user_data) + user = client.hgetall("user:1") + print("\nUser hash:") + for field, value in user.items(): + print(f"{field}: {value}") + + # Sorted set operations + scores = {"player1": 100, "player2": 200, "player3": 150} + client.zadd("leaderboard", scores) + leaderboard = client.zrevrange("leaderboard", 0, -1, withscores=True) + print("\nLeaderboard:") + for player, score in leaderboard: + print(f"{player}: {score}") + + # Key expiration + client.setex("temp_key", timedelta(seconds=10), "This will expire") + ttl = client.ttl("temp_key") + print(f"\nTemp key TTL: {ttl} seconds") + + # Pipeline operations + with client.pipeline() as pipe: + pipe.set("pipeline_key1", "value1") + pipe.set("pipeline_key2", "value2") + pipe.set("pipeline_key3", "value3") + pipe.execute() + print("\nPipeline operations completed") + + # Pub/Sub operations + pubsub = client.pubsub() + pubsub.subscribe("test_channel") + + # Publish a message + client.publish("test_channel", "Hello from Redis!") + + # Get the message + message = pubsub.get_message() + if message and message["type"] == "message": + print(f"\nReceived message: {message['data']}") + + # Clean up + pubsub.unsubscribe() + pubsub.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/registry/example_basic.py b/modules/registry/example_basic.py new file mode 100644 index 000000000..0bd136872 --- /dev/null +++ b/modules/registry/example_basic.py @@ -0,0 +1,92 @@ +import json + +import requests + +from testcontainers.registry import RegistryContainer + + +def basic_example(): + with 
RegistryContainer() as registry: + # Get connection parameters + host = registry.get_container_host_ip() + port = registry.get_exposed_port(registry.port) + registry_url = f"http://{host}:{port}" + print(f"Registry URL: {registry_url}") + + # Get registry version + version_response = requests.get(f"{registry_url}/v2/") + print(f"Registry version: {version_response.headers.get('Docker-Distribution-Api-Version')}") + + # List repositories + catalog_response = requests.get(f"{registry_url}/v2/_catalog") + repositories = catalog_response.json()["repositories"] + print("\nRepositories:") + print(json.dumps(repositories, indent=2)) + + # Create test repository + test_repo = "test-repo" + test_tag = "latest" + + # Create a simple manifest + manifest = { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1000, + "digest": "sha256:1234567890abcdef", + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 2000, + "digest": "sha256:abcdef1234567890", + } + ], + } + + # Upload manifest + manifest_url = f"{registry_url}/v2/{test_repo}/manifests/{test_tag}" + headers = {"Content-Type": "application/vnd.docker.distribution.manifest.v2+json"} + manifest_response = requests.put(manifest_url, json=manifest, headers=headers) + print(f"\nUploaded manifest: {manifest_response.status_code}") + + # List tags for repository + tags_url = f"{registry_url}/v2/{test_repo}/tags/list" + tags_response = requests.get(tags_url) + tags = tags_response.json()["tags"] + print("\nTags:") + print(json.dumps(tags, indent=2)) + + # Get manifest + manifest_response = requests.get(manifest_url, headers=headers) + manifest_data = manifest_response.json() + print("\nManifest:") + print(json.dumps(manifest_data, indent=2)) + + # Get manifest digest + digest = manifest_response.headers.get("Docker-Content-Digest") + print(f"\nManifest digest: {digest}") + + # Delete manifest + delete_response = requests.delete(manifest_url) + print(f"\nDeleted manifest: {delete_response.status_code}") + + # Verify deletion + verify_response = requests.get(manifest_url) + print(f"Manifest exists: {verify_response.status_code == 200}") + + # Get registry configuration + config_url = f"{registry_url}/v2/" + config_response = requests.get(config_url) + print("\nRegistry configuration:") + print(json.dumps(dict(config_response.headers), indent=2)) + + # Get registry health + health_url = f"{registry_url}/v2/" + health_response = requests.get(health_url) + print(f"\nRegistry health: {health_response.status_code == 200}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/scylla/example_basic.py b/modules/scylla/example_basic.py new file mode 100644 index 000000000..fa26369cc --- /dev/null +++ b/modules/scylla/example_basic.py @@ -0,0 +1,153 @@ +import json +from datetime import datetime + +from cassandra.auth import PlainTextAuthProvider +from cassandra.cluster import Cluster + +from testcontainers.scylla import ScyllaContainer + + +def basic_example(): + with ScyllaContainer() as scylla: + # Get connection parameters + host = scylla.get_container_host_ip() + port = scylla.get_exposed_port(scylla.port) + username = scylla.username + password = scylla.password + + # Create Scylla client + auth_provider = PlainTextAuthProvider(username=username, password=password) + cluster = Cluster([host], port=port, auth_provider=auth_provider) + session = cluster.connect() + 
print("Connected to Scylla") + + # Create keyspace + session.execute(""" + CREATE KEYSPACE IF NOT EXISTS test_keyspace + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} + """) + print("Created keyspace") + + # Use keyspace + session.set_keyspace("test_keyspace") + + # Create table + session.execute(""" + CREATE TABLE IF NOT EXISTS test_table ( + id UUID PRIMARY KEY, + name text, + value int, + category text, + created_at timestamp + ) + """) + print("Created table") + + # Insert test data + test_data = [ + { + "id": "550e8400-e29b-41d4-a716-446655440000", + "name": "test1", + "value": 100, + "category": "A", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440001", + "name": "test2", + "value": 200, + "category": "B", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440002", + "name": "test3", + "value": 300, + "category": "A", + "created_at": datetime.utcnow(), + }, + ] + + insert_stmt = session.prepare(""" + INSERT INTO test_table (id, name, value, category, created_at) + VALUES (uuid(), ?, ?, ?, ?) + """) + + for data in test_data: + session.execute(insert_stmt, (data["name"], data["value"], data["category"], data["created_at"])) + print("Inserted test data") + + # Query data + print("\nQuery results:") + rows = session.execute("SELECT * FROM test_table WHERE category = 'A' ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create materialized view + session.execute(""" + CREATE MATERIALIZED VIEW IF NOT EXISTS test_view AS + SELECT category, name, value, created_at + FROM test_table + WHERE category IS NOT NULL AND name IS NOT NULL + PRIMARY KEY (category, name) + """) + print("\nCreated materialized view") + + # Query materialized view + print("\nMaterialized view results:") + rows = session.execute("SELECT * FROM test_view WHERE category = 'A'") + for row in rows: + print( + json.dumps( + { + "category": row.category, + "name": row.name, + "value": row.value, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create secondary index + session.execute("CREATE INDEX IF NOT EXISTS ON test_table (value)") + print("\nCreated secondary index") + + # Query using secondary index + print("\nQuery using secondary index:") + rows = session.execute("SELECT * FROM test_table WHERE value > 150 ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Get table metadata + table_meta = session.cluster.metadata.keyspaces["test_keyspace"].tables["test_table"] + print("\nTable metadata:") + print(f"Columns: {[col.name for col in table_meta.columns.values()]}") + print(f"Partition key: {[col.name for col in table_meta.partition_key]}") + print(f"Clustering key: {[col.name for col in table_meta.clustering_key]}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/selenium/example_basic.py b/modules/selenium/example_basic.py new file mode 100644 index 000000000..f136126fb --- /dev/null +++ b/modules/selenium/example_basic.py @@ -0,0 +1,49 @@ +from selenium.webdriver.common.by import By +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.ui import WebDriverWait + +from 
testcontainers.selenium import SeleniumContainer + + +def basic_example(): + with SeleniumContainer() as selenium: + # Get the Selenium WebDriver + driver = selenium.get_driver() + + try: + # Navigate to a test page + driver.get("https://www.python.org") + print("Navigated to python.org") + + # Wait for the search box to be present + search_box = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "id-search-field"))) + + # Type in the search box + search_box.send_keys("selenium") + print("Entered search term") + + # Click the search button + search_button = driver.find_element(By.ID, "submit") + search_button.click() + print("Clicked search button") + + # Wait for search results + WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "list-recent-events"))) + + # Get search results + results = driver.find_elements(By.CSS_SELECTOR, ".list-recent-events li") + print("\nSearch results:") + for result in results[:3]: # Print first 3 results + print(result.text) + + # Take a screenshot + driver.save_screenshot("python_search_results.png") + print("\nScreenshot saved as 'python_search_results.png'") + + finally: + # Clean up + driver.quit() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/sftp/example_basic.py b/modules/sftp/example_basic.py new file mode 100644 index 000000000..f5d2058eb --- /dev/null +++ b/modules/sftp/example_basic.py @@ -0,0 +1,137 @@ +import json +import os +from datetime import datetime + +import paramiko + +from testcontainers.sftp import SftpContainer + + +def basic_example(): + with SftpContainer() as sftp: + # Get connection parameters + host = sftp.get_container_host_ip() + port = sftp.get_exposed_port(sftp.port) + username = sftp.username + password = sftp.password + + # Create SSH client + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(host, port, username, password) + print("Connected to SFTP server") + + # Create SFTP client + sftp_client = ssh.open_sftp() + + # Create test directory + test_dir = "/home/testuser/test_dir" + sftp_client.mkdir(test_dir) + print(f"Created directory: {test_dir}") + + # Create and upload test files + test_files = [ + {"name": "test1.txt", "content": "This is test file 1"}, + {"name": "test2.txt", "content": "This is test file 2"}, + {"name": "test3.txt", "content": "This is test file 3"}, + ] + + for file_info in test_files: + local_path = f"/tmp/{file_info['name']}" + remote_path = f"{test_dir}/{file_info['name']}" + + # Create local file + with open(local_path, "w") as f: + f.write(file_info["content"]) + + # Upload file + sftp_client.put(local_path, remote_path) + print(f"Uploaded file: {file_info['name']}") + + # Remove local file + os.remove(local_path) + + # List directory contents + print("\nDirectory contents:") + for entry in sftp_client.listdir_attr(test_dir): + print( + json.dumps( + { + "filename": entry.filename, + "size": entry.st_size, + "modified": datetime.fromtimestamp(entry.st_mtime).isoformat(), + }, + indent=2, + ) + ) + + # Download and read file + print("\nReading file contents:") + for file_info in test_files: + remote_path = f"{test_dir}/{file_info['name']}" + local_path = f"/tmp/{file_info['name']}" + + # Download file + sftp_client.get(remote_path, local_path) + + # Read and print contents + with open(local_path) as f: + content = f.read() + print(f"\n{file_info['name']}:") + print(content) + + # Remove local file + os.remove(local_path) + + # Create nested directory + nested_dir = 
f"{test_dir}/nested" + sftp_client.mkdir(nested_dir) + print(f"\nCreated nested directory: {nested_dir}") + + # Move file to nested directory + old_path = f"{test_dir}/test1.txt" + new_path = f"{nested_dir}/test1.txt" + sftp_client.rename(old_path, new_path) + print("Moved file to nested directory") + + # List nested directory + print("\nNested directory contents:") + for entry in sftp_client.listdir_attr(nested_dir): + print( + json.dumps( + { + "filename": entry.filename, + "size": entry.st_size, + "modified": datetime.fromtimestamp(entry.st_mtime).isoformat(), + }, + indent=2, + ) + ) + + # Get file attributes + print("\nFile attributes:") + for file_info in test_files: + remote_path = f"{test_dir}/{file_info['name']}" + try: + attrs = sftp_client.stat(remote_path) + print(f"\n{file_info['name']}:") + print( + json.dumps( + { + "size": attrs.st_size, + "permissions": oct(attrs.st_mode)[-3:], + "modified": datetime.fromtimestamp(attrs.st_mtime).isoformat(), + }, + indent=2, + ) + ) + except FileNotFoundError: + print(f"File not found: {file_info['name']}") + + # Clean up + sftp_client.close() + ssh.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/test_module_import/example_basic.py b/modules/test_module_import/example_basic.py new file mode 100644 index 000000000..7737333d9 --- /dev/null +++ b/modules/test_module_import/example_basic.py @@ -0,0 +1,144 @@ +import importlib +import pkgutil +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_module_import(): + try: + import test_module + + print("\nSuccessfully imported test_module") + print(f"Module version: {test_module.__version__}") + print(f"Module description: {test_module.__description__}") + except ImportError as e: + print(f"\nFailed to import test_module: {e}") + + +def test_submodule_import(): + try: + from test_module import submodule + + print("\nSuccessfully imported test_module.submodule") + print(f"Submodule function result: {submodule.test_function()}") + except ImportError as e: + print(f"\nFailed to import test_module.submodule: {e}") + + +def test_package_import(): + try: + import test_package + + print("\nSuccessfully imported test_package") + print(f"Package version: {test_package.__version__}") + except ImportError as e: + print(f"\nFailed to import test_package: {e}") + + +def test_module_reloading(): + try: + importlib.reload(test_module) + print("\nSuccessfully reloaded test_module") + except NameError: + print("\nCould not reload test_module (not imported)") + + +def test_version_import(): + try: + import test_module_v2 + + print("\nSuccessfully imported test_module_v2") + print(f"Module version: {test_module_v2.__version__}") + except ImportError as e: + print(f"\nFailed to import test_module_v2: {e}") + + +def test_deps_import(): + try: + import test_module_with_deps + + print("\nSuccessfully imported test_module_with_deps") + print(f"Dependencies: {test_module_with_deps.DEPENDENCIES}") + except ImportError as e: + print(f"\nFailed to import test_module_with_deps: {e}") + + +def test_env_import(): + try: + import test_module_with_env + + print("\nSuccessfully imported test_module_with_env") + print(f"Environment variables: {test_module_with_env.ENV_VARS}") + except ImportError as e: + print(f"\nFailed to import test_module_with_env: {e}") + + +def test_custom_loader_import(): + try: + import test_module_custom_loader + + print("\nSuccessfully imported test_module_custom_loader") + print(f"Loader type: 
{test_module_custom_loader.LOADER_TYPE}") + except ImportError as e: + print(f"\nFailed to import test_module_custom_loader: {e}") + + +def test_namespace_import(): + try: + import test_namespace_package + + print("\nSuccessfully imported test_namespace_package") + print(f"Namespace: {test_namespace_package.__namespace__}") + except ImportError as e: + print(f"\nFailed to import test_namespace_package: {e}") + + +def test_entry_points_import(): + try: + import test_module_with_entry_points + + print("\nSuccessfully imported test_module_with_entry_points") + print(f"Entry points: {test_module_with_entry_points.ENTRY_POINTS}") + except ImportError as e: + print(f"\nFailed to import test_module_with_entry_points: {e}") + + +def basic_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test various imports + test_module_import() + test_submodule_import() + test_package_import() + + # List all available modules + print("\nAvailable modules in Python path:") + for module_info in pkgutil.iter_modules(): + print(f"- {module_info.name}") + + # Test module reloading + test_module_reloading() + + # Test other imports + test_version_import() + test_deps_import() + test_env_import() + test_custom_loader_import() + test_namespace_import() + test_entry_points_import() + + # Clean up + if "test_module" in sys.modules: + del sys.modules["test_module"] + if "test_package" in sys.modules: + del sys.modules["test_package"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/trino/example_basic.py b/modules/trino/example_basic.py new file mode 100644 index 000000000..f2b351243 --- /dev/null +++ b/modules/trino/example_basic.py @@ -0,0 +1,66 @@ +import trino +from trino.exceptions import TrinoQueryError + +from testcontainers.trino import TrinoContainer + + +def basic_example(): + with TrinoContainer() as trino_container: + # Get connection parameters + host = trino_container.get_container_host_ip() + port = trino_container.get_exposed_port(trino_container.port) + + # Create Trino client + conn = trino.dbapi.connect(host=host, port=port, user="test", catalog="memory", schema="default") + cur = conn.cursor() + + # Create a test table + try: + cur.execute(""" + CREATE TABLE memory.default.test_table ( + id BIGINT, + name VARCHAR, + value DOUBLE + ) + """) + print("Created test table") + except TrinoQueryError as e: + print(f"Table might already exist: {e}") + + # Insert test data + test_data = [(1, "test1", 100.0), (2, "test2", 200.0), (3, "test3", 300.0)] + + for row in test_data: + cur.execute("INSERT INTO memory.default.test_table VALUES (%s, %s, %s)", row) + print("Inserted test data") + + # Query data + cur.execute("SELECT * FROM memory.default.test_table ORDER BY id") + rows = cur.fetchall() + + print("\nQuery results:") + for row in rows: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}") + + # Execute a more complex query + cur.execute(""" + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count + FROM memory.default.test_table + GROUP BY name + ORDER BY avg_value DESC + """) + + print("\nAggregation results:") + for row in cur.fetchall(): + print(f"Name: {row[0]}, Average Value: {row[1]}, Count: {row[2]}") + + # Clean up + cur.close() + conn.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/vault/example_basic.py b/modules/vault/example_basic.py new file mode 100644 index 
000000000..2dd873f7a --- /dev/null +++ b/modules/vault/example_basic.py @@ -0,0 +1,75 @@ +import json + +import hvac + +from testcontainers.vault import VaultContainer + + +def basic_example(): + with VaultContainer() as vault: + # Get connection parameters + host = vault.get_container_host_ip() + port = vault.get_exposed_port(vault.port) + token = vault.token + + # Create Vault client + client = hvac.Client(url=f"http://{host}:{port}", token=token) + print("Connected to Vault") + + # Enable KV secrets engine + client.sys.enable_secrets_engine(backend_type="kv", path="secret", options={"version": "2"}) + print("Enabled KV secrets engine") + + # Write secrets + test_secrets = { + "database": {"username": "admin", "password": "secret123", "host": "localhost"}, + "api": {"key": "api-key-123", "endpoint": "https://api.example.com"}, + } + + for path, secret in test_secrets.items(): + client.secrets.kv.v2.create_or_update_secret(path=path, secret=secret) + print(f"Created secret at: {path}") + + # Read secrets + print("\nReading secrets:") + for path in test_secrets: + secret = client.secrets.kv.v2.read_secret_version(path=path) + print(f"\nSecret at {path}:") + print(json.dumps(secret["data"]["data"], indent=2)) + + # Enable and configure AWS secrets engine + client.sys.enable_secrets_engine(backend_type="aws", path="aws") + print("\nEnabled AWS secrets engine") + + # Configure AWS credentials + client.secrets.aws.configure_root( + access_key="test-access-key", secret_key="test-secret-key", region="us-east-1" + ) + print("Configured AWS credentials") + + # Create a role + client.secrets.aws.create_role( + name="test-role", + credential_type="iam_user", + policy_document=json.dumps( + { + "Version": "2012-10-17", + "Statement": [{"Effect": "Allow", "Action": "s3:ListAllMyBuckets", "Resource": "*"}], + } + ), + ) + print("Created AWS role") + + # Generate AWS credentials + aws_creds = client.secrets.aws.generate_credentials(name="test-role") + print("\nGenerated AWS credentials:") + print(json.dumps(aws_creds["data"], indent=2)) + + # List enabled secrets engines + print("\nEnabled secrets engines:") + for path, engine in client.sys.list_mounted_secrets_engines()["data"].items(): + print(f"Path: {path}, Type: {engine['type']}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/weaviate/example_basic.py b/modules/weaviate/example_basic.py new file mode 100644 index 000000000..0c7097723 --- /dev/null +++ b/modules/weaviate/example_basic.py @@ -0,0 +1,143 @@ +import json +from datetime import datetime + +import weaviate + +from testcontainers.weaviate import WeaviateContainer + + +def basic_example(): + with WeaviateContainer() as weaviate_container: + # Get connection parameters + host = weaviate_container.get_container_host_ip() + port = weaviate_container.get_exposed_port(weaviate_container.port) + + # Create Weaviate client + client = weaviate.Client( + url=f"http://{host}:{port}", auth_client_secret=weaviate.AuthApiKey(api_key=weaviate_container.api_key) + ) + print("Connected to Weaviate") + + # Create schema + schema = { + "classes": [ + { + "class": "Article", + "description": "A class for news articles", + "vectorizer": "text2vec-transformers", + "properties": [ + {"name": "title", "dataType": ["text"], "description": "The title of the article"}, + {"name": "content", "dataType": ["text"], "description": "The content of the article"}, + {"name": "category", "dataType": ["text"], "description": "The category of the article"}, + {"name": "publishedAt", "dataType": 
["date"], "description": "When the article was published"}, + ], + } + ] + } + + client.schema.create(schema) + print("Created schema") + + # Add objects + articles = [ + { + "title": "AI Breakthrough in Natural Language Processing", + "content": "Researchers have made significant progress in understanding and generating human language.", + "category": "Technology", + "publishedAt": datetime.utcnow().isoformat(), + }, + { + "title": "New Study Shows Benefits of Exercise", + "content": "Regular physical activity has been linked to improved mental health and longevity.", + "category": "Health", + "publishedAt": datetime.utcnow().isoformat(), + }, + { + "title": "Global Climate Summit Reaches Agreement", + "content": "World leaders have agreed on new measures to combat climate change.", + "category": "Environment", + "publishedAt": datetime.utcnow().isoformat(), + }, + ] + + for article in articles: + client.data_object.create(data_object=article, class_name="Article") + print("Added test articles") + + # Query objects + result = client.query.get("Article", ["title", "category", "publishedAt"]).do() + print("\nAll articles:") + print(json.dumps(result, indent=2)) + + # Semantic search + semantic_result = ( + client.query.get("Article", ["title", "content", "category"]) + .with_near_text({"concepts": ["artificial intelligence"]}) + .with_limit(2) + .do() + ) + print("\nSemantic search results:") + print(json.dumps(semantic_result, indent=2)) + + # Filtered search + filtered_result = ( + client.query.get("Article", ["title", "category"]) + .with_where({"path": ["category"], "operator": "Equal", "valueText": "Technology"}) + .do() + ) + print("\nFiltered search results:") + print(json.dumps(filtered_result, indent=2)) + + # Create cross-reference + cross_ref_schema = { + "classes": [ + { + "class": "Author", + "description": "A class for article authors", + "vectorizer": "text2vec-transformers", + "properties": [ + {"name": "name", "dataType": ["text"], "description": "The name of the author"}, + {"name": "writes", "dataType": ["Article"], "description": "Articles written by the author"}, + ], + } + ] + } + + client.schema.create(cross_ref_schema) + print("\nCreated cross-reference schema") + + # Add author with cross-reference + author_uuid = client.data_object.create(data_object={"name": "John Doe"}, class_name="Author") + + article_uuid = result["data"]["Get"]["Article"][0]["_additional"]["id"] + client.data_object.reference.add( + from_uuid=author_uuid, + from_property_name="writes", + to_uuid=article_uuid, + from_class_name="Author", + to_class_name="Article", + ) + print("Added author with cross-reference") + + # Query with cross-reference + cross_ref_result = ( + client.query.get("Author", ["name"]) + .with_additional(["id"]) + .with_references({"writes": {"properties": ["title", "category"]}}) + .do() + ) + print("\nCross-reference query results:") + print(json.dumps(cross_ref_result, indent=2)) + + # Create aggregation + agg_result = client.query.aggregate("Article").with_fields("category").with_meta_count().do() + print("\nAggregation results:") + print(json.dumps(agg_result, indent=2)) + + # Clean up + client.schema.delete_all() + print("\nCleaned up schema") + + +if __name__ == "__main__": + basic_example() diff --git a/pyproject.toml b/pyproject.toml index 360fe31b2..c8fe4bf2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -277,6 +277,9 @@ ignore = [ "A004", ] +[tool.ruff.lint.per-file-ignores] +"**/example_*.py" = ["T201"] + [tool.ruff.lint.pyupgrade] keep-runtime-typing = 
true From 4fa960b0f29521d31df3dca69d5f2c1c4925783a Mon Sep 17 00:00:00 2001 From: Terry Date: Wed, 14 May 2025 16:37:47 -0300 Subject: [PATCH 08/16] docs: fixing one example module and generating feature and quickstart docs --- docs/features/authentication.md | 109 ++++++++++ docs/features/building_images.md | 190 ++++++++++++++++++ docs/features/configuration.md | 153 +++++++++++++- docs/features/container_logs.md | 151 ++++++++++++++ docs/features/copying_data.md | 166 +++++++++++++++ docs/features/creating_container.md | 134 +++++++++--- docs/features/docker_compose.md | 117 +++++++++++ docs/features/executing_commands.md | 157 +++++++++++++++ docs/features/networking.md | 137 +++++++++++++ docs/features/wait_strategies.md | 122 +++++++++++ docs/modules/test_module_import.md | 83 +++++++- docs/quickstart.md | 134 ++++++------ mkdocs.yml | 12 +- modules/test_module_import/example_basic.py | 144 ------------- .../examples/01_basic_import.py | 58 ++++++ .../examples/02_module_reloading.py | 41 ++++ .../examples/03_version_specific.py | 34 ++++ .../examples/04_dependencies_and_env.py | 48 +++++ .../examples/05_advanced_features.py | 59 ++++++ pyproject.toml | 1 + 20 files changed, 1782 insertions(+), 268 deletions(-) create mode 100644 docs/features/authentication.md create mode 100644 docs/features/building_images.md create mode 100644 docs/features/container_logs.md create mode 100644 docs/features/copying_data.md create mode 100644 docs/features/docker_compose.md create mode 100644 docs/features/executing_commands.md create mode 100644 docs/features/networking.md create mode 100644 docs/features/wait_strategies.md delete mode 100644 modules/test_module_import/example_basic.py create mode 100644 modules/test_module_import/examples/01_basic_import.py create mode 100644 modules/test_module_import/examples/02_module_reloading.py create mode 100644 modules/test_module_import/examples/03_version_specific.py create mode 100644 modules/test_module_import/examples/04_dependencies_and_env.py create mode 100644 modules/test_module_import/examples/05_advanced_features.py diff --git a/docs/features/authentication.md b/docs/features/authentication.md new file mode 100644 index 000000000..138fd6816 --- /dev/null +++ b/docs/features/authentication.md @@ -0,0 +1,109 @@ +# Docker Authentication + +Testcontainers-Python supports various methods of authenticating with Docker registries. This is essential when working with private registries or when you need to pull images that require authentication. + +## Basic Authentication + +The simplest way to authenticate is using Docker's built-in credential store. 
Testcontainers-Python will automatically use credentials stored by Docker: + +```python +from testcontainers.generic import GenericContainer + +# Docker will automatically use stored credentials +container = GenericContainer("private.registry.com/myimage:latest") +``` + +## Environment Variables + +You can provide registry credentials using environment variables: + +```bash +# Set registry credentials +export DOCKER_USERNAME=myuser +export DOCKER_PASSWORD=mypassword +export DOCKER_REGISTRY=private.registry.com +``` + +## Configuration File + +You can also configure authentication in the `.testcontainers.properties` file: + +```properties +registry.username=myuser +registry.password=mypassword +registry.url=private.registry.com +``` + +## Programmatic Authentication + +For more control, you can provide credentials programmatically: + +```python +from testcontainers.core.config import TestcontainersConfiguration + +# Configure registry credentials +config = TestcontainersConfiguration() +config.registry_username = "myuser" +config.registry_password = "mypassword" +config.registry_url = "private.registry.com" + +# Use the configuration +container = GenericContainer("private.registry.com/myimage:latest") +``` + +## AWS ECR Authentication + +For Amazon Elastic Container Registry (ECR), Testcontainers-Python supports automatic authentication: + +```python +from testcontainers.generic import GenericContainer + +# ECR authentication is handled automatically +container = GenericContainer("123456789012.dkr.ecr.region.amazonaws.com/myimage:latest") +``` + +## Google Container Registry (GCR) + +For Google Container Registry, you can use Google Cloud credentials: + +```python +from testcontainers.generic import GenericContainer + +# GCR authentication using Google Cloud credentials +container = GenericContainer("gcr.io/myproject/myimage:latest") +``` + +## Azure Container Registry (ACR) + +For Azure Container Registry, you can use Azure credentials: + +```python +from testcontainers.generic import GenericContainer + +# ACR authentication using Azure credentials +container = GenericContainer("myregistry.azurecr.io/myimage:latest") +``` + +## Best Practices + +1. Never commit credentials to version control +2. Use environment variables or secure credential stores +3. Rotate credentials regularly +4. Use the least privileged credentials necessary +5. Consider using Docker credential helpers +6. Use registry-specific authentication when available +7. Keep credentials secure and encrypted +8. Use separate credentials for different environments + +## Troubleshooting + +If you encounter authentication issues: + +1. Verify your credentials are correct +2. Check if the registry is accessible +3. Ensure your Docker daemon is running +4. Check Docker's credential store +5. Verify network connectivity +6. Check for any proxy settings +7. Look for any rate limiting +8. Check registry-specific requirements diff --git a/docs/features/building_images.md b/docs/features/building_images.md new file mode 100644 index 000000000..d59e14a2b --- /dev/null +++ b/docs/features/building_images.md @@ -0,0 +1,190 @@ +# Building Images from Dockerfiles + +Testcontainers-Python allows you to build Docker images from Dockerfiles during test execution. This is useful when you need to test custom images or when you want to ensure your Dockerfile builds correctly. 
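+As a quick, self-contained preview of the options covered below, the sketch here generates a throwaway Dockerfile at runtime and builds it. It is only a sketch: it reuses the `build_image` helper and `GenericContainer` import shown throughout this page, and the base image, tag, and temp-directory layout are arbitrary choices.
+
+```python
+import tempfile
+from pathlib import Path
+
+from testcontainers.core.container import build_image
+from testcontainers.generic import GenericContainer
+
+# Generate a minimal build context on the fly so the test needs no
+# on-disk fixtures.
+build_dir = Path(tempfile.mkdtemp(prefix="tc-build-"))
+(build_dir / "Dockerfile").write_text(
+    'FROM alpine:3.19\nCMD ["sh", "-c", "echo ready && sleep 300"]\n'
+)
+
+image = build_image(path=str(build_dir), tag="myapp:smoke")
+
+with GenericContainer(image) as container:
+    # The container starts from the freshly built image and is torn down
+    # automatically when the block exits.
+    pass
+```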
+ +## Basic Image Building + +The simplest way to build an image is using the `build_image` function: + +```python +from testcontainers.core.container import build_image + +# Build an image from a Dockerfile +image = build_image( + path="path/to/dockerfile/directory", + tag="myapp:test" +) + +# Use the built image +with GenericContainer(image) as container: + # Your test code here + pass +``` + +## Building with Options + +You can customize the build process with various options: + +```python +# Build with specific Dockerfile +image = build_image( + path="path/to/dockerfile/directory", + dockerfile="Dockerfile.test", + tag="myapp:test" +) + +# Build with build arguments +image = build_image( + path="path/to/dockerfile/directory", + buildargs={ + "VERSION": "1.0.0", + "ENVIRONMENT": "test" + }, + tag="myapp:test" +) + +# Build with target stage +image = build_image( + path="path/to/dockerfile/directory", + target="test", + tag="myapp:test" +) +``` + +## Building with Context + +You can specify a build context: + +```python +# Build with specific context +image = build_image( + path="path/to/dockerfile/directory", + context="path/to/build/context", + tag="myapp:test" +) +``` + +## Building with Cache + +You can control build caching: + +```python +# Build without cache +image = build_image( + path="path/to/dockerfile/directory", + nocache=True, + tag="myapp:test" +) + +# Build with specific cache from +image = build_image( + path="path/to/dockerfile/directory", + cache_from=["myapp:latest"], + tag="myapp:test" +) +``` + +## Building with Platform + +You can specify the target platform: + +```python +# Build for specific platform +image = build_image( + path="path/to/dockerfile/directory", + platform="linux/amd64", + tag="myapp:test" +) +``` + +## Building with Labels + +You can add labels to the built image: + +```python +# Build with labels +image = build_image( + path="path/to/dockerfile/directory", + labels={ + "test": "true", + "environment": "test" + }, + tag="myapp:test" +) +``` + +## Best Practices + +1. Use appropriate tags +2. Clean up built images +3. Use build arguments for configuration +4. Consider build context size +5. Use appropriate build caching +6. Handle build failures +7. Use appropriate platforms +8. Add meaningful labels + +## Common Use Cases + +### Building Test Images + +```python +def test_custom_image(): + # Build test image + image = build_image( + path="path/to/dockerfile/directory", + buildargs={"TEST_MODE": "true"}, + tag="myapp:test" + ) + + # Use the test image + with GenericContainer(image) as container: + # Your test code here + pass +``` + +### Building with Dependencies + +```python +def test_with_dependencies(): + # Build base image + base_image = build_image( + path="path/to/base/dockerfile/directory", + tag="myapp:base" + ) + + # Build test image using base + test_image = build_image( + path="path/to/test/dockerfile/directory", + cache_from=[base_image], + tag="myapp:test" + ) +``` + +### Building for Different Environments + +```python +def test_different_environments(): + # Build for different environments + environments = ["dev", "test", "staging"] + + for env in environments: + image = build_image( + path="path/to/dockerfile/directory", + buildargs={"ENVIRONMENT": env}, + tag=f"myapp:{env}" + ) +``` + +## Troubleshooting + +If you encounter issues with image building: + +1. Check Dockerfile syntax +2. Verify build context +3. Check for missing files +4. Verify build arguments +5. Check for platform compatibility +6. Verify cache settings +7. 
Check for resource limits +8. Verify Docker daemon state diff --git a/docs/features/configuration.md b/docs/features/configuration.md index b2a711dd0..835bedb79 100644 --- a/docs/features/configuration.md +++ b/docs/features/configuration.md @@ -1,19 +1,74 @@ # Custom configuration -..... +You can override some default properties if your environment requires that. + +## Configuration locations + +The configuration will be loaded from multiple locations. Properties are considered in the following order: + +1. Environment variables +2. `.testcontainers.properties` in user's home folder. Example locations: + **Linux:** `/home/myuser/.testcontainers.properties` + **Windows:** `C:/Users/myuser/.testcontainers.properties` + **macOS:** `/Users/myuser/.testcontainers.properties` + +Note that when using environment variables, configuration property names should be set in upper case with underscore separators, preceded by `TESTCONTAINERS_` - e.g. `ryuk.disabled` becomes `TESTCONTAINERS_RYUK_DISABLED`. + +### Supported properties + +Testcontainers-Python provides a configuration class to represent the settings: + +```python +from testcontainers.core.config import TestcontainersConfiguration + +# Default configuration +config = TestcontainersConfiguration() + +# Access configuration values +max_tries = config.max_tries +sleep_time = config.sleep_time +ryuk_image = config.ryuk_image +ryuk_privileged = config.ryuk_privileged +ryuk_disabled = config.ryuk_disabled +ryuk_docker_socket = config.ryuk_docker_socket +ryuk_reconnection_timeout = config.ryuk_reconnection_timeout +tc_host_override = config.tc_host_override +``` + +The following properties are supported: + +| Property | Environment Variable | Description | Default | +| --------------------------- | ------------------------------------------- | ---------------------------------------------------- | ------------------------- | +| `tc.host` | `TC_HOST` or `TESTCONTAINERS_HOST_OVERRIDE` | Testcontainers host address | - | +| `docker.host` | `DOCKER_HOST` | Address of the Docker daemon | - | +| `docker.tls.verify` | `DOCKER_TLS_VERIFY` | Enable/disable TLS verification | 0 | +| `docker.cert.path` | `DOCKER_CERT_PATH` | Path to Docker certificates | - | +| `ryuk.disabled` | `TESTCONTAINERS_RYUK_DISABLED` | Disable the Garbage Collector | false | +| `ryuk.container.privileged` | `TESTCONTAINERS_RYUK_PRIVILEGED` | Run Ryuk in privileged mode | false | +| `ryuk.reconnection.timeout` | `RYUK_RECONNECTION_TIMEOUT` | Time to wait before reconnecting | 10s | +| `ryuk.image` | `RYUK_CONTAINER_IMAGE` | Ryuk container image | testcontainers/ryuk:0.8.1 | +| `connection.mode` | `TESTCONTAINERS_CONNECTION_MODE` | Connection mode (bridge_ip, gateway_ip, docker_host) | - | + +Additional configuration options: + +| Environment Variable | Description | Default | +| --------------------- | ------------------------------------------- | ------- | +| `TC_MAX_TRIES` | Maximum number of connection attempts | 120 | +| `TC_POOLING_INTERVAL` | Time between connection attempts | 1 | +| `DOCKER_AUTH_CONFIG` | Docker authentication config (experimental) | - | ## Docker host detection -_Testcontainers for Go_ will attempt to detect the Docker environment and configure everything to work automatically. +Testcontainers-Python will attempt to detect the Docker environment and configure everything to work automatically. -However, sometimes customization is required. _Testcontainers for Go_ will respect the following order: +However, sometimes customization is required. 
Testcontainers-Python will respect the following order: 1. Read the **tc.host** property in the `~/.testcontainers.properties` file. E.g. `tc.host=tcp://my.docker.host:1234` -2. Read the **DOCKER_HOST** environment variable. E.g. `DOCKER_HOST=unix:///var/run/docker.sock` - See [Docker environment variables](https://docs.docker.com/engine/reference/commandline/cli/#environment-variables) for more information. +2. Read the **TC_HOST** or **TESTCONTAINERS_HOST_OVERRIDE** environment variable. E.g. `TC_HOST=tcp://my.docker.host:1234` -3. Read the Go context for the **DOCKER_HOST** key. E.g. `ctx.Value("DOCKER_HOST")`. This is used internally for the library to pass the Docker host to the resource reaper. +3. Read the **DOCKER_HOST** environment variable. E.g. `DOCKER_HOST=unix:///var/run/docker.sock` + See [Docker environment variables](https://docs.docker.com/engine/reference/commandline/cli/#environment-variables) for more information. 4. Read the default Docker socket path, without the unix schema. E.g. `/var/run/docker.sock` @@ -21,9 +76,85 @@ However, sometimes customization is required. _Testcontainers for Go_ will respe 6. Read the rootless Docker socket path, checking in the following alternative locations: - 1. `${XDG_RUNTIME_DIR}/.docker/run/docker.sock`. - 2. `${HOME}/.docker/run/docker.sock`. - 3. `${HOME}/.docker/desktop/docker.sock`. - 4. `/run/user/${UID}/docker.sock`, where `${UID}` is the user ID of the current user. + 1. `${XDG_RUNTIME_DIR}/.docker/run/docker.sock` + 2. `${HOME}/.docker/run/docker.sock` + 3. `${HOME}/.docker/desktop/docker.sock` + 4. `/run/user/${UID}/docker.sock`, where `${UID}` is the user ID of the current user + +7. The library will raise a `DockerHostError` if none of the above are set, meaning that the Docker host was not detected. + +## Docker socket path detection + +Testcontainers-Python will attempt to detect the Docker socket path and configure everything to work automatically. + +However, sometimes customization is required. Testcontainers-Python will respect the following order: + +1. Read the **TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE** environment variable. Path to Docker's socket. Used by Ryuk, Docker Compose, and a few other containers that need to perform Docker actions. + Example: `/var/run/docker-alt.sock` + +2. If the Operating System retrieved by the Docker client is "Docker Desktop", and the host is running on Windows, it will return the `//var/run/docker.sock` UNC Path. Else return the default docker socket path for rootless docker. + +3. Get the current Docker Host from the existing strategies: see Docker host detection. + +4. If the socket contains the unix schema, the schema is removed (e.g. `unix:///var/run/docker.sock` -> `/var/run/docker.sock`) + +5. Else, the default location of the docker socket is used: `/var/run/docker.sock` + +The library will raise a `DockerHostError` if the Docker host cannot be discovered. + +## Connection Modes + +Testcontainers-Python supports different connection modes that determine how containers are accessed: + +1. `bridge_ip` (default): Uses the bridge network IP address +2. `gateway_ip`: Uses the gateway IP address +3. `docker_host`: Uses the Docker host address + +You can set the connection mode using the `TESTCONTAINERS_CONNECTION_MODE` environment variable or the `connection.mode` property in `.testcontainers.properties`. 
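+For instance, a rough sketch of forcing the gateway IP from test code, which is typically what you need when the tests themselves run inside a container. One assumption to flag: the library reads this variable when its configuration is first loaded, so the override must be in place before any `testcontainers` import runs.
+
+```python
+import os
+
+# Must be set before testcontainers reads its configuration.
+os.environ["TESTCONTAINERS_CONNECTION_MODE"] = "gateway_ip"
+
+from testcontainers.redis import RedisContainer
+
+with RedisContainer() as redis_container:
+    # Resolved via the gateway IP rather than the default bridge IP.
+    host = redis_container.get_container_host_ip()
+    port = redis_container.get_exposed_port(redis_container.port)
+    print(f"Connecting via {host}:{port}")
+```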
+ +## Example configuration file + +Here's an example of a `.testcontainers.properties` file: + +```properties +# Docker host configuration +docker.host=tcp://my.docker.host:1234 +docker.tls.verify=1 +docker.cert.path=/path/to/certs + +# Ryuk configuration +ryuk.disabled=false +ryuk.container.privileged=true +ryuk.reconnection.timeout=30s +ryuk.image=testcontainers/ryuk:0.8.1 + +# Testcontainers configuration +tc.host=tcp://my.testcontainers.host:1234 +connection.mode=bridge_ip +``` + +## Using configuration in code + +You can access and modify the configuration programmatically: + +```python +from testcontainers.core.config import testcontainers_config + +# Access configuration values +max_tries = testcontainers_config.max_tries +sleep_time = testcontainers_config.sleep_time + +# The configuration is read-only by default +# Changes should be made through environment variables or .testcontainers.properties +``` + +## Best practices -7. The library panics if none of the above are set, meaning that the Docker host was not detected. +1. Use environment variables for CI/CD environments +2. Use `.testcontainers.properties` for local development +3. Set appropriate timeouts for your environment +4. Enable verbose logging when debugging +5. Consider disabling Ryuk if your environment already handles container cleanup +6. Use privileged mode for Ryuk only when necessary +7. Set proper TLS verification and certificate paths for secure environments +8. Choose the appropriate connection mode for your environment diff --git a/docs/features/container_logs.md b/docs/features/container_logs.md new file mode 100644 index 000000000..c4fe06069 --- /dev/null +++ b/docs/features/container_logs.md @@ -0,0 +1,151 @@ +# Container Logs + +Testcontainers-Python provides several ways to access and follow container logs. This is essential for debugging and monitoring container behavior during tests. 
+ +## Basic Log Access + +The simplest way to access logs is using the `get_logs` method: + +```python +from testcontainers.generic import GenericContainer + +with GenericContainer("nginx:alpine") as container: + # Get all logs + stdout, stderr = container.get_logs() + print(f"STDOUT: {stdout}") + print(f"STDERR: {stderr}") +``` + +## Following Logs + +To follow logs in real-time: + +```python +with GenericContainer("nginx:alpine") as container: + # Follow logs + for line in container.follow_logs(): + print(line) # Each line as it appears +``` + +## Log Access with Options + +You can customize log access with various options: + +```python +with GenericContainer("nginx:alpine") as container: + # Get logs with timestamps + stdout, stderr = container.get_logs(timestamps=True) + + # Get logs since a specific time + import datetime + since = datetime.datetime.now() - datetime.timedelta(minutes=5) + stdout, stderr = container.get_logs(since=since) + + # Get logs with tail + stdout, stderr = container.get_logs(tail=100) # Last 100 lines +``` + +## Log Streams + +You can access specific log streams: + +```python +with GenericContainer("nginx:alpine") as container: + # Get only stdout + stdout, _ = container.get_logs() + + # Get only stderr + _, stderr = container.get_logs() + + # Get both streams + stdout, stderr = container.get_logs() +``` + +## Log Following with Callback + +You can use a callback function to process logs: + +```python +def log_callback(line): + print(f"Log line: {line}") + +with GenericContainer("nginx:alpine") as container: + # Follow logs with callback + container.follow_logs(callback=log_callback) +``` + +## Log Access in Tests + +Here's how to use logs in tests: + +```python +import pytest +from testcontainers.generic import GenericContainer + +def test_container_logs(): + with GenericContainer("nginx:alpine") as container: + # Wait for specific log message + for line in container.follow_logs(): + if "Configuration complete" in line: + break + + # Verify log content + stdout, stderr = container.get_logs() + assert "Configuration complete" in stdout +``` + +## Best Practices + +1. Use appropriate log levels +2. Handle log streams separately +3. Use timestamps for debugging +4. Consider log rotation +5. Use log following for real-time monitoring +6. Clean up log resources +7. Use appropriate log formats +8. Consider log volume + +## Common Use Cases + +### Application Startup Verification + +```python +with GenericContainer("myapp:latest") as container: + # Wait for application to start + for line in container.follow_logs(): + if "Application started" in line: + break +``` + +### Error Detection + +```python +with GenericContainer("myapp:latest") as container: + # Monitor for errors + for line in container.follow_logs(): + if "ERROR" in line: + print(f"Error detected: {line}") +``` + +### Performance Monitoring + +```python +with GenericContainer("myapp:latest") as container: + # Monitor performance metrics + for line in container.follow_logs(): + if "Performance" in line: + print(f"Performance metric: {line}") +``` + +## Troubleshooting + +If you encounter issues with log access: + +1. Check container state +2. Verify log configuration +3. Check for log rotation +4. Verify log permissions +5. Check for log volume +6. Verify log format +7. Check for log buffering +8. 
Verify log drivers diff --git a/docs/features/copying_data.md b/docs/features/copying_data.md new file mode 100644 index 000000000..8623ec73f --- /dev/null +++ b/docs/features/copying_data.md @@ -0,0 +1,166 @@ +# Copying Data into Containers + +Testcontainers-Python provides several ways to copy data into containers. This is essential for setting up test data, configuration files, or any other files needed for your tests. + +## Basic File Copy + +The simplest way to copy a file is using the `copy_file_to_container` method: + +```python +from testcontainers.generic import GenericContainer + +with GenericContainer("alpine:latest") as container: + # Copy a single file + container.copy_file_to_container( + local_path="path/to/local/file.txt", + container_path="/path/in/container/file.txt" + ) +``` + +## Copying Multiple Files + +You can copy multiple files at once: + +```python +with GenericContainer("alpine:latest") as container: + # Copy multiple files + container.copy_files_to_container([ + ("path/to/local/file1.txt", "/path/in/container/file1.txt"), + ("path/to/local/file2.txt", "/path/in/container/file2.txt") + ]) +``` + +## Copying Directories + +You can copy entire directories: + +```python +with GenericContainer("alpine:latest") as container: + # Copy a directory + container.copy_directory_to_container( + local_path="path/to/local/directory", + container_path="/path/in/container/directory" + ) +``` + +## Copying with Permissions + +You can set permissions for copied files: + +```python +with GenericContainer("alpine:latest") as container: + # Copy file with specific permissions + container.copy_file_to_container( + local_path="path/to/local/file.txt", + container_path="/path/in/container/file.txt", + permissions=0o644 # rw-r--r-- + ) +``` + +## Copying with User + +You can specify the owner of copied files: + +```python +with GenericContainer("alpine:latest") as container: + # Copy file with specific owner + container.copy_file_to_container( + local_path="path/to/local/file.txt", + container_path="/path/in/container/file.txt", + user="nobody" + ) +``` + +## Copying from Memory + +You can copy data directly from memory: + +```python +with GenericContainer("alpine:latest") as container: + # Copy data from memory + data = b"Hello, World!" + container.copy_data_to_container( + data=data, + container_path="/path/in/container/file.txt" + ) +``` + +## Best Practices + +1. Use appropriate file permissions +2. Clean up copied files +3. Use absolute paths +4. Handle file encoding +5. Consider file size +6. Use appropriate owners +7. Handle file conflicts +8. 
Consider security implications + +## Common Use Cases + +### Setting Up Test Data + +```python +def test_with_data(): + with GenericContainer("alpine:latest") as container: + # Copy test data + container.copy_file_to_container( + local_path="tests/data/test_data.json", + container_path="/app/data/test_data.json" + ) + + # Copy configuration + container.copy_file_to_container( + local_path="tests/config/test_config.yaml", + container_path="/app/config/config.yaml" + ) +``` + +### Setting Up Application Files + +```python +def test_application(): + with GenericContainer("myapp:latest") as container: + # Copy application files + container.copy_directory_to_container( + local_path="app/static", + container_path="/app/static" + ) + + # Copy templates + container.copy_directory_to_container( + local_path="app/templates", + container_path="/app/templates" + ) +``` + +### Setting Up Database Files + +```python +def test_database(): + with GenericContainer("postgres:latest") as container: + # Copy database initialization script + container.copy_file_to_container( + local_path="tests/db/init.sql", + container_path="/docker-entrypoint-initdb.d/init.sql" + ) + + # Copy test data + container.copy_file_to_container( + local_path="tests/db/test_data.sql", + container_path="/docker-entrypoint-initdb.d/test_data.sql" + ) +``` + +## Troubleshooting + +If you encounter issues with copying data: + +1. Check file permissions +2. Verify file paths +3. Check file encoding +4. Verify file size +5. Check container state +6. Verify user permissions +7. Check for file conflicts +8. Verify disk space diff --git a/docs/features/creating_container.md b/docs/features/creating_container.md index 09cb7bc5d..eca96f097 100644 --- a/docs/features/creating_container.md +++ b/docs/features/creating_container.md @@ -7,68 +7,136 @@ Testcontainers-Python is a thin wrapper around Docker designed for use in tests. - Log services (e.g. Logstash, Kibana) - Other services developed by your team/organization which are already Dockerized -## Run +## Basic Container Creation -- Since Testcontainers-Python [v3.10.0]() +The simplest way to create a container is using the `GenericContainer` class: -You can use the high-level run helper to start a container in one call, similar to Docker’s docker run. Under the hood it builds a temporary network, mounts files or tmpfs, and waits for readiness for you. +```python +from testcontainers.generic import GenericContainer + +def test_basic_container(): + with GenericContainer("nginx:alpine") as nginx: + # Get container connection details + host = nginx.get_container_host_ip() + port = nginx.get_exposed_port(80) + + # Your test code here + # For example, make HTTP requests to the nginx server + import requests + response = requests.get(f"http://{host}:{port}") + assert response.status_code == 200 +``` + +## Advanced Container Configuration + +For more complex scenarios, you can use the `run` helper function which provides a high-level interface similar to Docker's `docker run` command. 
This helper automatically handles: + +- Creating temporary networks +- Mounting files or tmpfs +- Waiting for container readiness +- Container cleanup + +Here's an example showing various configuration options: ```python import io import pytest -from docker import DockerClient from testcontainers.core.container import run from testcontainers.core.network import DockerNetwork from testcontainers.core.waiting_utils import wait_for_logs -def test_nginx_run(): +def test_nginx_advanced(): # Create an isolated network network = DockerNetwork() network.create() pytest.addfinalizer(network.remove) - # File to mount into the container - test_file_content = b"Hello from file!" + # Create a test file to mount + test_file_content = b"Hello from test file!" host_file = io.BytesIO(test_file_content) # Run the container with various options container = run( image="nginx:alpine", network=network.name, - files=[(host_file, "/tmp/file.txt")], - tmpfs={"/tmp": "rw"}, - labels={"testcontainers.label": "true"}, - environment={"TEST": "true"}, - ports={"80/tcp": None}, # expose port 80 - command=["/bin/sh", "-c", "echo hello world"], - wait=wait_for_logs("Configuration complete; ready for start"), - startup_timeout=5, + files=[(host_file, "/usr/share/nginx/html/test.txt")], # Mount file + tmpfs={"/tmp": "rw"}, # Mount tmpfs + labels={"testcontainers.label": "true"}, # Add labels + environment={"TEST": "true"}, # Set environment variables + ports={"80/tcp": None}, # Expose port 80 + command=["nginx", "-g", "daemon off;"], # Override default command + wait=wait_for_logs("Configuration complete; ready for start"), # Wait for logs + startup_timeout=30, # Set startup timeout ) + # Ensure cleanup pytest.addfinalizer(container.stop) pytest.addfinalizer(container.remove) - # Inspect runtime state - client = DockerClient.from_env() - info = client.containers.get(container.id).attrs + # Test the container + host = container.get_container_host_ip() + port = container.get_exposed_port(80) + + # Verify the mounted file + import requests + response = requests.get(f"http://{host}:{port}/test.txt") + assert response.text == "Hello from test file!" +``` + +## Container Lifecycle Management + +Testcontainers-Python provides several ways to manage container lifecycle: - # Networks - aliases = info["NetworkSettings"]["Networks"][network.name]["Aliases"] - assert "nginx-alias" in aliases +1. Using context manager (recommended): - # Environment - env = info["Config"]["Env"] - assert any(e.startswith("TEST=true") for e in env) +```python +with GenericContainer("nginx:alpine") as container: + # Container is automatically started and stopped + pass +``` - # Tmpfs - tmpfs = info["HostConfig"]["Tmpfs"].get("/tmp") - assert tmpfs == "" +2. Manual management: - # Labels - assert info["Config"]["Labels"]["testcontainers.label"] == "true" +```python +container = GenericContainer("nginx:alpine") +container.start() +try: + # Your test code here + pass +finally: + container.stop() + container.remove() +``` - # File copy - bits, _ = client.api.get_archive(container.id, "/tmp/file.txt") - archive = io.BytesIO().join(bits) - # extract and verify... +3. 
Using pytest fixtures: + +```python +import pytest +from testcontainers.generic import GenericContainer + +@pytest.fixture +def nginx_container(): + container = GenericContainer("nginx:alpine") + container.start() + yield container + container.stop() + container.remove() + +def test_with_nginx(nginx_container): + # Your test code here + pass ``` + +## Container Readiness + +For information about waiting for containers to be ready, see the [Wait Strategies](wait_strategies.md) documentation. + +## Best Practices + +1. Always use context managers or ensure proper cleanup +2. Set appropriate timeouts for container startup +3. Use isolated networks for tests +4. Mount test files instead of copying them +5. Use tmpfs for temporary data +6. Add meaningful labels to containers +7. Configure proper wait conditions diff --git a/docs/features/docker_compose.md b/docs/features/docker_compose.md new file mode 100644 index 000000000..006a12b92 --- /dev/null +++ b/docs/features/docker_compose.md @@ -0,0 +1,117 @@ +# Docker Compose Support + +Testcontainers-Python provides support for running Docker Compose environments in your tests. This is useful when you need to test against multiple containers that work together. + +## Basic Usage + +The simplest way to use Docker Compose is with the `DockerCompose` class: + +```python +from testcontainers.compose import DockerCompose + +# Create a compose environment +compose = DockerCompose( + context="path/to/compose/directory", + compose_file_name="docker-compose.yml" +) + +# Start the environment +with compose: + # Your test code here + pass +``` + +## Configuration Options + +The `DockerCompose` class supports various configuration options: + +```python +compose = DockerCompose( + context="path/to/compose/directory", + compose_file_name=["docker-compose.yml", "docker-compose.override.yml"], # Multiple compose files + pull=True, # Pull images before starting + build=True, # Build images before starting + wait=True, # Wait for services to be healthy + env_file=".env", # Environment file + services=["service1", "service2"], # Specific services to run + profiles=["profile1", "profile2"], # Compose profiles to use + keep_volumes=False # Whether to keep volumes after stopping +) +``` + +## Accessing Services + +You can access service information and interact with containers: + +```python +with DockerCompose("path/to/compose/directory") as compose: + # Get service host and port + host = compose.get_service_host("web") + port = compose.get_service_port("web", 8080) + + # Get both host and port + host, port = compose.get_service_host_and_port("web", 8080) + + # Execute commands in a container + stdout, stderr, exit_code = compose.exec_in_container( + ["ls", "-la"], + service_name="web" + ) + + # Get container logs + stdout, stderr = compose.get_logs("web") +``` + +## Waiting for Services + +You can wait for services to be ready: + +```python +with DockerCompose("path/to/compose/directory") as compose: + # Wait for a specific URL to be accessible + compose.wait_for("http://localhost:8080/health") +``` + +## Example with Multiple Services + +Here's a complete example using multiple services: + +```python +from testcontainers.compose import DockerCompose +import requests + +def test_web_application(): + compose = DockerCompose( + "path/to/compose/directory", + compose_file_name="docker-compose.yml", + pull=True, + build=True + ) + + with compose: + # Get web service details + host = compose.get_service_host("web") + port = compose.get_service_port("web", 8080) + + # Make 
a request to the web service + response = requests.get(f"http://{host}:{port}/api/health") + assert response.status_code == 200 + + # Execute a command in the database service + stdout, stderr, exit_code = compose.exec_in_container( + ["psql", "-U", "postgres", "-c", "SELECT 1"], + service_name="db" + ) + assert exit_code == 0 +``` + +## Best Practices + +1. Use context managers (`with` statement) to ensure proper cleanup +2. Set appropriate timeouts for service startup +3. Use health checks in your compose files +4. Keep compose files in your test directory +5. Use environment variables for configuration +6. Consider using profiles for different test scenarios +7. Clean up volumes when not needed +8. Use specific service names in your tests diff --git a/docs/features/executing_commands.md b/docs/features/executing_commands.md new file mode 100644 index 000000000..9db76a89c --- /dev/null +++ b/docs/features/executing_commands.md @@ -0,0 +1,157 @@ +# Executing Commands in Containers + +Testcontainers-Python provides several ways to execute commands inside containers. This is useful for setup, verification, and debugging during tests. + +## Basic Command Execution + +The simplest way to execute a command is using the `exec` method: + +```python +from testcontainers.generic import GenericContainer + +with GenericContainer("alpine:latest") as container: + # Execute a simple command + exit_code, output = container.exec(["ls", "-la"]) + print(output) # Command output as string +``` + +## Command Execution with Options + +You can customize command execution with various options: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with user + exit_code, output = container.exec( + ["whoami"], + user="nobody" + ) + + # Execute command with environment variables + exit_code, output = container.exec( + ["echo", "$TEST_VAR"], + environment={"TEST_VAR": "test_value"} + ) + + # Execute command with working directory + exit_code, output = container.exec( + ["pwd"], + workdir="/tmp" + ) +``` + +## Interactive Commands + +For interactive commands, you can use the `exec_interactive` method: + +```python +with GenericContainer("alpine:latest") as container: + # Start an interactive shell + container.exec_interactive(["sh"]) +``` + +## Command Execution with Timeout + +You can set a timeout for command execution: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with timeout + try: + exit_code, output = container.exec( + ["sleep", "10"], + timeout=5 # Timeout in seconds + ) + except TimeoutError: + print("Command timed out") +``` + +## Command Execution with Privileges + +For commands that require elevated privileges: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with privileges + exit_code, output = container.exec( + ["mount"], + privileged=True + ) +``` + +## Command Execution with TTY + +For commands that require a TTY: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with TTY + exit_code, output = container.exec( + ["top"], + tty=True + ) +``` + +## Best Practices + +1. Use appropriate timeouts for long-running commands +2. Handle command failures gracefully +3. Use environment variables for configuration +4. Consider security implications of privileged commands +5. Clean up after command execution +6. Use appropriate user permissions +7. Handle command output appropriately +8. 
Consider using shell scripts for complex commands + +## Common Use Cases + +### Database Setup + +```python +from testcontainers.postgres import PostgresContainer + +with PostgresContainer() as postgres: + # Create a database + postgres.exec(["createdb", "testdb"]) + + # Run migrations + postgres.exec(["psql", "-d", "testdb", "-f", "/path/to/migrations.sql"]) +``` + +### File Operations + +```python +with GenericContainer("alpine:latest") as container: + # Create a directory + container.exec(["mkdir", "-p", "/data"]) + + # Set permissions + container.exec(["chmod", "755", "/data"]) + + # List files + exit_code, output = container.exec(["ls", "-la", "/data"]) +``` + +### Service Management + +```python +with GenericContainer("nginx:alpine") as container: + # Check service status + exit_code, output = container.exec(["nginx", "-t"]) + + # Reload configuration + container.exec(["nginx", "-s", "reload"]) +``` + +## Troubleshooting + +If you encounter issues with command execution: + +1. Check command syntax and arguments +2. Verify user permissions +3. Check container state +4. Verify command availability +5. Check for timeout issues +6. Verify environment variables +7. Check working directory +8. Verify TTY requirements diff --git a/docs/features/networking.md b/docs/features/networking.md new file mode 100644 index 000000000..cb7730a88 --- /dev/null +++ b/docs/features/networking.md @@ -0,0 +1,137 @@ +# Networking and Container Communication + +Testcontainers-Python provides several ways to configure networking between containers and your test code. This is essential for testing distributed systems and microservices. + +## Connection Modes + +Testcontainers-Python supports three connection modes that determine how containers are accessed: + +1. `bridge_ip` (default): Uses the bridge network IP address +2. `gateway_ip`: Uses the gateway IP address +3. `docker_host`: Uses the Docker host address + +You can set the connection mode using the `TESTCONTAINERS_CONNECTION_MODE` environment variable or the `connection.mode` property in `.testcontainers.properties`. + +## Creating Networks + +You can create isolated networks for your containers: + +```python +from testcontainers.core.network import Network + +# Create a new network +network = Network() +network.create() + +# Use the network with containers +container1 = GenericContainer("nginx:alpine") +container1.with_network(network) +container1.with_network_aliases(["web"]) + +container2 = GenericContainer("redis:alpine") +container2.with_network(network) +container2.with_network_aliases(["cache"]) + +# Start containers +with container1, container2: + # Containers can communicate using their network aliases + # e.g., "web" can connect to "cache:6379" + pass +``` + +## Port Mapping + +You can map container ports to host ports: + +```python +from testcontainers.core.container import DockerContainer + +container = DockerContainer("nginx:alpine") +container.with_bind_ports(80, 8080) # Map container port 80 to host port 8080 +container.with_bind_ports("443/tcp", 8443) # Map container port 443 to host port 8443 +``` + +## Container Communication + +Containers can communicate with each other in several ways: + +1. 
Using network aliases: + +```python +# Container 1 can reach Container 2 using its network alias +container1 = GenericContainer("app:latest") +container1.with_network(network) +container1.with_network_aliases(["app"]) + +container2 = GenericContainer("db:latest") +container2.with_network(network) +container2.with_network_aliases(["database"]) + +# Container 1 can connect to Container 2 using "database:5432" +``` + +2. Using container IP addresses: + +```python +with container1, container2: + # Get container IP addresses + container1_ip = container1.get_container_host_ip() + container2_ip = container2.get_container_host_ip() + + # Containers can communicate using IP addresses + # e.g., container1 can connect to container2_ip:5432 +``` + +3. Using host networking: + +```python +container = GenericContainer("nginx:alpine") +container.with_network_mode("host") # Use host networking +``` + +## Example: Multi-Container Application + +Here's a complete example of a multi-container application: + +```python +from testcontainers.core.network import Network +from testcontainers.postgres import PostgresContainer +from testcontainers.redis import RedisContainer + +def test_multi_container_app(): + # Create a network + network = Network() + network.create() + + # Create containers + postgres = PostgresContainer() + postgres.with_network(network) + postgres.with_network_aliases(["db"]) + + redis = RedisContainer() + redis.with_network(network) + redis.with_network_aliases(["cache"]) + + # Start containers + with postgres, redis: + # Get connection details + db_host = postgres.get_container_host_ip() + db_port = postgres.get_exposed_port(5432) + + redis_host = redis.get_container_host_ip() + redis_port = redis.get_exposed_port(6379) + + # Your test code here + pass +``` + +## Best Practices + +1. Use isolated networks for tests +2. Use meaningful network aliases +3. Avoid using host networking unless necessary +4. Use appropriate connection modes for your environment +5. Clean up networks after tests +6. Use port mapping for external access +7. Consider using Docker Compose for complex setups +8. Use environment variables for configuration diff --git a/docs/features/wait_strategies.md b/docs/features/wait_strategies.md new file mode 100644 index 000000000..8c588a710 --- /dev/null +++ b/docs/features/wait_strategies.md @@ -0,0 +1,122 @@ +# Wait Strategies + +Testcontainers-Python provides several strategies to wait for containers to be ready before proceeding with tests. This is crucial for ensuring that your tests don't start before the container is fully initialized and ready to accept connections. + +## Basic Wait Strategy + +The simplest way to wait for a container is using the `wait_container_is_ready` decorator: + +```python +from testcontainers.core.waiting_utils import wait_container_is_ready + +class MyContainer(DockerContainer): + @wait_container_is_ready() + def _connect(self): + # Your connection logic here + pass +``` + +This decorator will retry the method until it succeeds or times out. By default, it will retry for 120 seconds with a 1-second interval between attempts. 
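+
+If your readiness probe raises transient errors while the service is still starting, you can pass those exception types to the decorator so they are retried rather than failing immediately. Below is a minimal sketch, assuming a service whose client raises `ConnectionError` until it is up; the `_create_client` helper is hypothetical:
+
+```python
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_container_is_ready
+
+class MyServiceContainer(DockerContainer):
+    @wait_container_is_ready(ConnectionError)
+    def _connect(self):
+        # Retried on ConnectionError until the configured timeout is reached
+        client = self._create_client()  # hypothetical helper for this container
+        client.ping()
+```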
+ +## Log-based Waiting + +Wait for specific log messages to appear: + +```python +from testcontainers.core.waiting_utils import wait_for_logs + +# Wait for a specific log message +container = GenericContainer( + "nginx:alpine", + wait=wait_for_logs("Configuration complete; ready for start") +) + +# Wait for a log pattern using regex +container = GenericContainer( + "postgres:latest", + wait=wait_for_logs("database system is ready to accept connections") +) + +# Wait for logs in both stdout and stderr +container = GenericContainer( + "myapp:latest", + wait=wait_for_logs("Ready", predicate_streams_and=True) +) +``` + +## HTTP-based Waiting + +Wait for an HTTP endpoint to be accessible: + +```python +from testcontainers.core.waiting_utils import wait_for_http + +# Wait for an HTTP endpoint +container = GenericContainer( + "nginx:alpine", + wait=wait_for_http("/", port=80) +) + +# Wait for a specific HTTP status code +container = GenericContainer( + "myapp:latest", + wait=wait_for_http("/health", port=8080, status_code=200) +) +``` + +## Custom Wait Conditions + +You can create custom wait conditions by implementing your own wait function: + +```python +def custom_wait(container): + # Your custom logic here + # Return True if the container is ready, False otherwise + return True + +container = GenericContainer( + "myapp:latest", + wait=custom_wait +) +``` + +## Connection-based Waiting + +Many container implementations include built-in connection waiting. For example: + +```python +from testcontainers.redis import RedisContainer +from testcontainers.postgres import PostgresContainer + +# Redis container waits for connection +redis = RedisContainer() +redis.start() # Will wait until Redis is ready to accept connections + +# PostgreSQL container waits for connection +postgres = PostgresContainer() +postgres.start() # Will wait until PostgreSQL is ready to accept connections +``` + +## Configuring Wait Behavior + +You can configure the wait behavior using environment variables: + +- `TC_MAX_TRIES`: Maximum number of connection attempts (default: 120) +- `TC_POOLING_INTERVAL`: Time between connection attempts in seconds (default: 1) + +Example: + +```bash +export TC_MAX_TRIES=60 +export TC_POOLING_INTERVAL=2 +``` + +## Best Practices + +1. Always use appropriate wait strategies for your containers +2. Set reasonable timeouts for your environment +3. Use specific wait conditions rather than generic ones when possible +4. Consider using connection-based waiting for database containers +5. Use log-based waiting for applications that output clear startup messages +6. Use HTTP-based waiting for web services +7. Implement custom wait conditions for complex startup scenarios diff --git a/docs/modules/test_module_import.md b/docs/modules/test_module_import.md index fa01570fa..ee9c2ecd3 100644 --- a/docs/modules/test_module_import.md +++ b/docs/modules/test_module_import.md @@ -4,7 +4,13 @@ Since testcontainers-python `. When you do so, `dockerd` maps the selected `` from inside the container to a random one available on your host. -In the previous example, we expose `6379` for `tcp` traffic to the outside. This +In the previous example, we expose the default Redis port (6379) for `tcp` traffic to the outside. This allows Redis to be reachable from your code that runs outside the container, but -it also makes parallelization possible because if you add `t.Parallel` to your -tests, and each of them starts a Redis container each of them will be exposed on a -different random port. 
+it also makes parallelization possible because if you run your tests in parallel, each test will get its own Redis container exposed on a different random port. -`testcontainers.GenericContainer` creates the container. In this example we are -using `Started: true`. It means that the container function will wait for the -container to be up and running. If you set the `Start` value to `false` it won't -start, leaving to you the decision about when to start it. - -All the containers must be removed at some point, otherwise they will run until -the host is overloaded. One of the ways we have to clean up is by deferring the -terminated function: `defer testcontainers.TerminateContainer(redisC)` which -automatically handles nil container so is safe to use even in the error case. +The container is automatically cleaned up when the test finishes, thanks to the context manager (`with` statement). This ensures that no containers are left running after your tests complete. !!!tip @@ -81,35 +61,39 @@ automatically handles nil container so is safe to use even in the error case. ## 4. Make your code to talk with the container -This is just an example, but usually Go applications that rely on Redis are -using the [redis-go](https://github.com/go-redis/redis) client. This code gets +This is just an example, but usually Python applications that rely on Redis are +using the [redis-py](https://github.com/redis/redis-py) client. This code gets the endpoint from the container we just started, and it configures the client. -```go -endpoint, err := redisC.Endpoint(ctx, "") -if err != nil { - t.Error(err) -} - -client := redis.NewClient(&redis.Options{ - Addr: endpoint, -}) - -_ = client +```python +def test_redis_operations(): + with RedisContainer() as redis_container: + # Get connection parameters + host = redis_container.get_container_host_ip() + port = redis_container.get_exposed_port(redis_container.port) + + # Create Redis client + client = redis.Redis(host=host, port=port, decode_responses=True) + + # Test various Redis operations + # String operations + client.set("greeting", "Hello, Redis!") + value = client.get("greeting") + assert value == "Hello, Redis!" + + # List operations + client.lpush("tasks", "task1", "task2", "task3") + tasks = client.lrange("tasks", 0, -1) + assert tasks == ["task3", "task2", "task1"] ``` -We expose only one port, so the `Endpoint` does not need a second argument set. - -!!!tip - - If you expose more than one port you can specify the one you need as a second - argument. - -In this case it returns: `localhost:`. - ## 5. Run the test -You can run the test via `go test ./...` +You can run the test via `pytest`: + +```bash +pytest test_redis.py +``` ## 6. Want to go deeper with Redis? 
diff --git a/mkdocs.yml b/mkdocs.yml index 7efc7d524..273909fa6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -38,8 +38,16 @@ nav: - Home: index.md - Quickstart: quickstart.md - Features: - - features/creating_container.md - - features/configuration.md + - Creating Containers: features/creating_container.md + - Configuration: features/configuration.md + - Authentication: features/authentication.md + - Executing Commands: features/executing_commands.md + - Container Logs: features/container_logs.md + - Building Images: features/building_images.md + - Copying Data: features/copying_data.md + - Wait Strategies: features/wait_strategies.md + - Docker Compose: features/docker_compose.md + - Networking: features/networking.md - Modules: - Databases: - modules/arangodb.md diff --git a/modules/test_module_import/example_basic.py b/modules/test_module_import/example_basic.py deleted file mode 100644 index 7737333d9..000000000 --- a/modules/test_module_import/example_basic.py +++ /dev/null @@ -1,144 +0,0 @@ -import importlib -import pkgutil -import sys -from pathlib import Path - -from testcontainers.test_module_import import TestModuleImportContainer - - -def test_module_import(): - try: - import test_module - - print("\nSuccessfully imported test_module") - print(f"Module version: {test_module.__version__}") - print(f"Module description: {test_module.__description__}") - except ImportError as e: - print(f"\nFailed to import test_module: {e}") - - -def test_submodule_import(): - try: - from test_module import submodule - - print("\nSuccessfully imported test_module.submodule") - print(f"Submodule function result: {submodule.test_function()}") - except ImportError as e: - print(f"\nFailed to import test_module.submodule: {e}") - - -def test_package_import(): - try: - import test_package - - print("\nSuccessfully imported test_package") - print(f"Package version: {test_package.__version__}") - except ImportError as e: - print(f"\nFailed to import test_package: {e}") - - -def test_module_reloading(): - try: - importlib.reload(test_module) - print("\nSuccessfully reloaded test_module") - except NameError: - print("\nCould not reload test_module (not imported)") - - -def test_version_import(): - try: - import test_module_v2 - - print("\nSuccessfully imported test_module_v2") - print(f"Module version: {test_module_v2.__version__}") - except ImportError as e: - print(f"\nFailed to import test_module_v2: {e}") - - -def test_deps_import(): - try: - import test_module_with_deps - - print("\nSuccessfully imported test_module_with_deps") - print(f"Dependencies: {test_module_with_deps.DEPENDENCIES}") - except ImportError as e: - print(f"\nFailed to import test_module_with_deps: {e}") - - -def test_env_import(): - try: - import test_module_with_env - - print("\nSuccessfully imported test_module_with_env") - print(f"Environment variables: {test_module_with_env.ENV_VARS}") - except ImportError as e: - print(f"\nFailed to import test_module_with_env: {e}") - - -def test_custom_loader_import(): - try: - import test_module_custom_loader - - print("\nSuccessfully imported test_module_custom_loader") - print(f"Loader type: {test_module_custom_loader.LOADER_TYPE}") - except ImportError as e: - print(f"\nFailed to import test_module_custom_loader: {e}") - - -def test_namespace_import(): - try: - import test_namespace_package - - print("\nSuccessfully imported test_namespace_package") - print(f"Namespace: {test_namespace_package.__namespace__}") - except ImportError as e: - print(f"\nFailed to import test_namespace_package: 
{e}") - - -def test_entry_points_import(): - try: - import test_module_with_entry_points - - print("\nSuccessfully imported test_module_with_entry_points") - print(f"Entry points: {test_module_with_entry_points.ENTRY_POINTS}") - except ImportError as e: - print(f"\nFailed to import test_module_with_entry_points: {e}") - - -def basic_example(): - with TestModuleImportContainer(): - # Add test module to Python path - sys.path.append(str(Path(__file__).parent)) - print("Added test module to Python path") - - # Test various imports - test_module_import() - test_submodule_import() - test_package_import() - - # List all available modules - print("\nAvailable modules in Python path:") - for module_info in pkgutil.iter_modules(): - print(f"- {module_info.name}") - - # Test module reloading - test_module_reloading() - - # Test other imports - test_version_import() - test_deps_import() - test_env_import() - test_custom_loader_import() - test_namespace_import() - test_entry_points_import() - - # Clean up - if "test_module" in sys.modules: - del sys.modules["test_module"] - if "test_package" in sys.modules: - del sys.modules["test_package"] - print("\nCleaned up imported modules") - - -if __name__ == "__main__": - basic_example() diff --git a/modules/test_module_import/examples/01_basic_import.py b/modules/test_module_import/examples/01_basic_import.py new file mode 100644 index 000000000..9068c9944 --- /dev/null +++ b/modules/test_module_import/examples/01_basic_import.py @@ -0,0 +1,58 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_module_import(): + try: + import test_module + + print("\nSuccessfully imported test_module") + print(f"Module version: {test_module.__version__}") + print(f"Module description: {test_module.__description__}") + except ImportError as e: + print(f"\nFailed to import test_module: {e}") + + +def test_submodule_import(): + try: + from test_module import submodule + + print("\nSuccessfully imported test_module.submodule") + print(f"Submodule function result: {submodule.test_function()}") + except ImportError as e: + print(f"\nFailed to import test_module.submodule: {e}") + + +def test_package_import(): + try: + import test_package + + print("\nSuccessfully imported test_package") + print(f"Package version: {test_package.__version__}") + except ImportError as e: + print(f"\nFailed to import test_package: {e}") + + +def basic_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test various imports + test_module_import() + test_submodule_import() + test_package_import() + + # Clean up + if "test_module" in sys.modules: + del sys.modules["test_module"] + if "test_package" in sys.modules: + del sys.modules["test_package"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/test_module_import/examples/02_module_reloading.py b/modules/test_module_import/examples/02_module_reloading.py new file mode 100644 index 000000000..4e05ff8bd --- /dev/null +++ b/modules/test_module_import/examples/02_module_reloading.py @@ -0,0 +1,41 @@ +import importlib +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_module_reloading(): + try: + import test_module + + print("\nSuccessfully imported test_module") + print(f"Initial version: {test_module.__version__}") + + 
# Simulate module changes by reloading + importlib.reload(test_module) + print("\nSuccessfully reloaded test_module") + print(f"Updated version: {test_module.__version__}") + except ImportError as e: + print(f"\nFailed to import test_module: {e}") + except NameError: + print("\nCould not reload test_module (not imported)") + + +def reloading_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test module reloading + test_module_reloading() + + # Clean up + if "test_module" in sys.modules: + del sys.modules["test_module"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + reloading_example() diff --git a/modules/test_module_import/examples/03_version_specific.py b/modules/test_module_import/examples/03_version_specific.py new file mode 100644 index 000000000..b24a6b47e --- /dev/null +++ b/modules/test_module_import/examples/03_version_specific.py @@ -0,0 +1,34 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_version_import(): + try: + import test_module_v2 + + print("\nSuccessfully imported test_module_v2") + print(f"Module version: {test_module_v2.__version__}") + print(f"Module features: {test_module_v2.FEATURES}") + except ImportError as e: + print(f"\nFailed to import test_module_v2: {e}") + + +def version_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test version-specific imports + test_version_import() + + # Clean up + if "test_module_v2" in sys.modules: + del sys.modules["test_module_v2"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + version_example() diff --git a/modules/test_module_import/examples/04_dependencies_and_env.py b/modules/test_module_import/examples/04_dependencies_and_env.py new file mode 100644 index 000000000..de49fc55b --- /dev/null +++ b/modules/test_module_import/examples/04_dependencies_and_env.py @@ -0,0 +1,48 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_deps_import(): + try: + import test_module_with_deps + + print("\nSuccessfully imported test_module_with_deps") + print(f"Dependencies: {test_module_with_deps.DEPENDENCIES}") + print(f"Required versions: {test_module_with_deps.REQUIRED_VERSIONS}") + except ImportError as e: + print(f"\nFailed to import test_module_with_deps: {e}") + + +def test_env_import(): + try: + import test_module_with_env + + print("\nSuccessfully imported test_module_with_env") + print(f"Environment variables: {test_module_with_env.ENV_VARS}") + print(f"Environment values: {test_module_with_env.ENV_VALUES}") + except ImportError as e: + print(f"\nFailed to import test_module_with_env: {e}") + + +def deps_and_env_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test dependencies and environment imports + test_deps_import() + test_env_import() + + # Clean up + if "test_module_with_deps" in sys.modules: + del sys.modules["test_module_with_deps"] + if "test_module_with_env" in sys.modules: + del sys.modules["test_module_with_env"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + deps_and_env_example() diff --git 
a/modules/test_module_import/examples/05_advanced_features.py b/modules/test_module_import/examples/05_advanced_features.py new file mode 100644 index 000000000..45c24faa8 --- /dev/null +++ b/modules/test_module_import/examples/05_advanced_features.py @@ -0,0 +1,59 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_custom_loader_import(): + try: + import test_module_custom_loader + + print("\nSuccessfully imported test_module_custom_loader") + print(f"Loader type: {test_module_custom_loader.LOADER_TYPE}") + print(f"Loader configuration: {test_module_custom_loader.LOADER_CONFIG}") + except ImportError as e: + print(f"\nFailed to import test_module_custom_loader: {e}") + + +def test_namespace_import(): + try: + import test_namespace_package + + print("\nSuccessfully imported test_namespace_package") + print(f"Namespace: {test_namespace_package.__namespace__}") + print(f"Available subpackages: {test_namespace_package.SUBPACKAGES}") + except ImportError as e: + print(f"\nFailed to import test_namespace_package: {e}") + + +def test_entry_points_import(): + try: + import test_module_with_entry_points + + print("\nSuccessfully imported test_module_with_entry_points") + print(f"Entry points: {test_module_with_entry_points.ENTRY_POINTS}") + print(f"Entry point groups: {test_module_with_entry_points.ENTRY_POINT_GROUPS}") + except ImportError as e: + print(f"\nFailed to import test_module_with_entry_points: {e}") + + +def advanced_features_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test advanced features + test_custom_loader_import() + test_namespace_import() + test_entry_points_import() + + # Clean up + for module in ["test_module_custom_loader", "test_namespace_package", "test_module_with_entry_points"]: + if module in sys.modules: + del sys.modules[module] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + advanced_features_example() diff --git a/pyproject.toml b/pyproject.toml index c8fe4bf2b..e5cec61af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -279,6 +279,7 @@ ignore = [ [tool.ruff.lint.per-file-ignores] "**/example_*.py" = ["T201"] +"**/examples/*.py" = ["T201"] [tool.ruff.lint.pyupgrade] keep-runtime-typing = true From 4e8ede261794e77dddb25faf091336923c057963 Mon Sep 17 00:00:00 2001 From: Terry Date: Wed, 14 May 2025 16:47:26 -0300 Subject: [PATCH 09/16] docs: fix minor css issue on code block rendering --- docs/index.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/index.md b/docs/index.md index 6ad48f82a..05d5d8e55 100644 --- a/docs/index.md +++ b/docs/index.md @@ -24,10 +24,7 @@ Inline documentation and docs where the code live is crucial for us. Testcontain ## Who is using Testcontainers Python? 
-- [AWS](https://aws.amazon.com/) -- [Google](https://google.com/) -- [Grafana](https://grafana.com/) -- [Timescale](https://www.timescale.com/) +- [Timescale](https://www.timescale.com/) - Uses testcontainers-python in their pgai project for testing PostgreSQL integrations, AI model interactions, and AWS service integrations ## License From 5e70144c2a1fe6f24d9fa2d0b78a3094cf3dfcf0 Mon Sep 17 00:00:00 2001 From: Terry Date: Thu, 15 May 2025 23:38:39 -0300 Subject: [PATCH 10/16] docs: fix quickstart links, add garbage collector and advanced features section pages, fix some formatting issues --- docs/features/advanced_features.md | 424 +++++++++++++++++++++++++++++ docs/features/garbage_collector.md | 51 ++++ docs/features/networking.md | 113 ++++++-- docs/features/wait_strategies.md | 9 + docs/index.md | 2 + docs/quickstart.md | 4 +- mkdocs.yml | 2 + 7 files changed, 580 insertions(+), 25 deletions(-) create mode 100644 docs/features/advanced_features.md create mode 100644 docs/features/garbage_collector.md diff --git a/docs/features/advanced_features.md b/docs/features/advanced_features.md new file mode 100644 index 000000000..e1f1ee2fc --- /dev/null +++ b/docs/features/advanced_features.md @@ -0,0 +1,424 @@ +# Advanced Features and Best Practices + +This document covers advanced features and best practices for using testcontainers-python in complex scenarios. + +## Docker-in-Docker (DinD) Support + +Testcontainers-python provides robust support for running tests inside Docker containers, enabling true isolation and reproducibility of test environments. This feature is particularly valuable for CI/CD pipelines, integration testing, and scenarios requiring consistent, isolated environments. + +### Use Cases + +- **CI/CD Pipelines**: Run tests in isolated containers within your CI environment +- **Integration Testing**: Test interactions between multiple services in a controlled environment +- **Environment Consistency**: Ensure tests run in the same environment across different machines +- **Resource Isolation**: Prevent test interference and resource conflicts + +### Connection Modes + +Testcontainers-python supports three connection modes for container networking: + +- **`bridge_ip`**: Use this mode when containers need to communicate over a bridge network. This is the default mode and provides isolated network communication between containers. +- **`gateway_ip`**: Use this mode when containers need to access the host network. This is useful when containers need to communicate with services running on the host machine. +- **`docker_host`**: Use this mode for local development. This mode uses the host's Docker socket directly, which is more efficient but provides less isolation. 
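+
+For example, if the bridge IP of a container is not routable from where the tests run (as can happen on some CI runners), switching to the gateway IP may restore connectivity. A sketch, assuming the mode is selected via the environment:
+
+```bash
+# Reach containers through the network gateway instead of the bridge IP
+export TESTCONTAINERS_CONNECTION_MODE=gateway_ip
+pytest
+```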
+ +### Network Configuration + +Here's how to set up container networking: + +```python +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network + +# Create an isolated network +network = Network() + +# Create containers on the network +container1 = DockerContainer("nginx:alpine") +container1.with_network(network) +container1.with_network_aliases(["web"]) + +container2 = DockerContainer("redis:alpine") +container2.with_network(network) +container2.with_network_aliases(["cache"]) +``` + +### Volume Mounting + +Mount host directories into containers for data persistence or configuration: + +```python +container = DockerContainer("nginx:alpine") +container.with_volume_mapping("/host/path", "/container/path", "ro") # Read-only mount +container.with_volume_mapping("/host/data", "/container/data", "rw") # Read-write mount +``` + +### Best Practices + +When working with Docker-in-Docker, it's crucial to follow a comprehensive set of best practices to ensure optimal performance, security, and maintainability. Start by carefully managing your resources: set appropriate memory and CPU limits for your containers, actively monitor their resource usage, and ensure proper cleanup after tests complete. This helps prevent resource exhaustion and maintains system stability. + +Security should be a top priority in your DinD implementation. Always use read-only volume mounts when possible to prevent unauthorized modifications, avoid running containers with privileged access unless absolutely necessary, and implement proper network isolation to prevent unauthorized access between containers. These measures help maintain a secure testing environment. + +For optimal performance, focus on using appropriate base images. Alpine-based images are often a good choice due to their small footprint, but consider your specific needs. Implement proper health checks to ensure containers are truly ready before proceeding with tests, and consider using container caching strategies to speed up test execution. When dealing with complex setups, consider using Docker Compose to manage multiple containers and their interactions. + +## ARM64 Support + +Testcontainers-python provides comprehensive support for ARM64 architecture through automatic emulation, making it seamless to run tests on ARM-based systems like Apple Silicon (M1/M2) Macs and ARM-based cloud instances. + +### Using ARM64 Support + +```python +from testcontainers.core.container import DockerContainer + +# Basic usage with automatic emulation +container = DockerContainer("nginx:alpine") +container.maybe_emulate_amd64() # Automatically handles ARM64 emulation + +# Advanced configuration with resource limits +container = DockerContainer("nginx:alpine") +container.maybe_emulate_amd64() +container.with_memory_limit("512m") +container.with_cpu_limit(0.5) # Use 50% of available CPU +``` + +### Performance Considerations + +1. **Emulation Overhead**: + - Expect 20-30% performance impact when running x86_64 containers on ARM + - Use ARM-native images when available for better performance + - Consider using multi-architecture images (e.g., `nginx:alpine`) + +2. **Resource Management**: + - Monitor memory usage during emulation + - Adjust CPU limits based on your workload + - Use appropriate base images to minimize emulation overhead + +### Best Practices + +When working with ARM64 architecture, a thoughtful approach to image selection and resource management is essential. 
Prioritize using multi-architecture images when available, as they provide the best compatibility across different platforms. For optimal performance, use minimal base images to reduce emulation overhead, and thoroughly test your setup with different image variants to find the best balance between size and functionality. + +In your development workflow, ensure you test your applications on both ARM and x86_64 environments to catch any architecture-specific issues early. When setting up CI/CD pipelines, make sure they support ARM64 architecture and document any architecture-specific considerations in your project documentation. This helps maintain consistency across different development environments and deployment targets. + +## TCP Forwarding with Socat + +The `SocatContainer` provides powerful TCP forwarding capabilities, enabling complex networking scenarios and service communication patterns. + +### Using Socat Container + +```python +from testcontainers.socat import SocatContainer + +# Basic TCP forwarding +socat = SocatContainer() +socat.with_target(8080, "host.docker.internal", 80) +socat.start() + +# Multiple port forwarding +socat = SocatContainer() +socat.with_target(8080, "host.docker.internal", 80) +socat.with_target(5432, "postgres", 5432) # Forward to another container +socat.start() + +# UDP forwarding +socat = SocatContainer() +socat.with_target(53, "8.8.8.8", 53, protocol="udp") +socat.start() +``` + +### Advanced Configuration + +```python +# Custom Socat options +socat = SocatContainer() +socat.with_option("-d") # Enable debug output +socat.with_option("-v") # Verbose mode +socat.with_target(8080, "host.docker.internal", 80) +socat.start() +``` + +### Best Practices + +When working with Socat, security should be your primary concern. Only forward the ports that are absolutely necessary for your application to function, and implement appropriate access controls to prevent unauthorized access. For sensitive traffic, consider using TLS to encrypt the forwarded connections. Regularly monitor your forwarded connections to detect any suspicious activity or performance issues. + +Performance optimization is crucial for maintaining a responsive system. Monitor connection latency to identify potential bottlenecks, and adjust buffer sizes based on your specific use case. For high-load scenarios, consider implementing connection pooling to manage resources efficiently. Regular maintenance is also important: document your forwarding rules clearly, implement proper cleanup procedures, and monitor connection health to ensure reliable operation. + +## Environment Variables and Configuration + +Testcontainers-python offers flexible configuration options through environment variables, configuration files, and properties. 
+
+### Using Environment Variables
+
+**Direct Environment Variables**:
+
+```python
+container = DockerContainer("nginx:alpine")
+container.with_env("NGINX_HOST", "example.com")
+container.with_env("NGINX_PORT", "8080")
+container.with_env("DEBUG", "true")
+```
+
+**Environment Files**:
+
+```properties
+# .env file
+NGINX_HOST=example.com
+NGINX_PORT=8080
+DEBUG=true
+```
+
+```python
+# Python code
+container = DockerContainer("nginx:alpine")
+container.with_env_file(".env")
+```
+
+**Configuration Properties**:
+
+```properties
+# .testcontainers.properties
+ryuk.container.privileged=true
+ryuk.reconnection.timeout=10s
+connection.mode=docker_host
+```
+
+### Best Practices
+
+Configuration management in testcontainers-python requires a careful balance between flexibility and security. Never commit sensitive data to version control; instead, use environment variables for secrets and consider implementing a secrets manager for more complex scenarios. When dealing with configuration files, ensure they are well-documented and include validation to catch errors early.
+
+In your development workflow, provide example configuration files to help new team members get started quickly. Document all required environment variables and their purposes, and implement configuration testing to catch issues before they reach production. Use configuration templates to maintain consistency across different environments while allowing for environment-specific customization.
+
+## Container Health Checks
+
+Testcontainers-python provides robust health checking mechanisms to ensure containers are ready for testing.
+
+### Custom Health Checks
+
+```python
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_container_is_ready
+import requests
+
+class WebContainer(DockerContainer):
+    @wait_container_is_ready()
+    def _connect(self):
+        response = requests.get(f"http://{self.get_container_host_ip()}:{self.get_exposed_port(80)}")
+        response.raise_for_status()  # raise so the check is retried until the endpoint is ready
+
+class DatabaseContainer(DockerContainer):
+    @wait_container_is_ready()
+    def _connect(self):
+        # Implement database connection check
+        pass
+```
+
+### Health Check Strategies
+
+1. **HTTP Health Checks**:
+    - Check HTTP endpoints
+    - Verify response status codes
+    - Validate response content
+
+2. **TCP Health Checks**:
+    - Verify port availability
+    - Check connection establishment
+    - Monitor connection stability
+
+3. **Application-Specific Checks**:
+    - Verify service readiness
+    - Check data consistency
+    - Validate business logic
+
+### Best Practices
+
+Health checks are a critical component of reliable containerized applications. When implementing health checks, use appropriate timeouts and implement retry mechanisms to handle temporary issues gracefully. Log health check failures with sufficient detail to aid in debugging, and consider using multiple check strategies to ensure comprehensive coverage of your application's health.
+
+Monitoring is essential for maintaining system health. Track health check metrics to identify patterns and potential issues, implement proper logging to capture relevant information, and set up alerts for failures to enable quick response to problems. Regular maintenance is also important: review your health checks periodically, update check criteria as your application evolves, and test check reliability to ensure they continue to provide accurate information.
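+
+As a concrete example of the TCP strategy above, the readiness probe can simply try to open a socket to the exposed port. This is a minimal sketch; the container port (5432 here) and the retried exception type are placeholders for your own service:
+
+```python
+import socket
+
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_container_is_ready
+
+class TcpCheckedContainer(DockerContainer):
+    @wait_container_is_ready(OSError)
+    def _connect(self):
+        # Retried on OSError (e.g. connection refused) until the port accepts connections
+        with socket.create_connection(
+            (self.get_container_host_ip(), int(self.get_exposed_port(5432))),
+            timeout=1,
+        ):
+            pass
+```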
+ +## Error Handling and Debugging + +### Common Issues and Solutions + +**Container Startup Failures**: + +```python +try: + container = DockerContainer("nginx:alpine") + container.start() +except Exception as e: + print(f"Container startup failed: {e}") + print(f"Container logs: {container.get_logs()}") + raise +``` + +**Network Issues**: + +```python +# Debug network configuration +container_id = container.get_wrapped_container().id +network_info = container.get_docker_client().inspect_network(network_name) +print(f"Network configuration: {network_info}") + +# Check container connectivity +host_ip = container.get_container_host_ip() +print(f"Container host IP: {host_ip}") +``` + +**Resource Cleanup**: + +```python +from contextlib import contextmanager + +@contextmanager +def managed_container(): + container = DockerContainer("nginx:alpine") + try: + container.start() + yield container + finally: + container.stop() + container.remove() +``` + +### Debugging Tools + +**Container Logs**: + +```python +# Get all logs +stdout, stderr = container.get_logs() + +# Get recent logs +stdout, stderr = container.get_logs(since="2024-01-01T00:00:00Z") + +# Follow logs +for line in container.get_logs(stream=True): + print(line) +``` + +**Container Information**: + +```python +# Get container details +container_id = container.get_wrapped_container().id +container_info = container.get_docker_client().inspect_container(container_id) + +# Get resource usage +stats = container.get_docker_client().stats(container_id) +``` + +**Network Information**: + +```python +# Get network details +network_name = container.get_docker_client().network_name(container_id) +network_info = container.get_docker_client().inspect_network(network_name) + +# List connected containers +connected_containers = container.get_docker_client().list_containers( + filters={"network": network_name} +) +``` + +### Best Practices + +Error handling and debugging in containerized environments require a systematic approach. Start by implementing proper validation and using appropriate timeouts to prevent common issues. Set up monitoring to catch problems early, and document known issues and their solutions to help team members resolve similar problems quickly. + +When debugging issues, collect relevant logs and analyze error patterns to identify root causes. Use appropriate tools for different types of problems, and document your solutions to build a knowledge base for future reference. Regular maintenance is crucial: perform regular system checks, keep documentation up to date, monitor error rates, and implement improvements based on your findings. + +## Performance Optimization + +Optimizing the performance of your testcontainers-python setup is crucial for maintaining efficient test execution and resource utilization. This section covers key strategies and best practices for achieving optimal performance. + +### Image Selection and Management + +The choice of base images significantly impacts your container's performance and resource usage. 
When selecting images, consider the following: + +```python +# Using minimal base images +container = DockerContainer("nginx:alpine") # ~7MB +container = DockerContainer("python:3.9-slim") # ~125MB +container = DockerContainer("python:3.9") # ~900MB + +# Using multi-stage builds for custom images +from testcontainers.core.container import DockerContainer +from testcontainers.core.docker_client import DockerClient + +client = DockerClient() +client.build_image( + path=".", + tag="my-optimized-app:latest", + dockerfile=""" + FROM python:3.9-slim as builder + WORKDIR /app + COPY requirements.txt . + RUN pip install --no-cache-dir -r requirements.txt + + FROM python:3.9-slim + WORKDIR /app + COPY --from=builder /usr/local/lib/python3.9/site-packages /usr/local/lib/python3.9/site-packages + COPY . . + """ +) +``` + +### Resource Management + +Proper resource allocation is essential for maintaining system stability and performance. Here's how to manage resources effectively: + +```python +# Setting resource limits +container = DockerContainer("nginx:alpine") +container.with_memory_limit("512m") # Limit memory usage +container.with_cpu_limit(0.5) # Use 50% of available CPU +container.with_shm_size("256m") # Set shared memory size + +# Monitoring resource usage +stats = container.get_docker_client().stats(container.get_wrapped_container().id) +print(f"CPU Usage: {stats['cpu_stats']['cpu_usage']['total_usage']}") +print(f"Memory Usage: {stats['memory_stats']['usage']}") +``` + +### Parallel Execution + +Running tests in parallel can significantly reduce overall execution time. Here's how to implement parallel execution: + +```python +import concurrent.futures +from testcontainers.core.container import DockerContainer + +def run_test(container_config): + with DockerContainer(**container_config) as container: + # Run your test + pass + +# Run multiple containers in parallel +container_configs = [ + {"image": "nginx:alpine", "ports": {"80": 8080}}, + {"image": "redis:alpine", "ports": {"6379": 6379}}, + {"image": "postgres:alpine", "ports": {"5432": 5432}} +] + +with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor: + futures = [executor.submit(run_test, config) for config in container_configs] + concurrent.futures.wait(futures) +``` + +### Caching Strategies + +Implementing effective caching strategies can significantly improve test execution time: + +```python +# Using Docker layer caching +container = DockerContainer("python:3.9-slim") +container.with_volume_mapping( + "${HOME}/.cache/pip", # Host pip cache + "/root/.cache/pip", # Container pip cache + "rw" +) + +# Using build cache +client = DockerClient() +client.build_image( + path=".", + tag="my-app:latest", + dockerfile="Dockerfile", + buildargs={"BUILDKIT_INLINE_CACHE": "1"} +) +``` diff --git a/docs/features/garbage_collector.md b/docs/features/garbage_collector.md new file mode 100644 index 000000000..a396b1706 --- /dev/null +++ b/docs/features/garbage_collector.md @@ -0,0 +1,51 @@ +# Garbage Collector + +Testcontainers for Python includes a robust garbage collection mechanism to ensure that containers are properly cleaned up, even in unexpected scenarios. + +## How it Works + +The garbage collection is implemented using a special container called "Ryuk" (pronounced "reaper"). This container is automatically started when you create your first test container and is responsible for cleaning up resources when: + +1. The Python process exits normally +2. The Python process is terminated unexpectedly +3. 
The system crashes or loses power
+
+## Configuration
+
+The Ryuk container can be configured through environment variables:
+
+- `TESTCONTAINERS_RYUK_DISABLED`: Set to `true` to disable the Ryuk container (not recommended)
+- `TESTCONTAINERS_RYUK_CONTAINER_PRIVILEGED`: Set to `true` to run Ryuk in privileged mode (default: `false`)
+- `TESTCONTAINERS_RYUK_RECONNECTION_TIMEOUT`: Timeout for Ryuk reconnection attempts (default: `10s`)
+
+## Best Practices
+
+1. **Don't Disable Ryuk**: The Ryuk container is an important part of Testcontainers' cleanup mechanism. Only disable it if you have a specific reason and understand the implications.
+
+2. **Use Context Managers**: Always use the `with` statement when creating containers. This ensures proper cleanup even if an exception occurs:
+
+```python
+with RedisContainer() as redis:
+    # Your test code here
+```
+
+3. **Session Management**: Each test session gets a unique session ID, and Ryuk tracks containers by this ID. This allows for proper cleanup even when running tests in parallel.
+
+## Troubleshooting
+
+If you notice containers not being cleaned up:
+
+1. Check if Ryuk is running: `docker ps | grep testcontainers-ryuk`
+2. Verify that the containers have the correct session label: `docker inspect <container-id> | grep session-id`
+3. Check Ryuk logs: `docker logs <ryuk-container-id>`
+
+## Implementation Details
+
+The Ryuk container is a lightweight container that:
+
+1. Connects to the Docker daemon
+2. Listens for container events
+3. Automatically removes containers when their parent process exits
+4. Handles reconnection if the connection to Docker is lost
+
+This provides a more reliable cleanup mechanism than relying solely on Python's garbage collection or process termination handlers.
diff --git a/docs/features/networking.md b/docs/features/networking.md
index cb7730a88..6ebabe532 100644
--- a/docs/features/networking.md
+++ b/docs/features/networking.md
@@ -6,12 +6,64 @@ Testcontainers-Python provides several ways to configure networking between cont

## Connection Modes

Testcontainers-Python supports three connection modes that determine how containers are accessed:

-1. `bridge_ip` (default): Uses the bridge network IP address
-2. `gateway_ip`: Uses the gateway IP address
-3. `docker_host`: Uses the Docker host address
+1. `bridge_ip` (default): Uses the bridge network IP address. Best for:
+
+    - Docker-in-Docker (DinD) scenarios
+    - When containers need to communicate over a bridge network
+    - When you need direct container-to-container communication
+
+2. `gateway_ip`: Uses the gateway IP address. Best for:
+
+    - Docker-in-Docker (DinD) scenarios
+    - When containers need to access the host network
+    - When you need to access services running on the host
+
+3. `docker_host`: Uses the Docker host address. Best for:
+
+    - Local development
+    - When running tests outside of containers
+    - When you need to access containers from the host machine

You can set the connection mode using the `TESTCONTAINERS_CONNECTION_MODE` environment variable or the `connection.mode` property in `.testcontainers.properties`.
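+For example, to pin the mode for a local run, the variable named above can be exported before the tests start (the chosen value is illustrative):
+
+```bash
+export TESTCONTAINERS_CONNECTION_MODE=docker_host
+```
+
+or, equivalently, add `connection.mode=docker_host` to `.testcontainers.properties`.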
+## Port Exposure + +Testcontainers-Python provides two methods for exposing container ports, with `with_exposed_ports` being the recommended approach: + +### Exposing Ports with Random Host Ports (Recommended) + +```python +from testcontainers.core.container import DockerContainer + +container = DockerContainer("nginx:alpine") +container.with_exposed_ports(80, "443/tcp") # Expose ports, host ports will be assigned randomly +container.start() +mapped_port = container.get_exposed_port(80) # Get the randomly assigned host port +``` + +This is the preferred method because it: + +- Avoids port conflicts in parallel test execution +- Is more secure as it doesn't expose fixed ports +- Matches the behavior of other testcontainers implementations +- Allows for better isolation between test runs + +### Binding to Specific Host Ports (Not Recommended) + +```python +container = DockerContainer("nginx:alpine") +container.with_bind_ports(80, 8080) # Map container port 80 to host port 8080 +container.with_bind_ports("443/tcp", 8443) # Map container port 443 to host port 8443 +``` + +Use `with_bind_ports` only in specific cases where you absolutely need a fixed port number, such as: + +- When testing with tools that require specific port numbers +- When integrating with external systems that can't handle dynamic ports +- When debugging and need consistent port numbers + +Note that using fixed ports can cause conflicts when running tests in parallel and may lead to test failures if the specified ports are already in use. + ## Creating Networks You can create isolated networks for your containers: @@ -39,18 +91,6 @@ with container1, container2: pass ``` -## Port Mapping - -You can map container ports to host ports: - -```python -from testcontainers.core.container import DockerContainer - -container = DockerContainer("nginx:alpine") -container.with_bind_ports(80, 8080) # Map container port 80 to host port 8080 -container.with_bind_ports("443/tcp", 8443) # Map container port 443 to host port 8443 -``` - ## Container Communication Containers can communicate with each other in several ways: @@ -127,11 +167,38 @@ def test_multi_container_app(): ## Best Practices -1. Use isolated networks for tests -2. Use meaningful network aliases -3. Avoid using host networking unless necessary -4. Use appropriate connection modes for your environment -5. Clean up networks after tests -6. Use port mapping for external access -7. Consider using Docker Compose for complex setups -8. Use environment variables for configuration +1. **Port Management**: + + - Always use `with_exposed_ports` instead of `with_bind_ports` unless you have a specific requirement for fixed ports + - Use `get_exposed_port` to retrieve the mapped port number when using `with_exposed_ports` + - Avoid hardcoding port numbers in your tests + +2. **Network Configuration**: + + - Use isolated networks for tests to prevent conflicts + - Use meaningful network aliases for better readability and maintainability + - Avoid using host networking unless absolutely necessary + - Use the appropriate connection mode for your environment: + - `bridge_ip` for Docker-in-Docker (DinD) scenarios + - `gateway_ip` for accessing host network services + - `docker_host` for local development + +3. **Container Communication**: + + - Use network aliases for container-to-container communication + - Use environment variables for configuration + - Consider using Docker Compose for complex multi-container setups + +4. 
**Resource Management**: + + - Always use context managers (`with` statements) to ensure proper cleanup + - Let the Ryuk container handle cleanup in case of unexpected termination + - Clean up networks after tests + - Use environment variables for configuration + +5. **Testing Best Practices**: + - Write tests that are independent and can run in parallel + - Avoid dependencies on specific port numbers + - Use meaningful container and network names for debugging + - Consider using Docker Compose for complex setups + - Use environment variables for configuration diff --git a/docs/features/wait_strategies.md b/docs/features/wait_strategies.md index 8c588a710..3bb42eb69 100644 --- a/docs/features/wait_strategies.md +++ b/docs/features/wait_strategies.md @@ -97,6 +97,15 @@ postgres = PostgresContainer() postgres.start() # Will wait until PostgreSQL is ready to accept connections ``` +## Ryuk Container Wait Behavior + +The Ryuk container (used for garbage collection) has its own wait mechanism that combines log-based and connection-based waiting: + +1. **Log-based Wait**: Waits for the message ".\* Started!" with a 20-second timeout +2. **Connection Wait**: After the logs are found, attempts to establish a socket connection to the Ryuk container, retrying up to 50 times with a 0.5-second interval between attempts + +This ensures that the Ryuk container is fully operational before any test containers are started. + ## Configuring Wait Behavior You can configure the wait behavior using environment variables: diff --git a/docs/index.md b/docs/index.md index 05d5d8e55..76c92de39 100644 --- a/docs/index.md +++ b/docs/index.md @@ -25,6 +25,8 @@ Inline documentation and docs where the code live is crucial for us. Testcontain ## Who is using Testcontainers Python? - [Timescale](https://www.timescale.com/) - Uses testcontainers-python in their pgai project for testing PostgreSQL integrations, AI model interactions, and AWS service integrations +- [Redis](https://redis.io/) - Depends on testcontainers-python for their redis vector library implementation +- [Apache](https://skywalking.apache.org/) - Uses testcontainers-python in their Skywalking project for application performance monitor tool in distributed systems. ## License diff --git a/docs/quickstart.md b/docs/quickstart.md index dc1f4ff61..9a4ca5260 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -5,7 +5,7 @@ up and manage the dependencies life cycle via Docker. ## 1. System requirements -Please read the [system requirements](../system_requirements) page before you start. +Please read the [system requirements](system_requirements/index.md) page before you start. ## 2. Install _Testcontainers for Python_ @@ -56,7 +56,7 @@ The container is automatically cleaned up when the test finishes, thanks to the !!!tip - Look at [features/garbage_collector](/features/garbage_collector) to know another way to + Look at [the garbage collector](features/garbage_collector.md) to know another way to clean up resources. ## 4. 
Make your code to talk with the container diff --git a/mkdocs.yml b/mkdocs.yml index 273909fa6..aca8281b7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -48,6 +48,8 @@ nav: - Wait Strategies: features/wait_strategies.md - Docker Compose: features/docker_compose.md - Networking: features/networking.md + - Garbage Collector: features/garbage_collector.md + - Advanced Features: features/advanced_features.md - Modules: - Databases: - modules/arangodb.md From 2c3a2174b6e6c84531a98bfd939e821cdabae0ba Mon Sep 17 00:00:00 2001 From: Terry Date: Thu, 22 May 2025 15:42:06 -0300 Subject: [PATCH 11/16] docs: update the pip install instructions --- docs/modules/arangodb.md | 2 +- docs/modules/aws.md | 6 +++--- docs/modules/azurite.md | 4 ++-- docs/modules/cassandra.md | 2 +- docs/modules/chroma.md | 2 +- docs/modules/clickhouse.md | 2 +- docs/modules/cockroachdb.md | 4 ++-- docs/modules/cosmosdb.md | 4 ++-- docs/modules/db2.md | 4 ++-- docs/modules/elasticsearch.md | 2 +- docs/modules/google.md | 10 +++++----- docs/modules/influxdb.md | 10 ++++++++-- docs/modules/k3s.md | 4 ++-- docs/modules/kafka.md | 2 +- docs/modules/keycloak.md | 4 ++-- docs/modules/localstack.md | 4 ++-- docs/modules/mailpit.md | 4 ++-- docs/modules/memcached.md | 4 ++-- docs/modules/milvus.md | 4 ++-- docs/modules/minio.md | 4 ++-- docs/modules/mongodb.md | 4 ++-- docs/modules/mqtt.md | 4 ++-- docs/modules/mssql.md | 8 ++++---- docs/modules/mysql.md | 4 ++-- docs/modules/nats.md | 4 ++-- docs/modules/neo4j.md | 4 ++-- docs/modules/nginx.md | 4 ++-- docs/modules/ollama.md | 4 ++-- docs/modules/opensearch.md | 4 ++-- docs/modules/oracle-free.md | 2 +- docs/modules/postgres.md | 4 ++-- docs/modules/qdrant.md | 2 +- docs/modules/rabbitmq.md | 4 ++-- docs/modules/redis.md | 4 ++-- docs/modules/registry.md | 2 +- docs/modules/selenium.md | 4 ++-- docs/modules/sftp.md | 4 ++-- docs/modules/test_module_import.md | 2 +- docs/modules/trino.md | 4 ++-- docs/modules/vault.md | 4 ++-- docs/modules/weaviate.md | 4 ++-- 41 files changed, 84 insertions(+), 78 deletions(-) diff --git a/docs/modules/arangodb.md b/docs/modules/arangodb.md index 97834f90b..e342c9c9f 100644 --- a/docs/modules/arangodb.md +++ b/docs/modules/arangodb.md @@ -11,7 +11,7 @@ The Testcontainers module for ArangoDB. Please run the following command to add the ArangoDB module to your python dependencies: ```bash -pip install testcontainers[arangodb] +pip install testcontainers[arangodb] python-arango ``` ## Usage example diff --git a/docs/modules/aws.md b/docs/modules/aws.md index dcb512444..8fb1ea412 100644 --- a/docs/modules/aws.md +++ b/docs/modules/aws.md @@ -4,14 +4,14 @@ Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Azurite. Please run the following command to add the Azurite module to your python dependencies: ```bash -pip install testcontainers[azurite] +pip install testcontainers[azurite] azure-storage-blob ``` ## Usage example diff --git a/docs/modules/cassandra.md b/docs/modules/cassandra.md index 68a4be83c..3250b737f 100644 --- a/docs/modules/cassandra.md +++ b/docs/modules/cassandra.md @@ -11,7 +11,7 @@ The Testcontainers module for Cassandra. 
Please run the following command to add the Cassandra module to your python dependencies: ```bash -pip install testcontainers[cassandra] +pip install testcontainers[cassandra] cassandra-driver ``` ## Usage example diff --git a/docs/modules/chroma.md b/docs/modules/chroma.md index 1b7e80fcd..ae2e45dcf 100644 --- a/docs/modules/chroma.md +++ b/docs/modules/chroma.md @@ -11,7 +11,7 @@ The Testcontainers module for Chroma. Please run the following command to add the Chroma module to your python dependencies: ```bash -pip install testcontainers[chroma] +pip install testcontainers[chroma] chromadb requests ``` ## Usage example diff --git a/docs/modules/clickhouse.md b/docs/modules/clickhouse.md index 90f425955..ed86c3f32 100644 --- a/docs/modules/clickhouse.md +++ b/docs/modules/clickhouse.md @@ -11,7 +11,7 @@ The Testcontainers module for ClickHouse. Please run the following command to add the ClickHouse module to your python dependencies: ```bash -pip install testcontainers[clickhouse] +pip install testcontainers[clickhouse] clickhouse-driver ``` ## Usage example diff --git a/docs/modules/cockroachdb.md b/docs/modules/cockroachdb.md index 1a8018052..285ffc92d 100644 --- a/docs/modules/cockroachdb.md +++ b/docs/modules/cockroachdb.md @@ -1,6 +1,6 @@ # CockroachDB -Since testcontainers-python :material-tag: v4.6.0 +Since testcontainers-python :material-tag: v4.7.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for CockroachDB. Please run the following command to add the CockroachDB module to your python dependencies: ```bash -pip install testcontainers[cockroachdb] +pip install testcontainers[cockroachdb] sqlalchemy psycopg2 ``` ## Usage example diff --git a/docs/modules/cosmosdb.md b/docs/modules/cosmosdb.md index 222209ace..3aadbe6b3 100644 --- a/docs/modules/cosmosdb.md +++ b/docs/modules/cosmosdb.md @@ -4,14 +4,14 @@ Since testcontainers-python :material-tag: v4.4.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction diff --git a/docs/modules/google.md b/docs/modules/google.md index ff9dd3aef..f228e6c99 100644 --- a/docs/modules/google.md +++ b/docs/modules/google.md @@ -1,23 +1,23 @@ # Google -Since testcontainers-python :material-tag: v4.7.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction -The Testcontainers module for Google Cloud services, including Cloud Storage, Pub/Sub, BigQuery, and Datastore. +The Testcontainers module for Google Cloud services. ## Adding this module to your project dependencies Please run the following command to add the Google module to your python dependencies: -``` -pip install testcontainers[google] +```bash +pip install testcontainers[google] google-cloud-datastore google-cloud-pubsub ``` ## Usage example -[Creating a Google Cloud container](../../modules/google/example_basic.py) +[Creating a Google container](../../modules/google/example_basic.py) diff --git a/docs/modules/influxdb.md b/docs/modules/influxdb.md index dbdc01abf..3d9f1f314 100644 --- a/docs/modules/influxdb.md +++ b/docs/modules/influxdb.md @@ -1,6 +1,6 @@ # InfluxDB -Since testcontainers-python :material-tag: v4.4.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,13 @@ The Testcontainers module for InfluxDB. 
Please run the following command to add the InfluxDB module to your python dependencies: ```bash -pip install testcontainers[influxdb] +# For InfluxDB 1.x +pip install testcontainers[influxdb] influxdb + +# For InfluxDB 2.x +pip install testcontainers[influxdb] influxdb-client ``` ## Usage example + +[Creating an InfluxDB container](../../modules/influxdb/example_basic.py) diff --git a/docs/modules/k3s.md b/docs/modules/k3s.md index 67c3772a0..66d26d0fc 100644 --- a/docs/modules/k3s.md +++ b/docs/modules/k3s.md @@ -1,6 +1,6 @@ # K3s -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for K3s. Please run the following command to add the K3s module to your python dependencies: ```bash -pip install testcontainers[k3s] +pip install testcontainers[k3s] kubernetes pyyaml ``` ## Usage example diff --git a/docs/modules/kafka.md b/docs/modules/kafka.md index 3a0a6083e..3a206bb3d 100644 --- a/docs/modules/kafka.md +++ b/docs/modules/kafka.md @@ -1,6 +1,6 @@ # Kafka -Since testcontainers-python :material-tag: v4.4.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction diff --git a/docs/modules/keycloak.md b/docs/modules/keycloak.md index 9148badbf..98b638380 100644 --- a/docs/modules/keycloak.md +++ b/docs/modules/keycloak.md @@ -1,6 +1,6 @@ # Keycloak -Since testcontainers-python :material-tag: v4.8.2 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Keycloak. Please run the following command to add the Keycloak module to your python dependencies: ```bash -pip install testcontainers[keycloak] +pip install testcontainers[keycloak] python-keycloak requests ``` ## Usage example diff --git a/docs/modules/localstack.md b/docs/modules/localstack.md index 132682828..6c67d6696 100644 --- a/docs/modules/localstack.md +++ b/docs/modules/localstack.md @@ -1,6 +1,6 @@ # LocalStack -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for LocalStack. Please run the following command to add the LocalStack module to your python dependencies: ```bash -pip install testcontainers[localstack] +pip install testcontainers[localstack] boto3 ``` ## Usage example diff --git a/docs/modules/mailpit.md b/docs/modules/mailpit.md index 2a561d325..ca7d49364 100644 --- a/docs/modules/mailpit.md +++ b/docs/modules/mailpit.md @@ -1,6 +1,6 @@ # Mailpit -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.7.1 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Mailpit. Please run the following command to add the Mailpit module to your python dependencies: ```bash -pip install testcontainers[mailpit] +pip install testcontainers[mailpit] cryptography ``` ## Usage example diff --git a/docs/modules/memcached.md b/docs/modules/memcached.md index 04e3b1cf8..5d18fafc0 100644 --- a/docs/modules/memcached.md +++ b/docs/modules/memcached.md @@ -1,6 +1,6 @@ # Memcached -Since testcontainers-python :material-tag: v4.4.1 +Since testcontainers-python :material-tag: v4.7.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Memcached. 
Please run the following command to add the Memcached module to your python dependencies: ```bash -pip install testcontainers[memcached] +pip install testcontainers[memcached] pymemcache ``` ## Usage example diff --git a/docs/modules/milvus.md b/docs/modules/milvus.md index 6e762362f..9c7beda6d 100644 --- a/docs/modules/milvus.md +++ b/docs/modules/milvus.md @@ -1,6 +1,6 @@ # Milvus -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.7.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Milvus. Please run the following command to add the Milvus module to your python dependencies: ```bash -pip install testcontainers[milvus] +pip install testcontainers[milvus] requests ``` ## Usage example diff --git a/docs/modules/minio.md b/docs/modules/minio.md index fd904191b..15ea1b6ef 100644 --- a/docs/modules/minio.md +++ b/docs/modules/minio.md @@ -1,6 +1,6 @@ # MinIO -Since testcontainers-python :material-tag: v4.4.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for MinIO. Please run the following command to add the MinIO module to your python dependencies: ```bash -pip install testcontainers[minio] +pip install testcontainers[minio] minio requests ``` ## Usage example diff --git a/docs/modules/mongodb.md b/docs/modules/mongodb.md index 6db2ece6b..0c2d2d75d 100644 --- a/docs/modules/mongodb.md +++ b/docs/modules/mongodb.md @@ -1,6 +1,6 @@ # MongoDB -Since testcontainers-python :material-tag: v4.3.1 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for MongoDB. Please run the following command to add the MongoDB module to your python dependencies: ```bash -pip install testcontainers[mongodb] +pip install testcontainers[mongodb] pymongo ``` ## Usage example diff --git a/docs/modules/mqtt.md b/docs/modules/mqtt.md index 6b2afefeb..c290532fd 100644 --- a/docs/modules/mqtt.md +++ b/docs/modules/mqtt.md @@ -1,6 +1,6 @@ # MQTT -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.7.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for MQTT. Please run the following command to add the MQTT module to your python dependencies: ```bash -pip install testcontainers[mqtt] +pip install testcontainers[mqtt] paho-mqtt ``` ## Usage example diff --git a/docs/modules/mssql.md b/docs/modules/mssql.md index cde204395..89658c74f 100644 --- a/docs/modules/mssql.md +++ b/docs/modules/mssql.md @@ -1,23 +1,23 @@ # MSSQL -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction -The Testcontainers module for MSSQL. +The Testcontainers module for Microsoft SQL Server. ## Adding this module to your project dependencies Please run the following command to add the MSSQL module to your python dependencies: ```bash -pip install testcontainers[mssql] +pip install testcontainers[mssql] sqlalchemy pymssql ``` ## Usage example -[Creating an MSSQL container](../modules/mssql/example_basic.py) +[Creating an MSSQL container](../../modules/mssql/example_basic.py) diff --git a/docs/modules/mysql.md b/docs/modules/mysql.md index 3f24c6e1e..e3ca18ae7 100644 --- a/docs/modules/mysql.md +++ b/docs/modules/mysql.md @@ -1,6 +1,6 @@ # MySQL -Since testcontainers-python :material-tag: v4.4.1 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for MySQL. 
Please run the following command to add the MySQL module to your python dependencies: ```bash -pip install testcontainers[mysql] +pip install testcontainers[mysql] sqlalchemy pymysql ``` ## Usage example diff --git a/docs/modules/nats.md b/docs/modules/nats.md index 9800ff8f7..e3616e490 100644 --- a/docs/modules/nats.md +++ b/docs/modules/nats.md @@ -1,6 +1,6 @@ # NATS -Since testcontainers-python :material-tag: v4.4.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for NATS. Please run the following command to add the NATS module to your python dependencies: ```bash -pip install testcontainers[nats] +pip install testcontainers[nats] nats-py ``` ## Usage example diff --git a/docs/modules/neo4j.md b/docs/modules/neo4j.md index 3c56ba5d0..047dd1de3 100644 --- a/docs/modules/neo4j.md +++ b/docs/modules/neo4j.md @@ -1,6 +1,6 @@ # Neo4j -Since testcontainers-python :material-tag: v4.4.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Neo4j. Please run the following command to add the Neo4j module to your python dependencies: ```bash -pip install testcontainers[neo4j] +pip install testcontainers[neo4j] neo4j ``` ## Usage example diff --git a/docs/modules/nginx.md b/docs/modules/nginx.md index 5dfafc0c5..6781c1a88 100644 --- a/docs/modules/nginx.md +++ b/docs/modules/nginx.md @@ -1,6 +1,6 @@ # Nginx -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -18,6 +18,6 @@ pip install testcontainers[nginx] -[Creating an Nginx container](../../modules/nginx/example_basic.py) +[Creating a Nginx container](../../modules/nginx/example_basic.py) diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md index e8ca5c573..c9db6e14f 100644 --- a/docs/modules/ollama.md +++ b/docs/modules/ollama.md @@ -1,6 +1,6 @@ # Ollama -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.7.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Ollama. Please run the following command to add the Ollama module to your python dependencies: ```bash -pip install testcontainers[ollama] +pip install testcontainers[ollama] requests ``` ## Usage example diff --git a/docs/modules/opensearch.md b/docs/modules/opensearch.md index f0113920f..d57ee45a7 100644 --- a/docs/modules/opensearch.md +++ b/docs/modules/opensearch.md @@ -1,6 +1,6 @@ # OpenSearch -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for OpenSearch. 
Please run the following command to add the OpenSearch module to your python dependencies: ```bash -pip install testcontainers[opensearch] +pip install testcontainers[opensearch] opensearch-py ``` ## Usage example diff --git a/docs/modules/oracle-free.md b/docs/modules/oracle-free.md index cd0cc7175..a0b68d18d 100644 --- a/docs/modules/oracle-free.md +++ b/docs/modules/oracle-free.md @@ -1,6 +1,6 @@ # Oracle Free -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction diff --git a/docs/modules/postgres.md b/docs/modules/postgres.md index 1b51257ce..4b381753f 100644 --- a/docs/modules/postgres.md +++ b/docs/modules/postgres.md @@ -1,6 +1,6 @@ # PostgreSQL -Since testcontainers-python :material-tag: v4.8.2 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for PostgreSQL. Please run the following command to add the PostgreSQL module to your python dependencies: ```bash -pip install testcontainers[postgres] +pip install testcontainers[postgres] sqlalchemy psycopg2 ``` ## Usage example diff --git a/docs/modules/qdrant.md b/docs/modules/qdrant.md index 87791de1f..c4eb7310f 100644 --- a/docs/modules/qdrant.md +++ b/docs/modules/qdrant.md @@ -1,6 +1,6 @@ # Qdrant -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction diff --git a/docs/modules/rabbitmq.md b/docs/modules/rabbitmq.md index 7616f6ebd..850b2739f 100644 --- a/docs/modules/rabbitmq.md +++ b/docs/modules/rabbitmq.md @@ -1,6 +1,6 @@ # RabbitMQ -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for RabbitMQ. Please run the following command to add the RabbitMQ module to your python dependencies: ```bash -pip install testcontainers[rabbitmq] +pip install testcontainers[rabbitmq] pika ``` ## Usage example diff --git a/docs/modules/redis.md b/docs/modules/redis.md index b5f8598ee..16f8566e2 100644 --- a/docs/modules/redis.md +++ b/docs/modules/redis.md @@ -1,6 +1,6 @@ # Redis -Since testcontainers-python :material-tag: v4.4.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Redis. Please run the following command to add the Redis module to your python dependencies: ```bash -pip install testcontainers[redis] +pip install testcontainers[redis] redis ``` ## Usage example diff --git a/docs/modules/registry.md b/docs/modules/registry.md index a1a123156..b00380d8c 100644 --- a/docs/modules/registry.md +++ b/docs/modules/registry.md @@ -1,6 +1,6 @@ # Registry -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction diff --git a/docs/modules/selenium.md b/docs/modules/selenium.md index 793b23a27..68b6174a7 100644 --- a/docs/modules/selenium.md +++ b/docs/modules/selenium.md @@ -1,6 +1,6 @@ # Selenium -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Selenium. 
Please run the following command to add the Selenium module to your python dependencies: ```bash -pip install testcontainers[selenium] +pip install testcontainers[selenium] selenium urllib3 ``` ## Usage example diff --git a/docs/modules/sftp.md b/docs/modules/sftp.md index 2606cb898..8fe7ecc5c 100644 --- a/docs/modules/sftp.md +++ b/docs/modules/sftp.md @@ -1,6 +1,6 @@ # SFTP -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.7.1 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for SFTP. Please run the following command to add the SFTP module to your python dependencies: ```bash -pip install testcontainers[sftp] +pip install testcontainers[sftp] paramiko cryptography ``` ## Usage example diff --git a/docs/modules/test_module_import.md b/docs/modules/test_module_import.md index ee9c2ecd3..ed5472756 100644 --- a/docs/modules/test_module_import.md +++ b/docs/modules/test_module_import.md @@ -1,6 +1,6 @@ # Test Module Import -Since testcontainers-python :material-tag: v4.7.1 +Since testcontainers-python :material-tag: v4.7.0 ## Introduction diff --git a/docs/modules/trino.md b/docs/modules/trino.md index 17edcbb1d..3ceda1445 100644 --- a/docs/modules/trino.md +++ b/docs/modules/trino.md @@ -1,6 +1,6 @@ # Trino -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.7.2 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Trino. Please run the following command to add the Trino module to your python dependencies: ```bash -pip install testcontainers[trino] +pip install testcontainers[trino] trino ``` ## Usage example diff --git a/docs/modules/vault.md b/docs/modules/vault.md index 0266614c5..7dc4d1260 100644 --- a/docs/modules/vault.md +++ b/docs/modules/vault.md @@ -1,6 +1,6 @@ # Vault -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.7.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Vault. Please run the following command to add the Vault module to your python dependencies: ```bash -pip install testcontainers[vault] +pip install testcontainers[vault] hvac ``` ## Usage example diff --git a/docs/modules/weaviate.md b/docs/modules/weaviate.md index 2c362769e..90fec975a 100644 --- a/docs/modules/weaviate.md +++ b/docs/modules/weaviate.md @@ -1,6 +1,6 @@ # Weaviate -Since testcontainers-python :material-tag: v4.8.0 +Since testcontainers-python :material-tag: v4.6.0 ## Introduction @@ -11,7 +11,7 @@ The Testcontainers module for Weaviate. Please run the following command to add the Weaviate module to your python dependencies: ```bash -pip install testcontainers[weaviate] +pip install testcontainers[weaviate] weaviate-client ``` ## Usage example From fd9a88b25f70965385981f781d9d27cc6007384d Mon Sep 17 00:00:00 2001 From: ArthurFlag Date: Tue, 3 Jun 2025 15:39:13 +0200 Subject: [PATCH 12/16] shorten content --- docs/quickstart.md | 41 ++++++++++++++++------------------------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/docs/quickstart.md b/docs/quickstart.md index 9a4ca5260..651b53d9b 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -1,15 +1,14 @@ -_Testcontainers for Python_ plays well with Python's testing frameworks like pytest. +_Testcontainers for Python_ integrates seamlessly with Python testing frameworks like [pytest](https://docs.pytest.org/en/stable/). -The ideal use case is for integration or end to end tests. It helps you to spin -up and manage the dependencies life cycle via Docker. 
+It's ideal for integration and end-to-end tests, allowing you to easily manage dependencies using Docker.

## 1. System requirements

-Please read the [system requirements](system_requirements/index.md) page before you start.
+Before you begin, review the [system requirements](system_requirements/index.md).

## 2. Install _Testcontainers for Python_

-You can install testcontainers-python using pip:
+Install testcontainers-python with pip:

```bash
pip install testcontainers
```
@@ -37,33 +36,25 @@ def test_with_redis():
    assert value == "Hello, Redis!"
```

-The `RedisContainer` class provides a convenient way to start a Redis container for testing.
+The `RedisContainer` class makes it easy to start a Redis container for testing:

-- The container is automatically started when entering the context manager (`with` statement)
-- The container is automatically stopped and removed when exiting the context manager
-- `get_container_host_ip()` returns the host IP where the container is running
-- `get_exposed_port()` returns the mapped port on the host
+- The container starts automatically when entering the context manager (`with` statement).
+- It stops and removes itself when exiting the context.
+- `get_container_host_ip()` returns the host IP.
+- `get_exposed_port()` returns the mapped host port.

-When you use `get_exposed_port()`, you have to imagine yourself using `docker run -p <port>`. When you do so, `dockerd` maps the selected `<port>` from inside the
-container to a random one available on your host.
+When using `get_exposed_port()`, think of it as running `docker run -p <port>`. `dockerd` maps the container's internal port to a random available port on your host.

-In the previous example, we expose the default Redis port (6379) for `tcp` traffic to the outside. This
-allows Redis to be reachable from your code that runs outside the container, but
-it also makes parallelization possible because if you run your tests in parallel, each test will get its own Redis container exposed on a different random port.
+In the example above, the default Redis port (6379) is exposed for TCP traffic. This setup allows your code to connect to Redis outside the container and supports parallel test execution. Each test gets its own Redis container on a unique, random port.

-The container is automatically cleaned up when the test finishes, thanks to the context manager (`with` statement). This ensures that no containers are left running after your tests complete.
+The context manager (`with` statement) ensures containers are cleaned up after tests, so no containers are left running.

!!!tip
+    See [the garbage collector](features/garbage_collector.md) for another way to clean up resources.
-
-    Look at [the garbage collector](features/garbage_collector.md) to know another way to
-    clean up resources.

-## 4. Make your code to talk with the container
+## 4. Connect your code to the container

-This is just an example, but usually Python applications that rely on Redis are
-using the [redis-py](https://github.com/redis/redis-py) client. This code gets
-the endpoint from the container we just started, and it configures the client.
+Typically, Python applications use the [redis-py](https://github.com/redis/redis-py) client. The following code retrieves the endpoint from the container and configures the client.

```python
def test_redis_operations():
@@ -97,4 +88,4 @@ pytest test_redis.py

## 6. Want to go deeper with Redis?

-You can find a more elaborated Redis example in our examples section. Please check it out [here](./modules/redis.md).
+You can find a more elaborated Redis example in our [examples section](./modules/redis.md). From dffa1d62da568d2d3cca6cd92c483fd498c41708 Mon Sep 17 00:00:00 2001 From: ArthurFlag Date: Tue, 3 Jun 2025 15:43:53 +0200 Subject: [PATCH 13/16] create containers: harmonize content --- docs/features/creating_container.md | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/docs/features/creating_container.md b/docs/features/creating_container.md index eca96f097..fa3b1190b 100644 --- a/docs/features/creating_container.md +++ b/docs/features/creating_container.md @@ -1,4 +1,4 @@ -# How to create a container +# How to Create a Container Testcontainers-Python is a thin wrapper around Docker designed for use in tests. Anything you can run in Docker, you can spin up with Testcontainers-Python: @@ -29,14 +29,14 @@ def test_basic_container(): ## Advanced Container Configuration -For more complex scenarios, you can use the `run` helper function which provides a high-level interface similar to Docker's `docker run` command. This helper automatically handles: +For more complex scenarios, use the `run` helper function. This high-level interface is similar to `docker run` and automatically handles: - Creating temporary networks - Mounting files or tmpfs - Waiting for container readiness - Container cleanup -Here's an example showing various configuration options: +Example with various configuration options: ```python import io @@ -85,18 +85,16 @@ def test_nginx_advanced(): ## Container Lifecycle Management -Testcontainers-Python provides several ways to manage container lifecycle: - -1. Using context manager (recommended): +Testcontainers-Python offers several ways to manage container lifecycle: +1. **Context manager (recommended):** ```python with GenericContainer("nginx:alpine") as container: # Container is automatically started and stopped pass ``` -2. Manual management: - +2. **Manual management:** ```python container = GenericContainer("nginx:alpine") container.start() @@ -108,8 +106,7 @@ finally: container.remove() ``` -3. Using pytest fixtures: - +3. **Pytest fixtures:** ```python import pytest from testcontainers.generic import GenericContainer @@ -129,7 +126,7 @@ def test_with_nginx(nginx_container): ## Container Readiness -For information about waiting for containers to be ready, see the [Wait Strategies](wait_strategies.md) documentation. +For details on waiting for containers to be ready, see [Wait strategies](wait_strategies.md). ## Best Practices From c341b9f61fca8d2fc0a3fa1717fd631d767cd067 Mon Sep 17 00:00:00 2001 From: ArthurFlag Date: Tue, 3 Jun 2025 15:48:58 +0200 Subject: [PATCH 14/16] config --- docs/features/configuration.md | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/features/configuration.md b/docs/features/configuration.md index 835bedb79..9b3e95681 100644 --- a/docs/features/configuration.md +++ b/docs/features/configuration.md @@ -1,20 +1,20 @@ -# Custom configuration +# Custom Configuration -You can override some default properties if your environment requires that. +You can override some default properties if your environment requires it. -## Configuration locations +## Configuration Locations The configuration will be loaded from multiple locations. Properties are considered in the following order: 1. Environment variables -2. `.testcontainers.properties` in user's home folder. Example locations: +2. `.testcontainers.properties` in the user's home folder. 
Example locations: **Linux:** `/home/myuser/.testcontainers.properties` **Windows:** `C:/Users/myuser/.testcontainers.properties` **macOS:** `/Users/myuser/.testcontainers.properties` -Note that when using environment variables, configuration property names should be set in upper case with underscore separators, preceded by `TESTCONTAINERS_` - e.g. `ryuk.disabled` becomes `TESTCONTAINERS_RYUK_DISABLED`. +Note that when using environment variables, configuration property names should be set in uppercase with underscore separators, preceded by `TESTCONTAINERS_` - e.g. `ryuk.disabled` becomes `TESTCONTAINERS_RYUK_DISABLED`. -### Supported properties +### Supported Properties Testcontainers-Python provides a configuration class to represent the settings: @@ -57,7 +57,7 @@ Additional configuration options: | `TC_POOLING_INTERVAL` | Time between connection attempts | 1 | | `DOCKER_AUTH_CONFIG` | Docker authentication config (experimental) | - | -## Docker host detection +## Docker Host Detection Testcontainers-Python will attempt to detect the Docker environment and configure everything to work automatically. @@ -74,7 +74,7 @@ However, sometimes customization is required. Testcontainers-Python will respect 5. Read the **docker.host** property in the `~/.testcontainers.properties` file. E.g. `docker.host=tcp://my.docker.host:1234` -6. Read the rootless Docker socket path, checking in the following alternative locations: +6. Read the rootless Docker socket path, checking the following alternative locations: 1. `${XDG_RUNTIME_DIR}/.docker/run/docker.sock` 2. `${HOME}/.docker/run/docker.sock` @@ -83,7 +83,7 @@ However, sometimes customization is required. Testcontainers-Python will respect 7. The library will raise a `DockerHostError` if none of the above are set, meaning that the Docker host was not detected. -## Docker socket path detection +## Docker Socket Path Detection Testcontainers-Python will attempt to detect the Docker socket path and configure everything to work automatically. @@ -92,13 +92,13 @@ However, sometimes customization is required. Testcontainers-Python will respect 1. Read the **TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE** environment variable. Path to Docker's socket. Used by Ryuk, Docker Compose, and a few other containers that need to perform Docker actions. Example: `/var/run/docker-alt.sock` -2. If the Operating System retrieved by the Docker client is "Docker Desktop", and the host is running on Windows, it will return the `//var/run/docker.sock` UNC Path. Else return the default docker socket path for rootless docker. +2. If the operating system retrieved by the Docker client is "Docker Desktop", and the host is running on Windows, it will return the `//var/run/docker.sock` UNC path. Otherwise, it returns the default Docker socket path for rootless Docker. -3. Get the current Docker Host from the existing strategies: see Docker host detection. +3. Get the current Docker host from the existing strategies: see Docker host detection. 4. If the socket contains the unix schema, the schema is removed (e.g. `unix:///var/run/docker.sock` -> `/var/run/docker.sock`) -5. Else, the default location of the docker socket is used: `/var/run/docker.sock` +5. Otherwise, the default location of the Docker socket is used: `/var/run/docker.sock` The library will raise a `DockerHostError` if the Docker host cannot be discovered. 
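For example, if your daemon listens on a non-default socket, the override from step 1 can be set before the test run so that Ryuk and other Docker-dependent containers use it (the path below is illustrative):

```bash
export TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE=/var/run/docker-alt.sock
```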
@@ -112,7 +112,7 @@ Testcontainers-Python supports different connection modes that determine how con You can set the connection mode using the `TESTCONTAINERS_CONNECTION_MODE` environment variable or the `connection.mode` property in `.testcontainers.properties`. -## Example configuration file +## Example Configuration File Here's an example of a `.testcontainers.properties` file: @@ -133,7 +133,7 @@ tc.host=tcp://my.testcontainers.host:1234 connection.mode=bridge_ip ``` -## Using configuration in code +## Using Configuration in Code You can access and modify the configuration programmatically: @@ -148,7 +148,7 @@ sleep_time = testcontainers_config.sleep_time # Changes should be made through environment variables or .testcontainers.properties ``` -## Best practices +## Best Practices 1. Use environment variables for CI/CD environments 2. Use `.testcontainers.properties` for local development From 173905ce44cf8c7a359b3f41edeac1c2367b773c Mon Sep 17 00:00:00 2001 From: ArthurFlag Date: Tue, 3 Jun 2025 16:47:23 +0200 Subject: [PATCH 15/16] add link to main website --- docs/index.md | 22 +++++++++++++--------- docs/quickstart.md | 3 ++- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/docs/index.md b/docs/index.md index 76c92de39..3defde8c3 100644 --- a/docs/index.md +++ b/docs/index.md @@ -12,25 +12,29 @@ Ruby
    -## About Testcontainers for Python +## About Testcontainers For Python -_Testcontainers for Python_ is a Python library that that makes it simple to create and clean up container-based dependencies for automated integration/smoke tests. The clean, easy-to-use API enables developers to programmatically define containers that should be run as part of a test and clean up those resources when the test is done. +_Testcontainers for Python_ is a Python library that makes it simple to create and clean up container-based dependencies for automated integration or smoke tests. The easy-to-use API enables developers to programmatically define containers that should be run as part of a test and clean up those resources when the test is done. -To start using _Testcontainers for Python_ please read our [quickstart guide](quickstart.md) +To start using _Testcontainers for Python_, see the [quickstart guide](quickstart.md). + +!!!note + + If you need a high-level explanation of _Testcontainers_, see the [main website](https://testcontainers.com/getting-started/). ## Code Comments -Inline documentation and docs where the code live is crucial for us. Testcontainers Python follows [PEP 257](https://peps.python.org/pep-0257/){:target="\_blank"} comment conventions. The codebase previously supported Sphinx so you may encounter comments not yet updated for the new documentation style. +Inline documentation and docs where the code lives are crucial for us. Testcontainers For Python follows the [PEP 257](https://peps.python.org/pep-0257/){:target="\_blank"} comment conventions. The codebase previously supported Sphinx, so you may encounter comments not yet updated for the new documentation style. -## Who is using Testcontainers Python? +## Who Is Using Testcontainers Python? -- [Timescale](https://www.timescale.com/) - Uses testcontainers-python in their pgai project for testing PostgreSQL integrations, AI model interactions, and AWS service integrations -- [Redis](https://redis.io/) - Depends on testcontainers-python for their redis vector library implementation -- [Apache](https://skywalking.apache.org/) - Uses testcontainers-python in their Skywalking project for application performance monitor tool in distributed systems. +- [Timescale](https://www.timescale.com/) - Uses testcontainers-python in their pgai project for testing PostgreSQL integrations, AI model interactions, and AWS service integrations. +- [Redis](https://redis.io/) - Depends on testcontainers-python for their Redis vector library implementation. +- [Apache](https://skywalking.apache.org/) - Uses testcontainers-python in their Skywalking project for an application performance monitoring tool in distributed systems. ## License -See [LICENSE](https://raw.githubusercontent.com/testcontainers/testcontainers-python/refs/heads/main/LICENSE.txt){:target="\_blank"} . +See [LICENSE](https://raw.githubusercontent.com/testcontainers/testcontainers-python/refs/heads/main/LICENSE.txt){:target="\_blank"}. ## Attributions diff --git a/docs/quickstart.md b/docs/quickstart.md index 651b53d9b..83b0454ff 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -50,7 +50,8 @@ In the example above, the default Redis port (6379) is exposed for TCP traffic. The context manager (`with` statement) ensures containers are cleaned up after tests, so no containers are left running. !!!tip - See [the garbage collector](features/garbage_collector.md) for another way to clean up resources. 
+ + See [the garbage collector](features/garbage_collector.md) for another way to clean up resources. ## 4. Connect your code to the container From 2143e2b83082f8163ad58f7d246913a29f0f803e Mon Sep 17 00:00:00 2001 From: Terry Date: Wed, 4 Jun 2025 17:19:59 -0300 Subject: [PATCH 16/16] docs: fix docs build failure due to bad example code --- docs/modules/influxdb.md | 4 + docs/modules/mssql.md | 2 +- docs/modules/mssql/example_basic.py | 50 ----------- modules/influxdb/example_basic.py | 132 ++++++++++++---------------- modules/mssql/example_basic.py | 9 +- 5 files changed, 63 insertions(+), 134 deletions(-) delete mode 100644 docs/modules/mssql/example_basic.py diff --git a/docs/modules/influxdb.md b/docs/modules/influxdb.md index 3d9f1f314..9541db7a4 100644 --- a/docs/modules/influxdb.md +++ b/docs/modules/influxdb.md @@ -20,4 +20,8 @@ pip install testcontainers[influxdb] influxdb-client ## Usage example + + [Creating an InfluxDB container](../../modules/influxdb/example_basic.py) + + diff --git a/docs/modules/mssql.md b/docs/modules/mssql.md index 89658c74f..effac8c75 100644 --- a/docs/modules/mssql.md +++ b/docs/modules/mssql.md @@ -11,7 +11,7 @@ The Testcontainers module for Microsoft SQL Server. Please run the following command to add the MSSQL module to your python dependencies: ```bash -pip install testcontainers[mssql] sqlalchemy pymssql +pip install testcontainers[mssql] pymssql ``` ## Usage example diff --git a/docs/modules/mssql/example_basic.py b/docs/modules/mssql/example_basic.py deleted file mode 100644 index 78287bc70..000000000 --- a/docs/modules/mssql/example_basic.py +++ /dev/null @@ -1,50 +0,0 @@ -import sqlalchemy - -from testcontainers.mssql import SqlServerContainer - - -def basic_example(): - # Create a SQL Server container with default settings - with SqlServerContainer() as mssql: - # Get the connection URL - connection_url = mssql.get_connection_url() - print(f"Connection URL: {connection_url}") - - # Create a SQLAlchemy engine - engine = sqlalchemy.create_engine(connection_url) - - # Create a test table and insert some data - with engine.begin() as connection: - # Create a test table - connection.execute( - sqlalchemy.text(""" - CREATE TABLE test_table ( - id INT IDENTITY(1,1) PRIMARY KEY, - name NVARCHAR(50), - value INT - ) - """) - ) - print("Created test table") - - # Insert some test data - connection.execute( - sqlalchemy.text(""" - INSERT INTO test_table (name, value) - VALUES - ('test1', 100), - ('test2', 200), - ('test3', 300) - """) - ) - print("Inserted test data") - - # Query the data - result = connection.execute(sqlalchemy.text("SELECT * FROM test_table")) - print("\nQuery results:") - for row in result: - print(f"id: {row[0]}, name: {row[1]}, value: {row[2]}") - - -if __name__ == "__main__": - basic_example() diff --git a/modules/influxdb/example_basic.py b/modules/influxdb/example_basic.py index 580c4aa53..94154b034 100644 --- a/modules/influxdb/example_basic.py +++ b/modules/influxdb/example_basic.py @@ -44,92 +44,63 @@ def basic_example(): query_api = client.query_api() # Query data - query = f''' - from(bucket: "{bucket}") - |> range(start: -1h) - |> filter(fn: (r) => r["_measurement"] == "test_measurement") - ''' + query = f'from(bucket: "{bucket}") |> range(start: -1h) |> filter(fn: (r) => r["_measurement"] == "test_measurement")' result = query_api.query(query) print("\nQuery results:") for table in result: for record in table.records: - print( - json.dumps( - { - "measurement": record.get_measurement(), - "time": 
diff --git a/modules/influxdb/example_basic.py b/modules/influxdb/example_basic.py
index 580c4aa53..94154b034 100644
--- a/modules/influxdb/example_basic.py
+++ b/modules/influxdb/example_basic.py
@@ -44,92 +44,63 @@ def basic_example():
         query_api = client.query_api()
 
         # Query data
-        query = f'''
-        from(bucket: "{bucket}")
-        |> range(start: -1h)
-        |> filter(fn: (r) => r["_measurement"] == "test_measurement")
-        '''
+        query = f'from(bucket: "{bucket}") |> range(start: -1h) |> filter(fn: (r) => r["_measurement"] == "test_measurement")'
 
         result = query_api.query(query)
         print("\nQuery results:")
         for table in result:
             for record in table.records:
-                print(
-                    json.dumps(
-                        {
-                            "measurement": record.get_measurement(),
-                            "time": record.get_time().isoformat(),
-                            "location": record.values.get("location"),
-                            "device": record.values.get("device"),
-                            "field": record.get_field(),
-                            "value": record.get_value(),
-                        },
-                        indent=2,
-                    )
-                )
+                record_data = {
+                    "measurement": record.get_measurement(),
+                    "time": record.get_time().isoformat(),
+                    "location": record.values.get("location"),
+                    "device": record.values.get("device"),
+                    "field": record.get_field(),
+                    "value": record.get_value(),
+                }
+                print(json.dumps(record_data, indent=2))
 
         # Create aggregation query
-        agg_query = f'''
-        from(bucket: "{bucket}")
-        |> range(start: -1h)
-        |> filter(fn: (r) => r["_measurement"] == "test_measurement")
-        |> group(columns: ["location"])
-        |> mean()
-        '''
+        agg_query = f'from(bucket: "{bucket}") |> range(start: -1h) |> filter(fn: (r) => r["_measurement"] == "test_measurement") |> group(columns: ["location"]) |> mean()'
 
         agg_result = query_api.query(agg_query)
         print("\nAggregation results:")
         for table in agg_result:
             for record in table.records:
-                print(
-                    json.dumps(
-                        {
-                            "location": record.values.get("location"),
-                            "field": record.get_field(),
-                            "mean": record.get_value(),
-                        },
-                        indent=2,
-                    )
-                )
+                record_data = {
+                    "location": record.values.get("location"),
+                    "field": record.get_field(),
+                    "mean": record.get_value(),
+                }
+                print(json.dumps(record_data, indent=2))
 
         # Create window query
-        window_query = f'''
-        from(bucket: "{bucket}")
-        |> range(start: -1h)
-        |> filter(fn: (r) => r["_measurement"] == "test_measurement")
-        |> window(every: 5m)
-        |> mean()
-        '''
+        window_query = f'from(bucket: "{bucket}") |> range(start: -1h) |> filter(fn: (r) => r["_measurement"] == "test_measurement") |> window(every: 5m) |> mean()'
 
         window_result = query_api.query(window_query)
         print("\nWindow results:")
        for table in window_result:
             for record in table.records:
-                print(
-                    json.dumps(
-                        {
-                            "window_start": record.get_start().isoformat(),
-                            "window_stop": record.get_stop().isoformat(),
-                            "field": record.get_field(),
-                            "mean": record.get_value(),
-                        },
-                        indent=2,
-                    )
-                )
+                record_data = {
+                    "window_start": record.get_start().isoformat(),
+                    "window_stop": record.get_stop().isoformat(),
+                    "field": record.get_field(),
+                    "mean": record.get_value(),
+                }
+                print(json.dumps(record_data, indent=2))
 
         # Create task
-        task_flux = f'''
-        option task = {{
-            name: "test_task",
-            every: 1h
-        }}
-
-        from(bucket: "{bucket}")
-        |> range(start: -1h)
-        |> filter(fn: (r) => r["_measurement"] == "test_measurement")
-        |> mean()
-        |> to(bucket: "{bucket}", measurement: "test_measurement_agg")
-        '''
+        task_flux = (
+            "option task = {\n"
+            '    name: "test_task",\n'
+            "    every: 1h\n"
+            "}\n\n"
+            f'from(bucket: "{bucket}")\n'
+            "    |> range(start: -1h)\n"
+            '    |> filter(fn: (r) => r["_measurement"] == "test_measurement")\n'
+            "    |> mean()\n"
+            f'    |> to(bucket: "{bucket}", measurement: "test_measurement_agg")'
+        )
 
         tasks_api = client.tasks_api()
         task = tasks_api.create_task(name="test_task", flux=task_flux, org=org)
@@ -138,12 +109,13 @@ def basic_example():
         # Get task info
         task_info = tasks_api.find_task_by_id(task.id)
         print("\nTask info:")
-        print(
-            json.dumps(
-                {"id": task_info.id, "name": task_info.name, "status": task_info.status, "every": task_info.every},
-                indent=2,
-            )
-        )
+        task_data = {
+            "id": task_info.id,
+            "name": task_info.name,
+            "status": task_info.status,
+            "every": task_info.every,
+        }
+        print(json.dumps(task_data, indent=2))
 
         # Create dashboard
         dashboards_api = client.dashboards_api()
@@ -159,11 +131,12 @@ def basic_example():
         # Get dashboard info
         dashboard_info = dashboards_api.find_dashboard_by_id(dashboard.id)
         print("\nDashboard info:")
-        print(
-            json.dumps(
-                {"id": dashboard_info.id, "name": dashboard_info.name, "cells": len(dashboard_info.cells)}, indent=2
-            )
-        )
+        dashboard_data = {
+            "id": dashboard_info.id,
+            "name": dashboard_info.name,
+            "cells": len(dashboard_info.cells),
+        }
+        print(json.dumps(dashboard_data, indent=2))
 
         # Create bucket
         buckets_api = client.buckets_api()
@@ -173,7 +146,12 @@ def basic_example():
         # Get bucket info
         bucket_info = buckets_api.find_bucket_by_id(new_bucket.id)
         print("\nBucket info:")
-        print(json.dumps({"id": bucket_info.id, "name": bucket_info.name, "org_id": bucket_info.org_id}, indent=2))
+        bucket_data = {
+            "id": bucket_info.id,
+            "name": bucket_info.name,
+            "org_id": bucket_info.org_id,
+        }
+        print(json.dumps(bucket_data, indent=2))
 
         # Clean up
         tasks_api.delete_task(task.id)
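For context on the Flux changes above: each multi-line f-string query is collapsed into a single-line string with identical semantics, since the pipeline stages are still chained with `|>`. A minimal sketch of running such a one-line query with `influxdb_client`, using made-up connection values (in the real example they come from the running InfluxDB container):

```python
import json

from influxdb_client import InfluxDBClient

# Made-up connection details; the example derives these from the container.
url, token, org, bucket = "http://localhost:8086", "my-token", "my-org", "my-bucket"

with InfluxDBClient(url=url, token=token, org=org) as client:
    # Single-line Flux query: the same pipeline as the old triple-quoted form.
    flux = f'from(bucket: "{bucket}") |> range(start: -1h) |> mean()'
    for table in client.query_api().query(flux):
        for record in table.records:
            print(json.dumps({"field": record.get_field(), "value": record.get_value()}, indent=2))
```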
diff --git a/modules/mssql/example_basic.py b/modules/mssql/example_basic.py
index bd06205c5..f42e541d1 100644
--- a/modules/mssql/example_basic.py
+++ b/modules/mssql/example_basic.py
@@ -1,4 +1,4 @@
-import pyodbc
+import pymssql
 
 from testcontainers.mssql import MsSqlContainer
 
@@ -12,11 +12,8 @@ def basic_example():
         password = mssql.password
         database = mssql.database
 
-        # Create connection string
-        conn_str = f"DRIVER={{ODBC Driver 17 for SQL Server}};SERVER={host},{port};DATABASE={database};UID={username};PWD={password}"
-
         # Connect to MSSQL
-        connection = pyodbc.connect(conn_str)
+        connection = pymssql.connect(server=host, port=port, user=username, password=password, database=database)
         print("Connected to MSSQL")
 
         # Create cursor
@@ -40,7 +37,7 @@ def basic_example():
         cursor.executemany(
             """
             INSERT INTO test_table (name, value, category)
-            VALUES (?, ?, ?)
+            VALUES (%s, %s, %s)
             """,
             test_data,
         )
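One detail worth calling out in the `pymssql` migration above: the DB-API paramstyle changes with the driver. `pyodbc` uses `?` (qmark) placeholders, while `pymssql` uses `%s`, which is why the `VALUES` clause is rewritten. A standalone sketch with made-up credentials (the real example reads them from the `MsSqlContainer` instance):

```python
import pymssql

# Made-up connection values; the example derives them from the running container.
connection = pymssql.connect(
    server="localhost", port=1433, user="sa", password="yourStrong(!)Password", database="master"
)
cursor = connection.cursor()
# pymssql expects %s placeholders, not pyodbc's "?".
cursor.execute("SELECT %s + %s", (1, 2))
print(cursor.fetchone())  # (3,)
connection.close()
```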