diff --git a/.gitignore b/.gitignore
index 18837562c..9da9d0d32 100644
--- a/.gitignore
+++ b/.gitignore
@@ -72,3 +72,6 @@ venv
 .python-version
 .env
 .github-token
+
+# docs build
+site/
diff --git a/Dockerfile.docs b/Dockerfile.docs
new file mode 100644
index 000000000..10944a7d2
--- /dev/null
+++ b/Dockerfile.docs
@@ -0,0 +1,5 @@
FROM python:3.11-slim

RUN pip install poetry

WORKDIR /docs
diff --git a/Makefile b/Makefile
index 9c820ffa5..855a9d9c3 100644
--- a/Makefile
+++ b/Makefile
@@ -68,3 +68,39 @@ clean-all: clean ## Remove all generated files and reset the local virtual envir
 .PHONY: help
 help: ## Display command usage
 	@grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+## --------------------------------------
+
+DOCS_CONTAINER=mkdocs-container
+DOCS_IMAGE=mkdocs-poetry
+DOCS_DOCKERFILE := Dockerfile.docs
+
+.PHONY: clean-docs
+clean-docs:
+	@echo "Destroying docs"
+	@if docker ps -a --format '{{.Names}}' | grep -q '^$(DOCS_CONTAINER)$$'; then \
+		docker rm -f $(DOCS_CONTAINER); \
+	fi
+	@if docker images -q $(DOCS_IMAGE) | grep -q .; then \
+		docker rmi $(DOCS_IMAGE); \
+	fi
+
+.PHONY: docs-ensure-image
+docs-ensure-image:
+	@if [ -z "$$(docker images -q $(DOCS_IMAGE))" ]; then \
+		docker build -f $(DOCS_DOCKERFILE) -t $(DOCS_IMAGE) . ; \
+	fi
+
+.PHONY: serve-docs
+serve-docs: docs-ensure-image
+	docker run --rm --name $(DOCS_CONTAINER) -it -p 8000:8000 \
+		-v $(PWD):/testcontainers-python \
+		-w /testcontainers-python \
+		$(DOCS_IMAGE) bash -c "\
+		cd docs && poetry install --no-root && \
+		poetry run mkdocs serve -f ../mkdocs.yml -a 0.0.0.0:8000"
+
+# Needed if dependencies are added to the docs site
+.PHONY: export-docs-deps
+export-docs-deps:
+	cd docs && poetry export --without-hashes --output requirements.txt
diff --git a/core/tests/test_core_registry.py b/core/tests/test_core_registry.py
index 384b06693..36e4730f9 100644
--- a/core/tests/test_core_registry.py
+++ b/core/tests/test_core_registry.py
@@ -18,8 +18,13 @@
 from testcontainers.core.waiting_utils import wait_container_is_ready
 from testcontainers.registry import DockerRegistryContainer
+from testcontainers.core.utils import is_mac


+@pytest.mark.skipif(
+    is_mac(),
+    reason="Docker Desktop on macOS does not support insecure private registries without daemon reconfiguration",
+)
 def test_missing_on_private_registry(monkeypatch):
     username = "user"
     password = "pass"
@@ -41,6 +46,10 @@ def test_missing_on_private_registry(monkeypatch):
     wait_container_is_ready(test_container)


+@pytest.mark.skipif(
+    is_mac(),
+    reason="Docker Desktop on macOS does not support local insecure registries over HTTP without modifying daemon settings",
+)
 @pytest.mark.parametrize(
     "image,tag,username,password",
     [
diff --git a/core/tests/test_docker_in_docker.py b/core/tests/test_docker_in_docker.py
index b07f80e9a..02b8e1fc4 100644
--- a/core/tests/test_docker_in_docker.py
+++ b/core/tests/test_docker_in_docker.py
@@ -15,6 +15,7 @@
 from testcontainers.core.container import DockerContainer
 from testcontainers.core.docker_client import DockerClient, LOGGER
 from testcontainers.core.utils import inside_container
+from testcontainers.core.utils import is_mac
 from testcontainers.core.waiting_utils import wait_for_logs


@@ -36,6 +37,7 @@ def _wait_for_dind_return_ip(client, dind):
     return docker_host_ip


+@pytest.mark.skipif(is_mac(), reason="Docker socket forwarding (socat) is unsupported on Docker Desktop for macOS")
 def test_wait_for_logs_docker_in_docker():
     # real dind isn't possible (AFAIK) in CI
     # forwarding the socket to a container port is at least somewhat the same
@@ -64,6 +66,9 @@ def test_wait_for_logs_docker_in_docker():
     not_really_dind.remove()


+@pytest.mark.skipif(
+    is_mac(), reason="Bridge networking and Docker socket forwarding are not supported on Docker Desktop for macOS"
+)
 def test_dind_inherits_network():
     client = DockerClient()
     try:
@@ -158,6 +163,9 @@ def test_find_host_network_in_dood() -> None:
     assert DockerClient().find_host_network() == os.environ[EXPECTED_NETWORK_VAR]


+@pytest.mark.skipif(
+    is_mac(), reason="Docker socket mounting and container networking do not work reliably on Docker Desktop for macOS"
+)
 @pytest.mark.skipif(not Path(tcc.ryuk_docker_socket).exists(), reason="No docker socket available")
 def test_dood(python_testcontainer_image: str) -> None:
     """
diff --git a/core/tests/test_ryuk.py b/core/tests/test_ryuk.py
index 5d6b208af..76556d4f4 100644
--- a/core/tests/test_ryuk.py
+++ b/core/tests/test_ryuk.py
@@ -8,9 +8,14 @@
 from testcontainers.core.config import testcontainers_config
 from testcontainers.core.container import Reaper
 from testcontainers.core.container import DockerContainer
+from testcontainers.core.utils import is_mac
 from testcontainers.core.waiting_utils import wait_for_logs


+@pytest.mark.skipif(
+    is_mac(),
+    reason="Ryuk container reaping is unreliable on Docker Desktop for macOS due to VM-based container lifecycle handling",
+)
 @pytest.mark.inside_docker_check
 def test_wait_for_reaper(monkeypatch: MonkeyPatch):
     Reaper.delete_instance()
@@ -41,6 +46,9 @@ def test_wait_for_reaper(monkeypatch: MonkeyPatch):
     Reaper.delete_instance()


+@pytest.mark.skipif(
+    is_mac(), reason="Ryuk disabling behavior is unreliable on Docker Desktop for macOS due to Docker socket emulation"
+)
 @pytest.mark.inside_docker_check
 def test_container_without_ryuk(monkeypatch: MonkeyPatch):
     Reaper.delete_instance()
diff --git a/docs/_headers b/docs/_headers
new file mode 100644
index 000000000..e59f34a29
--- /dev/null
+++ b/docs/_headers
@@ -0,0 +1,2 @@
/search/search_index.json
  Access-Control-Allow-Origin: *
diff --git a/docs/_redirects b/docs/_redirects
new file mode 100644
index 000000000..e69de29bb
diff --git a/docs/contributing.md b/docs/contributing.md
new file mode 100644
index 000000000..2e5a27c2a
--- /dev/null
+++ b/docs/contributing.md
@@ -0,0 +1,126 @@
# Contributing to `testcontainers-python`

Welcome to the `testcontainers-python` community!
This should give you an idea of how we build, test and release `testcontainers-python`!

We highly recommend reading this document thoroughly to understand what we're working on right now,
and what our priorities are, before you try to contribute something.

This will greatly increase your chances of getting prompt replies, as the maintainers are volunteers themselves.

## Before you begin

We recommend following these steps:

1. Finish reading this document.
2. Read the [recently updated issues](https://github.com/testcontainers/testcontainers-python/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc){:target="\_blank"}.
3. Look for existing issues on the subject you are interested in - we do our best to label everything correctly.

## Local development

### Pre-Requisites

You need to have the following tools available to you:

- `make` - You'll need GNU Make for common developer activities
- `poetry` - This is the primary package manager for the project
- `pyenv` **Recommended**: For installing Python versions for your system.
  Poetry infers the current latest version from what it can find on the `PATH`, so you are still fine if you don't use `pyenv`.

### Build and test

- Run `make install` to get `poetry` to install all dependencies and set up `pre-commit`
  - **Recommended**: Run `make` or `make help` to see the other commands available to you.
- After this, you should have a working virtual environment and can proceed with writing code in your favorite IDE
- **TIP**: You can run `make core/tests` or `make modules/<module>/tests` to run the tests for a specific module and speed up feedback cycles
- You can also run `make lint` to run the `pre-commit` checks for the entire codebase.

## Adding new modules

We have an [issue template](https://github.com/testcontainers/testcontainers-python/blob/main/.github/ISSUE_TEMPLATE/new-container.md){:target="\_blank"} for adding new module containers; please refer to it for more information.
Once you've talked to the maintainers (we do our best to reply!), you can proceed with contributing the new container.

!!! warning

    Please raise an issue before you try to contribute a new container! It helps maintainers understand your use-case and motivation.
    This way we can keep pull requests focused on the "how", not the "why"! :pray:
    It also gives maintainers a chance to give you last-minute guidance on caveats or expectations, particularly with
    new extra dependencies and how to manage them.

### Module documentation

Leave examples for others with your new module, such as `modules/<module>/basic_example.py`. You can create as many examples as you want.

Create a new `docs/modules/<module>.md` describing the basic use of the new container. There is a [starter template provided here](https://raw.githubusercontent.com/testcontainers/testcontainers-python/blob/main/docs/modules/template.md){:target="\_blank"}.

!!! important

    Make sure to add your new module to the sidebar nav in `mkdocs.yml`.

## Raising issues

We have [issue templates](https://raw.githubusercontent.com/testcontainers/testcontainers-python/refs/heads/main/.github/ISSUE_TEMPLATE/new-container.md){:target="\_blank"} to cover most cases; please try to adhere to them, as they will guide you through the process.
Try to look through the existing issues before you raise a new one.

## Releasing versions

We have automated Semantic Versioning and releases via [release-please](https://github.com/testcontainers/testcontainers-python/blob/main/.github/workflows/release-please.yml){:target="\_blank"}.
This takes care of:

- Detecting the next version, based on the commits that landed on `main`
- When a Release PR has been merged
  - Creating a GitHub Release with the CHANGELOG included
  - Updating the [CHANGELOG](https://github.com/testcontainers/testcontainers-python/blob/main/CHANGELOG.md){:target="\_blank"}, similar to the GitHub Release
  - Releasing to PyPI via a [trusted publisher](https://docs.pypi.org/trusted-publishers/using-a-publisher/){:target="\_blank"}
  - Automatically scripting version updates in the files that need them (e.g. in `pyproject.toml`) instead of hand-crafting them

!!! danger

    Community modules are supported on a best-effort basis, and for maintenance reasons any change to them
    is only covered under minor and patch changes.
    Community module changes DO NOT contribute to major version changes!
    If your community module container was broken by a minor or patch version change, check out the change logs!
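
Because release-please derives the next version from the commit messages that land on `main`, it helps to follow the Conventional Commits style it parses. A few illustrative (hypothetical) messages and the bump they would trigger:

```text
fix(core): handle rootless Docker socket paths   -> patch release
feat(redis): support password authentication    -> minor release
feat!: drop support for Python 3.8              -> major release
```
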

## Documentation contributions

The _Testcontainers for Python_ documentation is a static site built with [MkDocs](https://www.mkdocs.org/){:target="\_blank"}.
We use the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/){:target="\_blank"} theme, which offers a number of useful extensions to MkDocs.

We publish our documentation using Netlify.

### Adding code snippets

To include code snippets in the documentation, we use the [codeinclude plugin](https://github.com/rnorth/mkdocs-codeinclude-plugin){:target="\_blank"}, which uses the following syntax:

> <!--codeinclude-->
> [Human readable title for snippet](./relative_path_to_example_code.py) targeting_expression
> [Human readable title for snippet](./relative_path_to_example_code.py) targeting_expression
> <!--/codeinclude-->

Each titled snippet in the same `codeinclude` block becomes a separate tab in the rendered snippet, and each `targeting_expression` can be one of:

- `block:someString` or
- `inside_block:someString`

Please refer to the [codeinclude plugin documentation](https://github.com/rnorth/mkdocs-codeinclude-plugin){:target="\_blank"} for more information.

### Previewing rendered content

From the root directory of the repository, you can use the following command to build and serve the documentation locally:

```shell
make serve-docs
```

It will use a Docker container to install the required dependencies and start a local server at `http://localhost:8000`.

Once finished, you can destroy the container with the following command:

```shell
make clean-docs
```

### PR preview deployments

Note that documentation for pull requests will automatically be published by Netlify as 'deploy previews'.
These deployment previews can be accessed via the `deploy/netlify` check that appears for each pull request.

Please check the GitHub comment Netlify posts on the PR for the URL to the deployment preview.
diff --git a/docs/css/extra.css b/docs/css/extra.css
new file mode 100644
index 000000000..4c700dac4
--- /dev/null
+++ b/docs/css/extra.css
@@ -0,0 +1,136 @@
h1, h2, h3, h4, h5, h6 {
    font-family: 'Rubik', sans-serif;
}

[data-md-color-scheme="testcontainers"] {
    --md-primary-fg-color: #00bac2;
    --md-accent-fg-color: #361E5B;
    --md-typeset-a-color: #0C94AA;
    --md-primary-fg-color--dark: #291A3F;
    --md-default-fg-color--lightest: #F2F4FE;
    --md-footer-fg-color: #361E5B;
    --md-footer-fg-color--light: #746C8F;
    --md-footer-fg-color--lighter: #C3BEDE;
    --md-footer-bg-color: #F7F9FD;
    --md-footer-bg-color--dark: #F7F9FD;
}

.card-grid {
    display: grid;
    gap: 10px;
}

.tc-version {
    font-size: 1.1em;
    text-align: center;
    margin: 0;
}

@media (min-width: 680px) {
    .card-grid {
        grid-template-columns: repeat(3, 1fr);
    }
}

body .card-grid-item {
    display: flex;
    align-items: center;
    gap: 20px;
    border: 1px solid #C3BEDE;
    border-radius: 6px;
    padding: 16px;
    font-weight: 600;
    color: #9991B5;
    background: #F2F4FE;
}

body .card-grid-item:hover,
body .card-grid-item:focus {
    color: #9991B5;
}

.card-grid-item[href] {
    color: var(--md-primary-fg-color--dark);
    background: transparent;
}

.card-grid-item[href]:hover,
.card-grid-item[href]:focus {
    background: #F2F4FE;
    color: var(--md-primary-fg-color--dark);
}

.community-callout-wrapper {
    padding: 30px 10px 0 10px;
}

.community-callout {
    color: #F2F4FE;
    background: linear-gradient(10.88deg, rgba(102, 56, 242, 0.4) 9.56%, #6638F2 100%), #291A3F;
    box-shadow: 0px 20px 45px rgba(153, 145, 181, 0.75); /* #9991B5 at 75% */
    border-radius: 10px;
    padding: 20px;
}

.community-callout h2 {
    font-size: 1.15em;
    margin: 0 0 20px 0;
    color: #F2F4FE;
    text-align: center;
}

.community-callout ul {
    list-style: none;
    padding: 0;
    display: flex;
    justify-content: space-between;
    gap: 10px;
    margin-top: 20px;
    margin-bottom: 0;
}

.community-callout a {
    transition: opacity 0.2s ease;
}

.community-callout a:hover {
    opacity: 0.5;
}

.community-callout a img {
    height: 1.75em;
    width: auto;
    aspect-ratio: 1;
}

@media (min-width: 1220px) {
    .community-callout-wrapper {
        padding: 40px 0 0;
    }

    .community-callout h2 {
        font-size: 1.25em;
    }

    .community-callout a img {
        height: 2em;
    }
}

@media (min-width: 1600px) {
    .community-callout h2 {
        font-size: 1.15em;
    }

    .community-callout a img {
        height: 1.75em;
    }
}

.md-typeset__table {
    min-width: 100%;
}

.md-typeset table:not([class]) {
    display: table;
}
diff --git a/docs/css/tc-header.css b/docs/css/tc-header.css
new file mode 100644
index 000000000..de78d636e
--- /dev/null
+++ b/docs/css/tc-header.css
@@ -0,0 +1,389 @@

:root {
    --color-catskill: #F2F4FE;
    --color-catskill-45: rgba(242, 244, 254, 0.45);
    --color-mist: #E7EAFB;
    --color-fog: #C3C7E6;
    --color-smoke: #9991B5;
    --color-smoke-75: rgba(153, 145, 181, 0.75);
    --color-storm: #746C8F;
    --color-topaz: #00BAC2;
    --color-pacific: #17A6B2;
    --color-teal: #027F9E;
    --color-eggplant: #291A3F;
    --color-plum: #361E5B;
}

#site-header {
    color: var(--color-storm);
    background: #fff;
    font-family: 'Rubik', Arial, Helvetica, sans-serif;
    font-size: 12px;
    line-height: 1.5;
    position: relative;
    width: 100%;
    z-index: 4;
    display: flex;
    align-items: center;
    justify-content: space-between;
    gap: 20px;
    padding: 20px;
}

body.tc-header-active #site-header {
    z-index: 5;
}

#site-header .brand {
    display: flex;
    justify-content: space-between;
    gap: 20px;
    width: 100%;
}

#site-header .logo {
    display: flex;
}

#site-header .logo img,
#site-header .logo svg {
    height: 30px;
    width: auto;
    max-width: 100%;
}

#site-header #mobile-menu-toggle {
    background: none;
    border: none;
    display: flex;
    align-items: center;
    gap: 10px;
    cursor: pointer;
    color: var(--color-eggplant);
    padding: 0;
    margin: 0;
    font-weight: 500;
}

body.mobile-menu #site-header #mobile-menu-toggle {
    color: var(--color-topaz);
}

#site-header ul {
    list-style: none;
    padding: 0;
    margin: 0;
}

#site-header nav {
    display: none;
}

#site-header .menu-item {
    display: flex;
}

#site-header .menu-item button,
#site-header .menu-item a {
    min-height: 30px;
    display: flex;
    gap: 6px;
    align-items: center;
    border: none;
    background: none;
    cursor: pointer;
    padding: 0;
    font-weight: 500;
    color: var(--color-eggplant);
    text-decoration: none;
    font-size: 14px;
    transition: color 0.2s ease;
    white-space: nowrap;
}

#site-header .menu-item .badge {
    color: white;
    font-size: 10px;
    padding: 2px 6px;
    background-color: #0FD5C6; /* somehow $topaz is too dark for me */
    text-align: center;
    text-decoration: none;
    display: inline-block;
    border-radius: 6px;
}

#site-header .menu-item button:hover,
#site-header .menu-item a:hover {
    color: var(--color-topaz);
}

#site-header .menu-item button .icon-external,
#site-header .menu-item a .icon-external {
    margin-left: auto;
    opacity: .3;
    flex-shrink: 0;
}

#site-header .menu-item button .icon-caret,
#site-header .menu-item a .icon-caret {
    opacity: .3;
    height: 8px;
}

#site-header .menu-item button .icon-slack,
#site-header .menu-item a .icon-slack,
#site-header .menu-item button .icon-github,
#site-header .menu-item a .icon-github {
    height: 18px;
}

#site-header .menu-item .menu-dropdown {
    flex-direction: column;
}

body #site-header .menu-item .menu-dropdown {
    display: none;
}

#site-header .menu-item.has-children.active .menu-dropdown {
    display: flex;
    z-index: 10;
}

#site-header .menu-dropdown-item + .menu-dropdown-item {
    border-top: 1px solid var(--color-mist);
}

#site-header .menu-dropdown-item a {
    display: flex;
    gap: 10px;
    align-items: center;
    padding: 10px 20px;
    font-weight: 500;
    color: var(--color-eggplant);
    text-decoration: none;
    transition:
        color 0.2s ease,
        background 0.2s ease;
}

#site-header .menu-dropdown-item a .icon-external {
    margin-left: auto;
    color: var(--color-fog);
    flex-shrink: 0;
    opacity: 1;
}

#site-header .menu-dropdown-item a:hover {
    background-color: var(--color-catskill-45);
}

#site-header .menu-dropdown-item a:hover .icon-external {
    color: var(--color-topaz);
}

#site-header .menu-dropdown-item a img {
    height: 24px;
}

.md-header {
    background-color: var(--color-catskill);
    color: var(--color-eggplant);
}

.md-header.md-header--shadow {
    box-shadow: none;
}

.md-header__inner.md-grid {
    max-width: 100%;
    padding: 1.5px 20px;
}

[dir=ltr] .md-header__title {
    margin: 0;
}

.md-header__topic:first-child {
    font-size: 16px;
    font-weight: 500;
    font-family: 'Rubik', Arial, Helvetica, sans-serif;
}

.md-header__title.md-header__title--active .md-header__topic,
.md-header__title[data-md-state=active] .md-header__topic {
    opacity: 1;
    pointer-events: all;
    transform: translateX(0);
    transition: none;
    z-index: 0;
}

.md-header__topic a {
    max-width: 100%;
    overflow: hidden;
    text-overflow: ellipsis;
    transition: color .2s ease;
}

.md-header__topic a:hover {
    color: var(--color-topaz);
}

div.md-header__source {
    width: auto;
}

div.md-source__repository {
    max-width: 100%;
}

.md-main {
    padding: 0 12px;
}

@media screen and (min-width: 60em) {
    form.md-search__form {
        background-color: #FBFBFF;
        color: var(--color-storm);
    }

    form.md-search__form:hover {
        background-color: #fff;
    }

    .md-search__input + .md-search__icon {
        color: var(--color-plum);
    }

    .md-search__input::placeholder {
        color: var(--color-smoke);
    }
}

@media (min-width: 500px) {
    #site-header {
        font-size: 16px;
        padding: 20px 40px;
    }
    #site-header .logo img,
    #site-header .logo svg {
        height: 48px;
    }

    #site-header .menu-item button .icon-caret,
    #site-header .menu-item a .icon-caret {
        height: 10px;
    }

    #site-header .menu-item button .icon-slack,
    #site-header .menu-item a .icon-slack,
    #site-header .menu-item button .icon-github,
    #site-header .menu-item a .icon-github {
        height: 24px;
    }

    .md-header__inner.md-grid {
        padding: 5px 40px;
    }

    .md-main {
        padding: 0 32px;
    }
}

@media
(min-width: 1024px) { + #site-header #mobile-menu-toggle { + display: none; + } + + #site-header nav { + display: block; + } + + #site-header .menu { + display: flex; + justify-content: center; + gap: 30px; + } + + #site-header .menu-item { + align-items: center; + position: relative; + } + + #site-header .menu-item button, + #site-header .menu-item a { + min-height: 48px; + gap: 8px; + font-size: 16px; + } + + #site-header .menu-item .menu-dropdown { + position: absolute; + top: 100%; + right: -8px; + border: 1px solid var(--color-mist); + border-radius: 6px; + background: #fff; + box-shadow: 0px 30px 35px var(--color-smoke-75); + min-width: 200px; + } +} + + +@media (max-width: 1023px) { + #site-header { + flex-direction: column; + } + + body.mobile-tc-header-active #site-header { + z-index: 5; + } + + body.mobile-menu #site-header nav { + display: flex; + } + + #site-header nav { + position: absolute; + top: calc(100% - 5px); + width: calc(100% - 80px); + flex-direction: column; + border: 1px solid var(--color-mist); + border-radius: 6px; + background: #fff; + box-shadow: 0px 30px 35px var(--color-smoke-75); + min-width: 200px; + } + + #site-header .menu-item { + flex-direction: column; + } + #site-header .menu-item + .menu-item { + border-top: 1px solid var(--color-mist); + } + + #site-header .menu-item button, + #site-header .menu-item a { + padding: 10px 20px; + } + + #site-header .menu-item.has-children.active .menu-dropdown { + border-top: 1px solid var(--color-mist); + } + + #site-header .menu-dropdown-item a { + padding: 10px 20px 10px 30px; + } +} + +@media (max-width: 499px) { + #site-header nav { + width: calc(100% - 40px); + } +} diff --git a/docs/favicon.ico b/docs/favicon.ico new file mode 100644 index 000000000..311a0acaa Binary files /dev/null and b/docs/favicon.ico differ diff --git a/docs/features/advanced_features.md b/docs/features/advanced_features.md new file mode 100644 index 000000000..e1f1ee2fc --- /dev/null +++ b/docs/features/advanced_features.md @@ -0,0 +1,424 @@ +# Advanced Features and Best Practices + +This document covers advanced features and best practices for using testcontainers-python in complex scenarios. + +## Docker-in-Docker (DinD) Support + +Testcontainers-python provides robust support for running tests inside Docker containers, enabling true isolation and reproducibility of test environments. This feature is particularly valuable for CI/CD pipelines, integration testing, and scenarios requiring consistent, isolated environments. + +### Use Cases + +- **CI/CD Pipelines**: Run tests in isolated containers within your CI environment +- **Integration Testing**: Test interactions between multiple services in a controlled environment +- **Environment Consistency**: Ensure tests run in the same environment across different machines +- **Resource Isolation**: Prevent test interference and resource conflicts + +### Connection Modes + +Testcontainers-python supports three connection modes for container networking: + +- **`bridge_ip`**: Use this mode when containers need to communicate over a bridge network. This is the default mode and provides isolated network communication between containers. +- **`gateway_ip`**: Use this mode when containers need to access the host network. This is useful when containers need to communicate with services running on the host machine. +- **`docker_host`**: Use this mode for local development. This mode uses the host's Docker socket directly, which is more efficient but provides less isolation. 
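
The mode in use can also be selected explicitly. Below is a minimal sketch using the `TESTCONTAINERS_CONNECTION_MODE` environment variable described on the configuration page; the image and port are arbitrary examples:

```python
import os

# Select the mode before the first container is started so the Docker
# client picks it up; valid values are "bridge_ip", "gateway_ip" and
# "docker_host".
os.environ["TESTCONTAINERS_CONNECTION_MODE"] = "gateway_ip"

from testcontainers.core.container import DockerContainer

with DockerContainer("nginx:alpine").with_exposed_ports(80) as container:
    # The host IP reported here is resolved according to the chosen mode.
    print(container.get_container_host_ip(), container.get_exposed_port(80))
```
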
+ +### Network Configuration + +Here's how to set up container networking: + +```python +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network + +# Create an isolated network +network = Network() + +# Create containers on the network +container1 = DockerContainer("nginx:alpine") +container1.with_network(network) +container1.with_network_aliases(["web"]) + +container2 = DockerContainer("redis:alpine") +container2.with_network(network) +container2.with_network_aliases(["cache"]) +``` + +### Volume Mounting + +Mount host directories into containers for data persistence or configuration: + +```python +container = DockerContainer("nginx:alpine") +container.with_volume_mapping("/host/path", "/container/path", "ro") # Read-only mount +container.with_volume_mapping("/host/data", "/container/data", "rw") # Read-write mount +``` + +### Best Practices + +When working with Docker-in-Docker, it's crucial to follow a comprehensive set of best practices to ensure optimal performance, security, and maintainability. Start by carefully managing your resources: set appropriate memory and CPU limits for your containers, actively monitor their resource usage, and ensure proper cleanup after tests complete. This helps prevent resource exhaustion and maintains system stability. + +Security should be a top priority in your DinD implementation. Always use read-only volume mounts when possible to prevent unauthorized modifications, avoid running containers with privileged access unless absolutely necessary, and implement proper network isolation to prevent unauthorized access between containers. These measures help maintain a secure testing environment. + +For optimal performance, focus on using appropriate base images. Alpine-based images are often a good choice due to their small footprint, but consider your specific needs. Implement proper health checks to ensure containers are truly ready before proceeding with tests, and consider using container caching strategies to speed up test execution. When dealing with complex setups, consider using Docker Compose to manage multiple containers and their interactions. + +## ARM64 Support + +Testcontainers-python provides comprehensive support for ARM64 architecture through automatic emulation, making it seamless to run tests on ARM-based systems like Apple Silicon (M1/M2) Macs and ARM-based cloud instances. + +### Using ARM64 Support + +```python +from testcontainers.core.container import DockerContainer + +# Basic usage with automatic emulation +container = DockerContainer("nginx:alpine") +container.maybe_emulate_amd64() # Automatically handles ARM64 emulation + +# Advanced configuration with resource limits +container = DockerContainer("nginx:alpine") +container.maybe_emulate_amd64() +container.with_memory_limit("512m") +container.with_cpu_limit(0.5) # Use 50% of available CPU +``` + +### Performance Considerations + +1. **Emulation Overhead**: + - Expect 20-30% performance impact when running x86_64 containers on ARM + - Use ARM-native images when available for better performance + - Consider using multi-architecture images (e.g., `nginx:alpine`) + +2. **Resource Management**: + - Monitor memory usage during emulation + - Adjust CPU limits based on your workload + - Use appropriate base images to minimize emulation overhead + +### Best Practices + +When working with ARM64 architecture, a thoughtful approach to image selection and resource management is essential. 
Prioritize using multi-architecture images when available, as they provide the best compatibility across different platforms. For optimal performance, use minimal base images to reduce emulation overhead, and thoroughly test your setup with different image variants to find the best balance between size and functionality. + +In your development workflow, ensure you test your applications on both ARM and x86_64 environments to catch any architecture-specific issues early. When setting up CI/CD pipelines, make sure they support ARM64 architecture and document any architecture-specific considerations in your project documentation. This helps maintain consistency across different development environments and deployment targets. + +## TCP Forwarding with Socat + +The `SocatContainer` provides powerful TCP forwarding capabilities, enabling complex networking scenarios and service communication patterns. + +### Using Socat Container + +```python +from testcontainers.socat import SocatContainer + +# Basic TCP forwarding +socat = SocatContainer() +socat.with_target(8080, "host.docker.internal", 80) +socat.start() + +# Multiple port forwarding +socat = SocatContainer() +socat.with_target(8080, "host.docker.internal", 80) +socat.with_target(5432, "postgres", 5432) # Forward to another container +socat.start() + +# UDP forwarding +socat = SocatContainer() +socat.with_target(53, "8.8.8.8", 53, protocol="udp") +socat.start() +``` + +### Advanced Configuration + +```python +# Custom Socat options +socat = SocatContainer() +socat.with_option("-d") # Enable debug output +socat.with_option("-v") # Verbose mode +socat.with_target(8080, "host.docker.internal", 80) +socat.start() +``` + +### Best Practices + +When working with Socat, security should be your primary concern. Only forward the ports that are absolutely necessary for your application to function, and implement appropriate access controls to prevent unauthorized access. For sensitive traffic, consider using TLS to encrypt the forwarded connections. Regularly monitor your forwarded connections to detect any suspicious activity or performance issues. + +Performance optimization is crucial for maintaining a responsive system. Monitor connection latency to identify potential bottlenecks, and adjust buffer sizes based on your specific use case. For high-load scenarios, consider implementing connection pooling to manage resources efficiently. Regular maintenance is also important: document your forwarding rules clearly, implement proper cleanup procedures, and monitor connection health to ensure reliable operation. + +## Environment Variables and Configuration + +Testcontainers-python offers flexible configuration options through environment variables, configuration files, and properties. 
+ +### Using Environment Variables + +**Direct Environment Variables**: + +```python +container = DockerContainer("nginx:alpine") +container.with_env("NGINX_HOST", "example.com") +container.with_env("NGINX_PORT", "8080") +container.with_env("DEBUG", "true") +``` + +**Environment Files**: + +```python +# .env file +NGINX_HOST=example.com +NGINX_PORT=8080 +DEBUG=true + +# Python code +container = DockerContainer("nginx:alpine") +container.with_env_file(".env") +``` + +**Configuration Properties**: + +```properties +# .testcontainers.properties +ryuk.container.privileged=true +ryuk.reconnection.timeout=10s +docker.client.strategy=org.testcontainers.dockerclient.UnixSocketClientProviderStrategy +``` + +### Best Practices + +Configuration management in testcontainers-python requires a careful balance between flexibility and security. Never commit sensitive data to version control; instead, use environment variables for secrets and consider implementing a secrets manager for more complex scenarios. When dealing with configuration files, ensure they are well-documented and include validation to catch errors early. + +In your development workflow, provide example configuration files to help new team members get started quickly. Document all required environment variables and their purposes, and implement configuration testing to catch issues before they reach production. Use configuration templates to maintain consistency across different environments while allowing for environment-specific customization. + +## Container Health Checks + +Testcontainers-python provides robust health checking mechanisms to ensure containers are ready for testing. + +### Custom Health Checks + +```python +from testcontainers.core.container import DockerContainer +from testcontainers.core.waiting_utils import wait_container_is_ready +import requests + +class WebContainer(DockerContainer): + @wait_container_is_ready() + def _connect(self): + response = requests.get(f"http://{self.get_container_host_ip()}:{self.get_exposed_port(80)}") + return response.status_code == 200 + +class DatabaseContainer(DockerContainer): + @wait_container_is_ready() + def _connect(self): + # Implement database connection check + pass +``` + +### Health Check Strategies + +1. **HTTP Health Checks**: + - Check HTTP endpoints + - Verify response status codes + - Validate response content + +2. **TCP Health Checks**: + - Verify port availability + - Check connection establishment + - Monitor connection stability + +3. **Application-Specific Checks**: + - Verify service readiness + - Check data consistency + - Validate business logic + +### Best Practices + +Health checks are a critical component of reliable containerized applications. When implementing health checks, use appropriate timeouts and implement retry mechanisms to handle temporary issues gracefully. Log health check failures with sufficient detail to aid in debugging, and consider using multiple check strategies to ensure comprehensive coverage of your application's health. + +Monitoring is essential for maintaining system health. Track health check metrics to identify patterns and potential issues, implement proper logging to capture relevant information, and set up alerts for failures to enable quick response to problems. Regular maintenance is also important: review your health checks periodically, update check criteria as your application evolves, and test check reliability to ensure they continue to provide accurate information. 
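
To make the TCP strategy listed above concrete, here is a minimal sketch that uses only the standard library together with the `wait_container_is_ready` decorator shown earlier; the Redis image, the port, and the `connect_check` method name are placeholder assumptions:

```python
import socket

from testcontainers.core.container import DockerContainer
from testcontainers.core.waiting_utils import wait_container_is_ready

class TcpReadyContainer(DockerContainer):
    @wait_container_is_ready()
    def connect_check(self) -> None:
        # wait_container_is_ready retries this method until it stops
        # raising, so a refused TCP handshake simply means "try again".
        with socket.create_connection(
            (self.get_container_host_ip(), int(self.get_exposed_port(6379))),
            timeout=1.0,
        ):
            pass

with TcpReadyContainer("redis:alpine").with_exposed_ports(6379) as container:
    container.connect_check()  # returns once the port accepts connections
```
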
+ +## Error Handling and Debugging + +### Common Issues and Solutions + +**Container Startup Failures**: + +```python +try: + container = DockerContainer("nginx:alpine") + container.start() +except Exception as e: + print(f"Container startup failed: {e}") + print(f"Container logs: {container.get_logs()}") + raise +``` + +**Network Issues**: + +```python +# Debug network configuration +container_id = container.get_wrapped_container().id +network_info = container.get_docker_client().inspect_network(network_name) +print(f"Network configuration: {network_info}") + +# Check container connectivity +host_ip = container.get_container_host_ip() +print(f"Container host IP: {host_ip}") +``` + +**Resource Cleanup**: + +```python +from contextlib import contextmanager + +@contextmanager +def managed_container(): + container = DockerContainer("nginx:alpine") + try: + container.start() + yield container + finally: + container.stop() + container.remove() +``` + +### Debugging Tools + +**Container Logs**: + +```python +# Get all logs +stdout, stderr = container.get_logs() + +# Get recent logs +stdout, stderr = container.get_logs(since="2024-01-01T00:00:00Z") + +# Follow logs +for line in container.get_logs(stream=True): + print(line) +``` + +**Container Information**: + +```python +# Get container details +container_id = container.get_wrapped_container().id +container_info = container.get_docker_client().inspect_container(container_id) + +# Get resource usage +stats = container.get_docker_client().stats(container_id) +``` + +**Network Information**: + +```python +# Get network details +network_name = container.get_docker_client().network_name(container_id) +network_info = container.get_docker_client().inspect_network(network_name) + +# List connected containers +connected_containers = container.get_docker_client().list_containers( + filters={"network": network_name} +) +``` + +### Best Practices + +Error handling and debugging in containerized environments require a systematic approach. Start by implementing proper validation and using appropriate timeouts to prevent common issues. Set up monitoring to catch problems early, and document known issues and their solutions to help team members resolve similar problems quickly. + +When debugging issues, collect relevant logs and analyze error patterns to identify root causes. Use appropriate tools for different types of problems, and document your solutions to build a knowledge base for future reference. Regular maintenance is crucial: perform regular system checks, keep documentation up to date, monitor error rates, and implement improvements based on your findings. + +## Performance Optimization + +Optimizing the performance of your testcontainers-python setup is crucial for maintaining efficient test execution and resource utilization. This section covers key strategies and best practices for achieving optimal performance. + +### Image Selection and Management + +The choice of base images significantly impacts your container's performance and resource usage. 
When selecting images, consider the following: + +```python +# Using minimal base images +container = DockerContainer("nginx:alpine") # ~7MB +container = DockerContainer("python:3.9-slim") # ~125MB +container = DockerContainer("python:3.9") # ~900MB + +# Using multi-stage builds for custom images +from testcontainers.core.container import DockerContainer +from testcontainers.core.docker_client import DockerClient + +client = DockerClient() +client.build_image( + path=".", + tag="my-optimized-app:latest", + dockerfile=""" + FROM python:3.9-slim as builder + WORKDIR /app + COPY requirements.txt . + RUN pip install --no-cache-dir -r requirements.txt + + FROM python:3.9-slim + WORKDIR /app + COPY --from=builder /usr/local/lib/python3.9/site-packages /usr/local/lib/python3.9/site-packages + COPY . . + """ +) +``` + +### Resource Management + +Proper resource allocation is essential for maintaining system stability and performance. Here's how to manage resources effectively: + +```python +# Setting resource limits +container = DockerContainer("nginx:alpine") +container.with_memory_limit("512m") # Limit memory usage +container.with_cpu_limit(0.5) # Use 50% of available CPU +container.with_shm_size("256m") # Set shared memory size + +# Monitoring resource usage +stats = container.get_docker_client().stats(container.get_wrapped_container().id) +print(f"CPU Usage: {stats['cpu_stats']['cpu_usage']['total_usage']}") +print(f"Memory Usage: {stats['memory_stats']['usage']}") +``` + +### Parallel Execution + +Running tests in parallel can significantly reduce overall execution time. Here's how to implement parallel execution: + +```python +import concurrent.futures +from testcontainers.core.container import DockerContainer + +def run_test(container_config): + with DockerContainer(**container_config) as container: + # Run your test + pass + +# Run multiple containers in parallel +container_configs = [ + {"image": "nginx:alpine", "ports": {"80": 8080}}, + {"image": "redis:alpine", "ports": {"6379": 6379}}, + {"image": "postgres:alpine", "ports": {"5432": 5432}} +] + +with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor: + futures = [executor.submit(run_test, config) for config in container_configs] + concurrent.futures.wait(futures) +``` + +### Caching Strategies + +Implementing effective caching strategies can significantly improve test execution time: + +```python +# Using Docker layer caching +container = DockerContainer("python:3.9-slim") +container.with_volume_mapping( + "${HOME}/.cache/pip", # Host pip cache + "/root/.cache/pip", # Container pip cache + "rw" +) + +# Using build cache +client = DockerClient() +client.build_image( + path=".", + tag="my-app:latest", + dockerfile="Dockerfile", + buildargs={"BUILDKIT_INLINE_CACHE": "1"} +) +``` diff --git a/docs/features/authentication.md b/docs/features/authentication.md new file mode 100644 index 000000000..138fd6816 --- /dev/null +++ b/docs/features/authentication.md @@ -0,0 +1,109 @@ +# Docker Authentication + +Testcontainers-Python supports various methods of authenticating with Docker registries. This is essential when working with private registries or when you need to pull images that require authentication. + +## Basic Authentication + +The simplest way to authenticate is using Docker's built-in credential store. 
Testcontainers-Python will automatically use credentials stored by Docker: + +```python +from testcontainers.generic import GenericContainer + +# Docker will automatically use stored credentials +container = GenericContainer("private.registry.com/myimage:latest") +``` + +## Environment Variables + +You can provide registry credentials using environment variables: + +```bash +# Set registry credentials +export DOCKER_USERNAME=myuser +export DOCKER_PASSWORD=mypassword +export DOCKER_REGISTRY=private.registry.com +``` + +## Configuration File + +You can also configure authentication in the `.testcontainers.properties` file: + +```properties +registry.username=myuser +registry.password=mypassword +registry.url=private.registry.com +``` + +## Programmatic Authentication + +For more control, you can provide credentials programmatically: + +```python +from testcontainers.core.config import TestcontainersConfiguration + +# Configure registry credentials +config = TestcontainersConfiguration() +config.registry_username = "myuser" +config.registry_password = "mypassword" +config.registry_url = "private.registry.com" + +# Use the configuration +container = GenericContainer("private.registry.com/myimage:latest") +``` + +## AWS ECR Authentication + +For Amazon Elastic Container Registry (ECR), Testcontainers-Python supports automatic authentication: + +```python +from testcontainers.generic import GenericContainer + +# ECR authentication is handled automatically +container = GenericContainer("123456789012.dkr.ecr.region.amazonaws.com/myimage:latest") +``` + +## Google Container Registry (GCR) + +For Google Container Registry, you can use Google Cloud credentials: + +```python +from testcontainers.generic import GenericContainer + +# GCR authentication using Google Cloud credentials +container = GenericContainer("gcr.io/myproject/myimage:latest") +``` + +## Azure Container Registry (ACR) + +For Azure Container Registry, you can use Azure credentials: + +```python +from testcontainers.generic import GenericContainer + +# ACR authentication using Azure credentials +container = GenericContainer("myregistry.azurecr.io/myimage:latest") +``` + +## Best Practices + +1. Never commit credentials to version control +2. Use environment variables or secure credential stores +3. Rotate credentials regularly +4. Use the least privileged credentials necessary +5. Consider using Docker credential helpers +6. Use registry-specific authentication when available +7. Keep credentials secure and encrypted +8. Use separate credentials for different environments + +## Troubleshooting + +If you encounter authentication issues: + +1. Verify your credentials are correct +2. Check if the registry is accessible +3. Ensure your Docker daemon is running +4. Check Docker's credential store +5. Verify network connectivity +6. Check for any proxy settings +7. Look for any rate limiting +8. Check registry-specific requirements diff --git a/docs/features/building_images.md b/docs/features/building_images.md new file mode 100644 index 000000000..d59e14a2b --- /dev/null +++ b/docs/features/building_images.md @@ -0,0 +1,190 @@ +# Building Images from Dockerfiles + +Testcontainers-Python allows you to build Docker images from Dockerfiles during test execution. This is useful when you need to test custom images or when you want to ensure your Dockerfile builds correctly. 
+ +## Basic Image Building + +The simplest way to build an image is using the `build_image` function: + +```python +from testcontainers.core.container import build_image + +# Build an image from a Dockerfile +image = build_image( + path="path/to/dockerfile/directory", + tag="myapp:test" +) + +# Use the built image +with GenericContainer(image) as container: + # Your test code here + pass +``` + +## Building with Options + +You can customize the build process with various options: + +```python +# Build with specific Dockerfile +image = build_image( + path="path/to/dockerfile/directory", + dockerfile="Dockerfile.test", + tag="myapp:test" +) + +# Build with build arguments +image = build_image( + path="path/to/dockerfile/directory", + buildargs={ + "VERSION": "1.0.0", + "ENVIRONMENT": "test" + }, + tag="myapp:test" +) + +# Build with target stage +image = build_image( + path="path/to/dockerfile/directory", + target="test", + tag="myapp:test" +) +``` + +## Building with Context + +You can specify a build context: + +```python +# Build with specific context +image = build_image( + path="path/to/dockerfile/directory", + context="path/to/build/context", + tag="myapp:test" +) +``` + +## Building with Cache + +You can control build caching: + +```python +# Build without cache +image = build_image( + path="path/to/dockerfile/directory", + nocache=True, + tag="myapp:test" +) + +# Build with specific cache from +image = build_image( + path="path/to/dockerfile/directory", + cache_from=["myapp:latest"], + tag="myapp:test" +) +``` + +## Building with Platform + +You can specify the target platform: + +```python +# Build for specific platform +image = build_image( + path="path/to/dockerfile/directory", + platform="linux/amd64", + tag="myapp:test" +) +``` + +## Building with Labels + +You can add labels to the built image: + +```python +# Build with labels +image = build_image( + path="path/to/dockerfile/directory", + labels={ + "test": "true", + "environment": "test" + }, + tag="myapp:test" +) +``` + +## Best Practices + +1. Use appropriate tags +2. Clean up built images +3. Use build arguments for configuration +4. Consider build context size +5. Use appropriate build caching +6. Handle build failures +7. Use appropriate platforms +8. Add meaningful labels + +## Common Use Cases + +### Building Test Images + +```python +def test_custom_image(): + # Build test image + image = build_image( + path="path/to/dockerfile/directory", + buildargs={"TEST_MODE": "true"}, + tag="myapp:test" + ) + + # Use the test image + with GenericContainer(image) as container: + # Your test code here + pass +``` + +### Building with Dependencies + +```python +def test_with_dependencies(): + # Build base image + base_image = build_image( + path="path/to/base/dockerfile/directory", + tag="myapp:base" + ) + + # Build test image using base + test_image = build_image( + path="path/to/test/dockerfile/directory", + cache_from=[base_image], + tag="myapp:test" + ) +``` + +### Building for Different Environments + +```python +def test_different_environments(): + # Build for different environments + environments = ["dev", "test", "staging"] + + for env in environments: + image = build_image( + path="path/to/dockerfile/directory", + buildargs={"ENVIRONMENT": env}, + tag=f"myapp:{env}" + ) +``` + +## Troubleshooting + +If you encounter issues with image building: + +1. Check Dockerfile syntax +2. Verify build context +3. Check for missing files +4. Verify build arguments +5. Check for platform compatibility +6. Verify cache settings +7. 
Check for resource limits +8. Verify Docker daemon state diff --git a/docs/features/configuration.md b/docs/features/configuration.md new file mode 100644 index 000000000..9b3e95681 --- /dev/null +++ b/docs/features/configuration.md @@ -0,0 +1,160 @@ +# Custom Configuration + +You can override some default properties if your environment requires it. + +## Configuration Locations + +The configuration will be loaded from multiple locations. Properties are considered in the following order: + +1. Environment variables +2. `.testcontainers.properties` in the user's home folder. Example locations: + **Linux:** `/home/myuser/.testcontainers.properties` + **Windows:** `C:/Users/myuser/.testcontainers.properties` + **macOS:** `/Users/myuser/.testcontainers.properties` + +Note that when using environment variables, configuration property names should be set in uppercase with underscore separators, preceded by `TESTCONTAINERS_` - e.g. `ryuk.disabled` becomes `TESTCONTAINERS_RYUK_DISABLED`. + +### Supported Properties + +Testcontainers-Python provides a configuration class to represent the settings: + +```python +from testcontainers.core.config import TestcontainersConfiguration + +# Default configuration +config = TestcontainersConfiguration() + +# Access configuration values +max_tries = config.max_tries +sleep_time = config.sleep_time +ryuk_image = config.ryuk_image +ryuk_privileged = config.ryuk_privileged +ryuk_disabled = config.ryuk_disabled +ryuk_docker_socket = config.ryuk_docker_socket +ryuk_reconnection_timeout = config.ryuk_reconnection_timeout +tc_host_override = config.tc_host_override +``` + +The following properties are supported: + +| Property | Environment Variable | Description | Default | +| --------------------------- | ------------------------------------------- | ---------------------------------------------------- | ------------------------- | +| `tc.host` | `TC_HOST` or `TESTCONTAINERS_HOST_OVERRIDE` | Testcontainers host address | - | +| `docker.host` | `DOCKER_HOST` | Address of the Docker daemon | - | +| `docker.tls.verify` | `DOCKER_TLS_VERIFY` | Enable/disable TLS verification | 0 | +| `docker.cert.path` | `DOCKER_CERT_PATH` | Path to Docker certificates | - | +| `ryuk.disabled` | `TESTCONTAINERS_RYUK_DISABLED` | Disable the Garbage Collector | false | +| `ryuk.container.privileged` | `TESTCONTAINERS_RYUK_PRIVILEGED` | Run Ryuk in privileged mode | false | +| `ryuk.reconnection.timeout` | `RYUK_RECONNECTION_TIMEOUT` | Time to wait before reconnecting | 10s | +| `ryuk.image` | `RYUK_CONTAINER_IMAGE` | Ryuk container image | testcontainers/ryuk:0.8.1 | +| `connection.mode` | `TESTCONTAINERS_CONNECTION_MODE` | Connection mode (bridge_ip, gateway_ip, docker_host) | - | + +Additional configuration options: + +| Environment Variable | Description | Default | +| --------------------- | ------------------------------------------- | ------- | +| `TC_MAX_TRIES` | Maximum number of connection attempts | 120 | +| `TC_POOLING_INTERVAL` | Time between connection attempts | 1 | +| `DOCKER_AUTH_CONFIG` | Docker authentication config (experimental) | - | + +## Docker Host Detection + +Testcontainers-Python will attempt to detect the Docker environment and configure everything to work automatically. + +However, sometimes customization is required. Testcontainers-Python will respect the following order: + +1. Read the **tc.host** property in the `~/.testcontainers.properties` file. E.g. `tc.host=tcp://my.docker.host:1234` + +2. 
Read the **TC_HOST** or **TESTCONTAINERS_HOST_OVERRIDE** environment variable. E.g. `TC_HOST=tcp://my.docker.host:1234` + +3. Read the **DOCKER_HOST** environment variable. E.g. `DOCKER_HOST=unix:///var/run/docker.sock` + See [Docker environment variables](https://docs.docker.com/engine/reference/commandline/cli/#environment-variables) for more information. + +4. Read the default Docker socket path, without the unix schema. E.g. `/var/run/docker.sock` + +5. Read the **docker.host** property in the `~/.testcontainers.properties` file. E.g. `docker.host=tcp://my.docker.host:1234` + +6. Read the rootless Docker socket path, checking the following alternative locations: + + 1. `${XDG_RUNTIME_DIR}/.docker/run/docker.sock` + 2. `${HOME}/.docker/run/docker.sock` + 3. `${HOME}/.docker/desktop/docker.sock` + 4. `/run/user/${UID}/docker.sock`, where `${UID}` is the user ID of the current user + +7. The library will raise a `DockerHostError` if none of the above are set, meaning that the Docker host was not detected. + +## Docker Socket Path Detection + +Testcontainers-Python will attempt to detect the Docker socket path and configure everything to work automatically. + +However, sometimes customization is required. Testcontainers-Python will respect the following order: + +1. Read the **TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE** environment variable. Path to Docker's socket. Used by Ryuk, Docker Compose, and a few other containers that need to perform Docker actions. + Example: `/var/run/docker-alt.sock` + +2. If the operating system retrieved by the Docker client is "Docker Desktop", and the host is running on Windows, it will return the `//var/run/docker.sock` UNC path. Otherwise, it returns the default Docker socket path for rootless Docker. + +3. Get the current Docker host from the existing strategies: see Docker host detection. + +4. If the socket contains the unix schema, the schema is removed (e.g. `unix:///var/run/docker.sock` -> `/var/run/docker.sock`) + +5. Otherwise, the default location of the Docker socket is used: `/var/run/docker.sock` + +The library will raise a `DockerHostError` if the Docker host cannot be discovered. + +## Connection Modes + +Testcontainers-Python supports different connection modes that determine how containers are accessed: + +1. `bridge_ip` (default): Uses the bridge network IP address +2. `gateway_ip`: Uses the gateway IP address +3. `docker_host`: Uses the Docker host address + +You can set the connection mode using the `TESTCONTAINERS_CONNECTION_MODE` environment variable or the `connection.mode` property in `.testcontainers.properties`. 
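
For example, to force the gateway mode for a single test run without editing any files (the property-file equivalent is `connection.mode=gateway_ip`):

```shell
TESTCONTAINERS_CONNECTION_MODE=gateway_ip pytest core/tests/
```
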
+ +## Example Configuration File + +Here's an example of a `.testcontainers.properties` file: + +```properties +# Docker host configuration +docker.host=tcp://my.docker.host:1234 +docker.tls.verify=1 +docker.cert.path=/path/to/certs + +# Ryuk configuration +ryuk.disabled=false +ryuk.container.privileged=true +ryuk.reconnection.timeout=30s +ryuk.image=testcontainers/ryuk:0.8.1 + +# Testcontainers configuration +tc.host=tcp://my.testcontainers.host:1234 +connection.mode=bridge_ip +``` + +## Using Configuration in Code + +You can access and modify the configuration programmatically: + +```python +from testcontainers.core.config import testcontainers_config + +# Access configuration values +max_tries = testcontainers_config.max_tries +sleep_time = testcontainers_config.sleep_time + +# The configuration is read-only by default +# Changes should be made through environment variables or .testcontainers.properties +``` + +## Best Practices + +1. Use environment variables for CI/CD environments +2. Use `.testcontainers.properties` for local development +3. Set appropriate timeouts for your environment +4. Enable verbose logging when debugging +5. Consider disabling Ryuk if your environment already handles container cleanup +6. Use privileged mode for Ryuk only when necessary +7. Set proper TLS verification and certificate paths for secure environments +8. Choose the appropriate connection mode for your environment diff --git a/docs/features/container_logs.md b/docs/features/container_logs.md new file mode 100644 index 000000000..c4fe06069 --- /dev/null +++ b/docs/features/container_logs.md @@ -0,0 +1,151 @@ +# Container Logs + +Testcontainers-Python provides several ways to access and follow container logs. This is essential for debugging and monitoring container behavior during tests. 
+ +## Basic Log Access + +The simplest way to access logs is using the `get_logs` method: + +```python +from testcontainers.generic import GenericContainer + +with GenericContainer("nginx:alpine") as container: + # Get all logs + stdout, stderr = container.get_logs() + print(f"STDOUT: {stdout}") + print(f"STDERR: {stderr}") +``` + +## Following Logs + +To follow logs in real-time: + +```python +with GenericContainer("nginx:alpine") as container: + # Follow logs + for line in container.follow_logs(): + print(line) # Each line as it appears +``` + +## Log Access with Options + +You can customize log access with various options: + +```python +with GenericContainer("nginx:alpine") as container: + # Get logs with timestamps + stdout, stderr = container.get_logs(timestamps=True) + + # Get logs since a specific time + import datetime + since = datetime.datetime.now() - datetime.timedelta(minutes=5) + stdout, stderr = container.get_logs(since=since) + + # Get logs with tail + stdout, stderr = container.get_logs(tail=100) # Last 100 lines +``` + +## Log Streams + +You can access specific log streams: + +```python +with GenericContainer("nginx:alpine") as container: + # Get only stdout + stdout, _ = container.get_logs() + + # Get only stderr + _, stderr = container.get_logs() + + # Get both streams + stdout, stderr = container.get_logs() +``` + +## Log Following with Callback + +You can use a callback function to process logs: + +```python +def log_callback(line): + print(f"Log line: {line}") + +with GenericContainer("nginx:alpine") as container: + # Follow logs with callback + container.follow_logs(callback=log_callback) +``` + +## Log Access in Tests + +Here's how to use logs in tests: + +```python +import pytest +from testcontainers.generic import GenericContainer + +def test_container_logs(): + with GenericContainer("nginx:alpine") as container: + # Wait for specific log message + for line in container.follow_logs(): + if "Configuration complete" in line: + break + + # Verify log content + stdout, stderr = container.get_logs() + assert "Configuration complete" in stdout +``` + +## Best Practices + +1. Use appropriate log levels +2. Handle log streams separately +3. Use timestamps for debugging +4. Consider log rotation +5. Use log following for real-time monitoring +6. Clean up log resources +7. Use appropriate log formats +8. Consider log volume + +## Common Use Cases + +### Application Startup Verification + +```python +with GenericContainer("myapp:latest") as container: + # Wait for application to start + for line in container.follow_logs(): + if "Application started" in line: + break +``` + +### Error Detection + +```python +with GenericContainer("myapp:latest") as container: + # Monitor for errors + for line in container.follow_logs(): + if "ERROR" in line: + print(f"Error detected: {line}") +``` + +### Performance Monitoring + +```python +with GenericContainer("myapp:latest") as container: + # Monitor performance metrics + for line in container.follow_logs(): + if "Performance" in line: + print(f"Performance metric: {line}") +``` + +## Troubleshooting + +If you encounter issues with log access: + +1. Check container state +2. Verify log configuration +3. Check for log rotation +4. Verify log permissions +5. Check for log volume +6. Verify log format +7. Check for log buffering +8. 
Verify log drivers diff --git a/docs/features/copying_data.md b/docs/features/copying_data.md new file mode 100644 index 000000000..8623ec73f --- /dev/null +++ b/docs/features/copying_data.md @@ -0,0 +1,166 @@ +# Copying Data into Containers + +Testcontainers-Python provides several ways to copy data into containers. This is essential for setting up test data, configuration files, or any other files needed for your tests. + +## Basic File Copy + +The simplest way to copy a file is using the `copy_file_to_container` method: + +```python +from testcontainers.generic import GenericContainer + +with GenericContainer("alpine:latest") as container: + # Copy a single file + container.copy_file_to_container( + local_path="path/to/local/file.txt", + container_path="/path/in/container/file.txt" + ) +``` + +## Copying Multiple Files + +You can copy multiple files at once: + +```python +with GenericContainer("alpine:latest") as container: + # Copy multiple files + container.copy_files_to_container([ + ("path/to/local/file1.txt", "/path/in/container/file1.txt"), + ("path/to/local/file2.txt", "/path/in/container/file2.txt") + ]) +``` + +## Copying Directories + +You can copy entire directories: + +```python +with GenericContainer("alpine:latest") as container: + # Copy a directory + container.copy_directory_to_container( + local_path="path/to/local/directory", + container_path="/path/in/container/directory" + ) +``` + +## Copying with Permissions + +You can set permissions for copied files: + +```python +with GenericContainer("alpine:latest") as container: + # Copy file with specific permissions + container.copy_file_to_container( + local_path="path/to/local/file.txt", + container_path="/path/in/container/file.txt", + permissions=0o644 # rw-r--r-- + ) +``` + +## Copying with User + +You can specify the owner of copied files: + +```python +with GenericContainer("alpine:latest") as container: + # Copy file with specific owner + container.copy_file_to_container( + local_path="path/to/local/file.txt", + container_path="/path/in/container/file.txt", + user="nobody" + ) +``` + +## Copying from Memory + +You can copy data directly from memory: + +```python +with GenericContainer("alpine:latest") as container: + # Copy data from memory + data = b"Hello, World!" + container.copy_data_to_container( + data=data, + container_path="/path/in/container/file.txt" + ) +``` + +## Best Practices + +1. Use appropriate file permissions +2. Clean up copied files +3. Use absolute paths +4. Handle file encoding +5. Consider file size +6. Use appropriate owners +7. Handle file conflicts +8. 
Consider security implications + +## Common Use Cases + +### Setting Up Test Data + +```python +def test_with_data(): + with GenericContainer("alpine:latest") as container: + # Copy test data + container.copy_file_to_container( + local_path="tests/data/test_data.json", + container_path="/app/data/test_data.json" + ) + + # Copy configuration + container.copy_file_to_container( + local_path="tests/config/test_config.yaml", + container_path="/app/config/config.yaml" + ) +``` + +### Setting Up Application Files + +```python +def test_application(): + with GenericContainer("myapp:latest") as container: + # Copy application files + container.copy_directory_to_container( + local_path="app/static", + container_path="/app/static" + ) + + # Copy templates + container.copy_directory_to_container( + local_path="app/templates", + container_path="/app/templates" + ) +``` + +### Setting Up Database Files + +```python +def test_database(): + with GenericContainer("postgres:latest") as container: + # Copy database initialization script + container.copy_file_to_container( + local_path="tests/db/init.sql", + container_path="/docker-entrypoint-initdb.d/init.sql" + ) + + # Copy test data + container.copy_file_to_container( + local_path="tests/db/test_data.sql", + container_path="/docker-entrypoint-initdb.d/test_data.sql" + ) +``` + +## Troubleshooting + +If you encounter issues with copying data: + +1. Check file permissions +2. Verify file paths +3. Check file encoding +4. Verify file size +5. Check container state +6. Verify user permissions +7. Check for file conflicts +8. Verify disk space diff --git a/docs/features/creating_container.md b/docs/features/creating_container.md new file mode 100644 index 000000000..fa3b1190b --- /dev/null +++ b/docs/features/creating_container.md @@ -0,0 +1,139 @@ +# How to Create a Container + +Testcontainers-Python is a thin wrapper around Docker designed for use in tests. Anything you can run in Docker, you can spin up with Testcontainers-Python: + +- NoSQL databases or other data stores (e.g. Redis, ElasticSearch, MongoDB) +- Web servers/proxies (e.g. NGINX, Apache) +- Log services (e.g. Logstash, Kibana) +- Other services developed by your team/organization which are already Dockerized + +## Basic Container Creation + +The simplest way to create a container is using the `GenericContainer` class: + +```python +from testcontainers.generic import GenericContainer + +def test_basic_container(): + with GenericContainer("nginx:alpine") as nginx: + # Get container connection details + host = nginx.get_container_host_ip() + port = nginx.get_exposed_port(80) + + # Your test code here + # For example, make HTTP requests to the nginx server + import requests + response = requests.get(f"http://{host}:{port}") + assert response.status_code == 200 +``` + +## Advanced Container Configuration + +For more complex scenarios, use the `run` helper function. 
This high-level interface is similar to `docker run` and automatically handles: + +- Creating temporary networks +- Mounting files or tmpfs +- Waiting for container readiness +- Container cleanup + +Example with various configuration options: + +```python +import io +import pytest +from testcontainers.core.container import run +from testcontainers.core.network import DockerNetwork +from testcontainers.core.waiting_utils import wait_for_logs + +def test_nginx_advanced(): + # Create an isolated network + network = DockerNetwork() + network.create() + pytest.addfinalizer(network.remove) + + # Create a test file to mount + test_file_content = b"Hello from test file!" + host_file = io.BytesIO(test_file_content) + + # Run the container with various options + container = run( + image="nginx:alpine", + network=network.name, + files=[(host_file, "/usr/share/nginx/html/test.txt")], # Mount file + tmpfs={"/tmp": "rw"}, # Mount tmpfs + labels={"testcontainers.label": "true"}, # Add labels + environment={"TEST": "true"}, # Set environment variables + ports={"80/tcp": None}, # Expose port 80 + command=["nginx", "-g", "daemon off;"], # Override default command + wait=wait_for_logs("Configuration complete; ready for start"), # Wait for logs + startup_timeout=30, # Set startup timeout + ) + + # Ensure cleanup + pytest.addfinalizer(container.stop) + pytest.addfinalizer(container.remove) + + # Test the container + host = container.get_container_host_ip() + port = container.get_exposed_port(80) + + # Verify the mounted file + import requests + response = requests.get(f"http://{host}:{port}/test.txt") + assert response.text == "Hello from test file!" +``` + +## Container Lifecycle Management + +Testcontainers-Python offers several ways to manage container lifecycle: + +1. **Context manager (recommended):** +```python +with GenericContainer("nginx:alpine") as container: + # Container is automatically started and stopped + pass +``` + +2. **Manual management:** +```python +container = GenericContainer("nginx:alpine") +container.start() +try: + # Your test code here + pass +finally: + container.stop() + container.remove() +``` + +3. **Pytest fixtures:** +```python +import pytest +from testcontainers.generic import GenericContainer + +@pytest.fixture +def nginx_container(): + container = GenericContainer("nginx:alpine") + container.start() + yield container + container.stop() + container.remove() + +def test_with_nginx(nginx_container): + # Your test code here + pass +``` + +## Container Readiness + +For details on waiting for containers to be ready, see [Wait strategies](wait_strategies.md). + +## Best Practices + +1. Always use context managers or ensure proper cleanup +2. Set appropriate timeouts for container startup +3. Use isolated networks for tests +4. Mount test files instead of copying them +5. Use tmpfs for temporary data +6. Add meaningful labels to containers +7. Configure proper wait conditions diff --git a/docs/features/docker_compose.md b/docs/features/docker_compose.md new file mode 100644 index 000000000..006a12b92 --- /dev/null +++ b/docs/features/docker_compose.md @@ -0,0 +1,117 @@ +# Docker Compose Support + +Testcontainers-Python provides support for running Docker Compose environments in your tests. This is useful when you need to test against multiple containers that work together. 
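In a test suite, the compose environment is typically wrapped in a fixture so that it starts once and is torn down automatically. A minimal pytest sketch using the `DockerCompose` API described below; the compose directory and the `web` service name are placeholders:

```python
import pytest
from testcontainers.compose import DockerCompose

@pytest.fixture(scope="session")
def compose_services():
    # Start the whole environment once for the test session;
    # the context manager tears it down afterwards
    with DockerCompose("path/to/compose/directory") as compose:
        yield compose

def test_web_is_up(compose_services):
    host = compose_services.get_service_host("web")
    port = compose_services.get_service_port("web", 8080)
    assert host and port
```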
+ +## Basic Usage + +The simplest way to use Docker Compose is with the `DockerCompose` class: + +```python +from testcontainers.compose import DockerCompose + +# Create a compose environment +compose = DockerCompose( + context="path/to/compose/directory", + compose_file_name="docker-compose.yml" +) + +# Start the environment +with compose: + # Your test code here + pass +``` + +## Configuration Options + +The `DockerCompose` class supports various configuration options: + +```python +compose = DockerCompose( + context="path/to/compose/directory", + compose_file_name=["docker-compose.yml", "docker-compose.override.yml"], # Multiple compose files + pull=True, # Pull images before starting + build=True, # Build images before starting + wait=True, # Wait for services to be healthy + env_file=".env", # Environment file + services=["service1", "service2"], # Specific services to run + profiles=["profile1", "profile2"], # Compose profiles to use + keep_volumes=False # Whether to keep volumes after stopping +) +``` + +## Accessing Services + +You can access service information and interact with containers: + +```python +with DockerCompose("path/to/compose/directory") as compose: + # Get service host and port + host = compose.get_service_host("web") + port = compose.get_service_port("web", 8080) + + # Get both host and port + host, port = compose.get_service_host_and_port("web", 8080) + + # Execute commands in a container + stdout, stderr, exit_code = compose.exec_in_container( + ["ls", "-la"], + service_name="web" + ) + + # Get container logs + stdout, stderr = compose.get_logs("web") +``` + +## Waiting for Services + +You can wait for services to be ready: + +```python +with DockerCompose("path/to/compose/directory") as compose: + # Wait for a specific URL to be accessible + compose.wait_for("http://localhost:8080/health") +``` + +## Example with Multiple Services + +Here's a complete example using multiple services: + +```python +from testcontainers.compose import DockerCompose +import requests + +def test_web_application(): + compose = DockerCompose( + "path/to/compose/directory", + compose_file_name="docker-compose.yml", + pull=True, + build=True + ) + + with compose: + # Get web service details + host = compose.get_service_host("web") + port = compose.get_service_port("web", 8080) + + # Make a request to the web service + response = requests.get(f"http://{host}:{port}/api/health") + assert response.status_code == 200 + + # Execute a command in the database service + stdout, stderr, exit_code = compose.exec_in_container( + ["psql", "-U", "postgres", "-c", "SELECT 1"], + service_name="db" + ) + assert exit_code == 0 +``` + +## Best Practices + +1. Use context managers (`with` statement) to ensure proper cleanup +2. Set appropriate timeouts for service startup +3. Use health checks in your compose files +4. Keep compose files in your test directory +5. Use environment variables for configuration +6. Consider using profiles for different test scenarios +7. Clean up volumes when not needed +8. Use specific service names in your tests diff --git a/docs/features/executing_commands.md b/docs/features/executing_commands.md new file mode 100644 index 000000000..9db76a89c --- /dev/null +++ b/docs/features/executing_commands.md @@ -0,0 +1,157 @@ +# Executing Commands in Containers + +Testcontainers-Python provides several ways to execute commands inside containers. This is useful for setup, verification, and debugging during tests. 
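When you work with the core `DockerContainer` class directly, command execution goes through its `exec` method, which wraps Docker's `exec_run` and returns an exit code together with the raw output bytes. A minimal sketch; the `sleep` command only keeps the otherwise short-lived Alpine container running:

```python
from testcontainers.core.container import DockerContainer

with DockerContainer("alpine:latest").with_command("sleep 60") as container:
    # exec() returns (exit_code, output-as-bytes)
    exit_code, output = container.exec(["echo", "hello"])
    assert exit_code == 0
    assert output.decode().strip() == "hello"
```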
+ +## Basic Command Execution + +The simplest way to execute a command is using the `exec` method: + +```python +from testcontainers.generic import GenericContainer + +with GenericContainer("alpine:latest") as container: + # Execute a simple command + exit_code, output = container.exec(["ls", "-la"]) + print(output) # Command output as string +``` + +## Command Execution with Options + +You can customize command execution with various options: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with user + exit_code, output = container.exec( + ["whoami"], + user="nobody" + ) + + # Execute command with environment variables + exit_code, output = container.exec( + ["echo", "$TEST_VAR"], + environment={"TEST_VAR": "test_value"} + ) + + # Execute command with working directory + exit_code, output = container.exec( + ["pwd"], + workdir="/tmp" + ) +``` + +## Interactive Commands + +For interactive commands, you can use the `exec_interactive` method: + +```python +with GenericContainer("alpine:latest") as container: + # Start an interactive shell + container.exec_interactive(["sh"]) +``` + +## Command Execution with Timeout + +You can set a timeout for command execution: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with timeout + try: + exit_code, output = container.exec( + ["sleep", "10"], + timeout=5 # Timeout in seconds + ) + except TimeoutError: + print("Command timed out") +``` + +## Command Execution with Privileges + +For commands that require elevated privileges: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with privileges + exit_code, output = container.exec( + ["mount"], + privileged=True + ) +``` + +## Command Execution with TTY + +For commands that require a TTY: + +```python +with GenericContainer("alpine:latest") as container: + # Execute command with TTY + exit_code, output = container.exec( + ["top"], + tty=True + ) +``` + +## Best Practices + +1. Use appropriate timeouts for long-running commands +2. Handle command failures gracefully +3. Use environment variables for configuration +4. Consider security implications of privileged commands +5. Clean up after command execution +6. Use appropriate user permissions +7. Handle command output appropriately +8. Consider using shell scripts for complex commands + +## Common Use Cases + +### Database Setup + +```python +from testcontainers.postgres import PostgresContainer + +with PostgresContainer() as postgres: + # Create a database + postgres.exec(["createdb", "testdb"]) + + # Run migrations + postgres.exec(["psql", "-d", "testdb", "-f", "/path/to/migrations.sql"]) +``` + +### File Operations + +```python +with GenericContainer("alpine:latest") as container: + # Create a directory + container.exec(["mkdir", "-p", "/data"]) + + # Set permissions + container.exec(["chmod", "755", "/data"]) + + # List files + exit_code, output = container.exec(["ls", "-la", "/data"]) +``` + +### Service Management + +```python +with GenericContainer("nginx:alpine") as container: + # Check service status + exit_code, output = container.exec(["nginx", "-t"]) + + # Reload configuration + container.exec(["nginx", "-s", "reload"]) +``` + +## Troubleshooting + +If you encounter issues with command execution: + +1. Check command syntax and arguments +2. Verify user permissions +3. Check container state +4. Verify command availability +5. Check for timeout issues +6. Verify environment variables +7. Check working directory +8. 
Verify TTY requirements diff --git a/docs/features/garbage_collector.md b/docs/features/garbage_collector.md new file mode 100644 index 000000000..a396b1706 --- /dev/null +++ b/docs/features/garbage_collector.md @@ -0,0 +1,51 @@ +# Garbage Collector + +Testcontainers for Python includes a robust garbage collection mechanism to ensure that containers are properly cleaned up, even in unexpected scenarios. + +## How it Works + +The garbage collection is implemented using a special container called "Ryuk" (pronounced "reaper"). This container is automatically started when you create your first test container and is responsible for cleaning up resources when: + +1. The Python process exits normally +2. The Python process is terminated unexpectedly +3. The system crashes or loses power + +## Configuration + +The Ryuk container can be configured through environment variables: + +- `TESTCONTAINERS_RYUK_DISABLED`: Set to `true` to disable the Ryuk container (not recommended) +- `TESTCONTAINERS_RYUK_CONTAINER_PRIVILEGED`: Set to `true` to run Ryuk in privileged mode (default: `false`) +- `TESTCONTAINERS_RYUK_RECONNECTION_TIMEOUT`: Timeout for Ryuk reconnection attempts (default: `10s`) + +## Best Practices + +1. **Don't Disable Ryuk**: The Ryuk container is an important part of Testcontainers' cleanup mechanism. Only disable it if you have a specific reason and understand the implications. + +2. **Use Context Managers**: Always use the `with` statement when creating containers. This ensures proper cleanup even if an exception occurs: + +```python +with RedisContainer() as redis: + # Your test code here +``` + +3. **Session Management**: Each test session gets a unique session ID, and Ryuk tracks containers by this ID. This allows for proper cleanup even when running tests in parallel. + +## Troubleshooting + +If you notice containers not being cleaned up: + +1. Check if Ryuk is running: `docker ps | grep testcontainers-ryuk` +2. Verify that the containers have the correct session label: `docker inspect | grep session-id` +3. Check Ryuk logs: `docker logs ` + +## Implementation Details + +The Ryuk container is a lightweight container that: + +1. Connects to the Docker daemon +2. Listens for container events +3. Automatically removes containers when their parent process exits +4. Handles reconnection if the connection to Docker is lost + +This provides a more reliable cleanup mechanism than relying solely on Python's garbage collection or process termination handlers. diff --git a/docs/features/networking.md b/docs/features/networking.md new file mode 100644 index 000000000..6ebabe532 --- /dev/null +++ b/docs/features/networking.md @@ -0,0 +1,204 @@ +# Networking and Container Communication + +Testcontainers-Python provides several ways to configure networking between containers and your test code. This is essential for testing distributed systems and microservices. + +## Connection Modes + +Testcontainers-Python supports three connection modes that determine how containers are accessed: + +1. `bridge_ip` (default): Uses the bridge network IP address. Best for: + + - Docker-in-Docker (DinD) scenarios + - When containers need to communicate over a bridge network + - When you need direct container-to-container communication + +2. `gateway_ip`: Uses the gateway IP address. Best for: + + - Docker-in-Docker (DinD) scenarios + - When containers need to access the host network + - When you need to access services running on the host + +3. `docker_host`: Uses the Docker host address. 
Best for: + + - Local development + - When running tests outside of containers + - When you need to access containers from the host machine + +You can set the connection mode using the `TESTCONTAINERS_CONNECTION_MODE` environment variable or the `connection.mode` property in `.testcontainers.properties`. + +## Port Exposure + +Testcontainers-Python provides two methods for exposing container ports, with `with_exposed_ports` being the recommended approach: + +### Exposing Ports with Random Host Ports (Recommended) + +```python +from testcontainers.core.container import DockerContainer + +container = DockerContainer("nginx:alpine") +container.with_exposed_ports(80, "443/tcp") # Expose ports, host ports will be assigned randomly +container.start() +mapped_port = container.get_exposed_port(80) # Get the randomly assigned host port +``` + +This is the preferred method because it: + +- Avoids port conflicts in parallel test execution +- Is more secure as it doesn't expose fixed ports +- Matches the behavior of other testcontainers implementations +- Allows for better isolation between test runs + +### Binding to Specific Host Ports (Not Recommended) + +```python +container = DockerContainer("nginx:alpine") +container.with_bind_ports(80, 8080) # Map container port 80 to host port 8080 +container.with_bind_ports("443/tcp", 8443) # Map container port 443 to host port 8443 +``` + +Use `with_bind_ports` only in specific cases where you absolutely need a fixed port number, such as: + +- When testing with tools that require specific port numbers +- When integrating with external systems that can't handle dynamic ports +- When debugging and need consistent port numbers + +Note that using fixed ports can cause conflicts when running tests in parallel and may lead to test failures if the specified ports are already in use. + +## Creating Networks + +You can create isolated networks for your containers: + +```python +from testcontainers.core.network import Network + +# Create a new network +network = Network() +network.create() + +# Use the network with containers +container1 = GenericContainer("nginx:alpine") +container1.with_network(network) +container1.with_network_aliases(["web"]) + +container2 = GenericContainer("redis:alpine") +container2.with_network(network) +container2.with_network_aliases(["cache"]) + +# Start containers +with container1, container2: + # Containers can communicate using their network aliases + # e.g., "web" can connect to "cache:6379" + pass +``` + +## Container Communication + +Containers can communicate with each other in several ways: + +1. Using network aliases: + +```python +# Container 1 can reach Container 2 using its network alias +container1 = GenericContainer("app:latest") +container1.with_network(network) +container1.with_network_aliases(["app"]) + +container2 = GenericContainer("db:latest") +container2.with_network(network) +container2.with_network_aliases(["database"]) + +# Container 1 can connect to Container 2 using "database:5432" +``` + +2. Using container IP addresses: + +```python +with container1, container2: + # Get container IP addresses + container1_ip = container1.get_container_host_ip() + container2_ip = container2.get_container_host_ip() + + # Containers can communicate using IP addresses + # e.g., container1 can connect to container2_ip:5432 +``` + +3. 
Using host networking: + +```python +container = GenericContainer("nginx:alpine") +container.with_network_mode("host") # Use host networking +``` + +## Example: Multi-Container Application + +Here's a complete example of a multi-container application: + +```python +from testcontainers.core.network import Network +from testcontainers.postgres import PostgresContainer +from testcontainers.redis import RedisContainer + +def test_multi_container_app(): + # Create a network + network = Network() + network.create() + + # Create containers + postgres = PostgresContainer() + postgres.with_network(network) + postgres.with_network_aliases(["db"]) + + redis = RedisContainer() + redis.with_network(network) + redis.with_network_aliases(["cache"]) + + # Start containers + with postgres, redis: + # Get connection details + db_host = postgres.get_container_host_ip() + db_port = postgres.get_exposed_port(5432) + + redis_host = redis.get_container_host_ip() + redis_port = redis.get_exposed_port(6379) + + # Your test code here + pass +``` + +## Best Practices + +1. **Port Management**: + + - Always use `with_exposed_ports` instead of `with_bind_ports` unless you have a specific requirement for fixed ports + - Use `get_exposed_port` to retrieve the mapped port number when using `with_exposed_ports` + - Avoid hardcoding port numbers in your tests + +2. **Network Configuration**: + + - Use isolated networks for tests to prevent conflicts + - Use meaningful network aliases for better readability and maintainability + - Avoid using host networking unless absolutely necessary + - Use the appropriate connection mode for your environment: + - `bridge_ip` for Docker-in-Docker (DinD) scenarios + - `gateway_ip` for accessing host network services + - `docker_host` for local development + +3. **Container Communication**: + + - Use network aliases for container-to-container communication + - Use environment variables for configuration + - Consider using Docker Compose for complex multi-container setups + +4. **Resource Management**: + + - Always use context managers (`with` statements) to ensure proper cleanup + - Let the Ryuk container handle cleanup in case of unexpected termination + - Clean up networks after tests + - Use environment variables for configuration + +5. **Testing Best Practices**: + - Write tests that are independent and can run in parallel + - Avoid dependencies on specific port numbers + - Use meaningful container and network names for debugging + - Consider using Docker Compose for complex setups + - Use environment variables for configuration diff --git a/docs/features/wait_strategies.md b/docs/features/wait_strategies.md new file mode 100644 index 000000000..3bb42eb69 --- /dev/null +++ b/docs/features/wait_strategies.md @@ -0,0 +1,131 @@ +# Wait Strategies + +Testcontainers-Python provides several strategies to wait for containers to be ready before proceeding with tests. This is crucial for ensuring that your tests don't start before the container is fully initialized and ready to accept connections. + +## Basic Wait Strategy + +The simplest way to wait for a container is using the `wait_container_is_ready` decorator: + +```python +from testcontainers.core.waiting_utils import wait_container_is_ready + +class MyContainer(DockerContainer): + @wait_container_is_ready() + def _connect(self): + # Your connection logic here + pass +``` + +This decorator will retry the method until it succeeds or times out. By default, it will retry for 120 seconds with a 1-second interval between attempts. 
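The decorator can also be given transient exception types that should be retried instead of raised immediately. A sketch under the assumption that a Redis client's `ConnectionError` means "not ready yet" (the `redis` package is an extra dependency of this example):

```python
import redis
from redis.exceptions import ConnectionError as RedisConnectionError

from testcontainers.core.container import DockerContainer
from testcontainers.core.waiting_utils import wait_container_is_ready

class MyRedisContainer(DockerContainer):
    @wait_container_is_ready(RedisConnectionError)
    def connect(self) -> redis.Redis:
        # Retried until Redis accepts connections or the timeout is reached
        client = redis.Redis(
            host=self.get_container_host_ip(),
            port=int(self.get_exposed_port(6379)),
        )
        client.ping()
        return client

with MyRedisContainer("redis:7-alpine").with_exposed_ports(6379) as container:
    client = container.connect()
```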
+ +## Log-based Waiting + +Wait for specific log messages to appear: + +```python +from testcontainers.core.waiting_utils import wait_for_logs + +# Wait for a specific log message +container = GenericContainer( + "nginx:alpine", + wait=wait_for_logs("Configuration complete; ready for start") +) + +# Wait for a log pattern using regex +container = GenericContainer( + "postgres:latest", + wait=wait_for_logs("database system is ready to accept connections") +) + +# Wait for logs in both stdout and stderr +container = GenericContainer( + "myapp:latest", + wait=wait_for_logs("Ready", predicate_streams_and=True) +) +``` + +## HTTP-based Waiting + +Wait for an HTTP endpoint to be accessible: + +```python +from testcontainers.core.waiting_utils import wait_for_http + +# Wait for an HTTP endpoint +container = GenericContainer( + "nginx:alpine", + wait=wait_for_http("/", port=80) +) + +# Wait for a specific HTTP status code +container = GenericContainer( + "myapp:latest", + wait=wait_for_http("/health", port=8080, status_code=200) +) +``` + +## Custom Wait Conditions + +You can create custom wait conditions by implementing your own wait function: + +```python +def custom_wait(container): + # Your custom logic here + # Return True if the container is ready, False otherwise + return True + +container = GenericContainer( + "myapp:latest", + wait=custom_wait +) +``` + +## Connection-based Waiting + +Many container implementations include built-in connection waiting. For example: + +```python +from testcontainers.redis import RedisContainer +from testcontainers.postgres import PostgresContainer + +# Redis container waits for connection +redis = RedisContainer() +redis.start() # Will wait until Redis is ready to accept connections + +# PostgreSQL container waits for connection +postgres = PostgresContainer() +postgres.start() # Will wait until PostgreSQL is ready to accept connections +``` + +## Ryuk Container Wait Behavior + +The Ryuk container (used for garbage collection) has its own wait mechanism that combines log-based and connection-based waiting: + +1. **Log-based Wait**: Waits for the message ".\* Started!" with a 20-second timeout +2. **Connection Wait**: After the logs are found, attempts to establish a socket connection to the Ryuk container, retrying up to 50 times with a 0.5-second interval between attempts + +This ensures that the Ryuk container is fully operational before any test containers are started. + +## Configuring Wait Behavior + +You can configure the wait behavior using environment variables: + +- `TC_MAX_TRIES`: Maximum number of connection attempts (default: 120) +- `TC_POOLING_INTERVAL`: Time between connection attempts in seconds (default: 1) + +Example: + +```bash +export TC_MAX_TRIES=60 +export TC_POOLING_INTERVAL=2 +``` + +## Best Practices + +1. Always use appropriate wait strategies for your containers +2. Set reasonable timeouts for your environment +3. Use specific wait conditions rather than generic ones when possible +4. Consider using connection-based waiting for database containers +5. Use log-based waiting for applications that output clear startup messages +6. Use HTTP-based waiting for web services +7. Implement custom wait conditions for complex startup scenarios diff --git a/docs/getting_help.md b/docs/getting_help.md new file mode 100644 index 000000000..51a1227ac --- /dev/null +++ b/docs/getting_help.md @@ -0,0 +1,10 @@ +# Getting help + +We hope that you find Testcontainers intuitive to use and reliable. 
+However, sometimes things don't go the way we'd expect, and we'd like to try and help out if we can. + +To contact the Testcontainers team and other users you can: + +- Join our [Slack team](https://slack.testcontainers.org) +- [Search our issues tracker](https://github.com/testcontainers/testcontainers-python/issues), or raise a new issue if you find any bugs or have suggested improvements +- [Search Stack Overflow](https://stackoverflow.com/questions/tagged/testcontainers), especially among posts tagged with `testcontainers` diff --git a/docs/icons/github.svg b/docs/icons/github.svg new file mode 100644 index 000000000..8274d383d --- /dev/null +++ b/docs/icons/github.svg @@ -0,0 +1,4 @@ + + + diff --git a/docs/icons/slack.svg b/docs/icons/slack.svg new file mode 100644 index 000000000..1b371770b --- /dev/null +++ b/docs/icons/slack.svg @@ -0,0 +1,10 @@ + + + + + + diff --git a/docs/icons/stackoverflow.svg b/docs/icons/stackoverflow.svg new file mode 100644 index 000000000..0cf51ec46 --- /dev/null +++ b/docs/icons/stackoverflow.svg @@ -0,0 +1,5 @@ + + + + diff --git a/docs/icons/twitter.svg b/docs/icons/twitter.svg new file mode 100644 index 000000000..a6a902ce7 --- /dev/null +++ b/docs/icons/twitter.svg @@ -0,0 +1,4 @@ + + + diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..3defde8c3 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,45 @@ +# Testcontainers for Python + +

Not using Python? Here are other supported languages!

Java · Go · .NET · Node.js · Python · Rust · Haskell · Ruby
+ +## About Testcontainers For Python + +_Testcontainers for Python_ is a Python library that makes it simple to create and clean up container-based dependencies for automated integration or smoke tests. The easy-to-use API enables developers to programmatically define containers that should be run as part of a test and clean up those resources when the test is done. + +To start using _Testcontainers for Python_, see the [quickstart guide](quickstart.md). + +!!!note + + If you need a high-level explanation of _Testcontainers_, see the [main website](https://testcontainers.com/getting-started/). + +## Code Comments + +Inline documentation and docs where the code lives are crucial for us. Testcontainers For Python follows the [PEP 257](https://peps.python.org/pep-0257/){:target="\_blank"} comment conventions. The codebase previously supported Sphinx, so you may encounter comments not yet updated for the new documentation style. + +## Who Is Using Testcontainers Python? + +- [Timescale](https://www.timescale.com/) - Uses testcontainers-python in their pgai project for testing PostgreSQL integrations, AI model interactions, and AWS service integrations. +- [Redis](https://redis.io/) - Depends on testcontainers-python for their Redis vector library implementation. +- [Apache](https://skywalking.apache.org/) - Uses testcontainers-python in their Skywalking project for an application performance monitoring tool in distributed systems. + +## License + +See [LICENSE](https://raw.githubusercontent.com/testcontainers/testcontainers-python/refs/heads/main/LICENSE.txt){:target="\_blank"}. + +## Attributions + +## Copyright + +Copyright (c) 2015-2021 Sergey Pirogov and other authors. + +See [AUTHORS](https://github.com/testcontainers/testcontainers-python/graphs/contributors){:target="\_blank"} for contributors. 
diff --git a/docs/js/tc-header.js b/docs/js/tc-header.js new file mode 100644 index 000000000..7d51ebf6b --- /dev/null +++ b/docs/js/tc-header.js @@ -0,0 +1,45 @@ +const mobileToggle = document.getElementById("mobile-menu-toggle"); +const mobileSubToggle = document.getElementById("mobile-submenu-toggle"); +function toggleMobileMenu() { + document.body.classList.toggle('mobile-menu'); + document.body.classList.toggle("mobile-tc-header-active"); +} +function toggleMobileSubmenu() { + document.body.classList.toggle('mobile-submenu'); +} +if (mobileToggle) + mobileToggle.addEventListener("click", toggleMobileMenu); +if (mobileSubToggle) + mobileSubToggle.addEventListener("click", toggleMobileSubmenu); + +const allParentMenuItems = document.querySelectorAll("#site-header .menu-item.has-children"); +function clearActiveMenuItem() { + document.body.classList.remove("tc-header-active"); + allParentMenuItems.forEach((item) => { + item.classList.remove("active"); + }); +} +function setActiveMenuItem(e) { + clearActiveMenuItem(); + e.currentTarget.closest(".menu-item").classList.add("active"); + document.body.classList.add("tc-header-active"); +} +allParentMenuItems.forEach((item) => { + const trigger = item.querySelector(":scope > a, :scope > button"); + + trigger.addEventListener("click", (e) => { + if (e.currentTarget.closest(".menu-item").classList.contains("active")) { + clearActiveMenuItem(); + } else { + setActiveMenuItem(e); + } + }); + + trigger.addEventListener("mouseenter", (e) => { + setActiveMenuItem(e); + }); + + item.addEventListener("mouseleave", (e) => { + clearActiveMenuItem(); + }); +}); diff --git a/docs/language-logos/dotnet.svg b/docs/language-logos/dotnet.svg new file mode 100644 index 000000000..496753d54 --- /dev/null +++ b/docs/language-logos/dotnet.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/docs/language-logos/go.svg b/docs/language-logos/go.svg new file mode 100644 index 000000000..58ba79abd --- /dev/null +++ b/docs/language-logos/go.svg @@ -0,0 +1,10 @@ + + + + + + + diff --git a/docs/language-logos/haskell.svg b/docs/language-logos/haskell.svg new file mode 100644 index 000000000..eb6de3776 --- /dev/null +++ b/docs/language-logos/haskell.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/docs/language-logos/java.svg b/docs/language-logos/java.svg new file mode 100644 index 000000000..d9080555a --- /dev/null +++ b/docs/language-logos/java.svg @@ -0,0 +1,17 @@ + + + + + + + + + diff --git a/docs/language-logos/nodejs.svg b/docs/language-logos/nodejs.svg new file mode 100644 index 000000000..34af396b0 --- /dev/null +++ b/docs/language-logos/nodejs.svg @@ -0,0 +1,5 @@ + + + diff --git a/docs/language-logos/python.svg b/docs/language-logos/python.svg new file mode 100644 index 000000000..c7ba2353b --- /dev/null +++ b/docs/language-logos/python.svg @@ -0,0 +1,8 @@ + + + + diff --git a/docs/language-logos/ruby.svg b/docs/language-logos/ruby.svg new file mode 100644 index 000000000..05537cedf --- /dev/null +++ b/docs/language-logos/ruby.svg @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/language-logos/rust.svg b/docs/language-logos/rust.svg new file mode 100644 index 000000000..1691f56bb --- /dev/null +++ b/docs/language-logos/rust.svg @@ -0,0 +1,57 @@ + + + diff --git a/docs/logo.png b/docs/logo.png new file mode 100644 index 
000000000..88961b3e3 Binary files /dev/null and b/docs/logo.png differ diff --git a/docs/logo.svg b/docs/logo.svg new file mode 100644 index 000000000..bac0c391a --- /dev/null +++ b/docs/logo.svg @@ -0,0 +1,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/modules/arangodb.md b/docs/modules/arangodb.md new file mode 100644 index 000000000..e342c9c9f --- /dev/null +++ b/docs/modules/arangodb.md @@ -0,0 +1,41 @@ +# ArangoDB + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for ArangoDB. + +## Adding this module to your project dependencies + +Please run the following command to add the ArangoDB module to your python dependencies: + +```bash +pip install testcontainers[arangodb] python-arango +``` + +## Usage example + + + +[Creating an ArangoDB container](../../modules/arangodb/example_basic.py) + + + +## Features + +- Multi-model database support (key-value, document, graph) +- AQL (ArangoDB Query Language) for complex queries +- Built-in aggregation functions +- Collection management +- Document CRUD operations +- Bulk document import + +## Configuration + +The ArangoDB container can be configured with the following parameters: + +- `username`: Database username (default: "root") +- `password`: Database password (default: "test") +- `port`: Port to expose (default: 8529) +- `version`: ArangoDB version to use (default: "latest") diff --git a/docs/modules/aws.md b/docs/modules/aws.md new file mode 100644 index 000000000..8fb1ea412 --- /dev/null +++ b/docs/modules/aws.md @@ -0,0 +1,23 @@ +# AWS + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for AWS. + +## Adding this module to your project dependencies + +Please run the following command to add the AWS module to your python dependencies: + +```bash +pip install testcontainers[aws] httpx +``` + +## Usage example + + + +[Creating an AWS container](../../modules/aws/example_basic.py) + + diff --git a/docs/modules/azurite.md b/docs/modules/azurite.md new file mode 100644 index 000000000..9acc48730 --- /dev/null +++ b/docs/modules/azurite.md @@ -0,0 +1,23 @@ +# Azurite + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Azurite. + +## Adding this module to your project dependencies + +Please run the following command to add the Azurite module to your python dependencies: + +```bash +pip install testcontainers[azurite] azure-storage-blob +``` + +## Usage example + + + +[Creating an Azurite container](../../modules/azurite/example_basic.py) + + diff --git a/docs/modules/cassandra.md b/docs/modules/cassandra.md new file mode 100644 index 000000000..3250b737f --- /dev/null +++ b/docs/modules/cassandra.md @@ -0,0 +1,23 @@ +# Cassandra + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Cassandra. + +## Adding this module to your project dependencies + +Please run the following command to add the Cassandra module to your python dependencies: + +```bash +pip install testcontainers[cassandra] cassandra-driver +``` + +## Usage example + + + +[Creating a Cassandra container](../../modules/cassandra/example_basic.py) + + diff --git a/docs/modules/chroma.md b/docs/modules/chroma.md new file mode 100644 index 000000000..ae2e45dcf --- /dev/null +++ b/docs/modules/chroma.md @@ -0,0 +1,43 @@ +# Chroma + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Chroma. 
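Once the dependencies below are installed, a first smoke test can be as small as the following sketch; it assumes `ChromaContainer` exposes the standard host/port helpers and that Chroma listens on its default port 8000:

```python
import chromadb
from testcontainers.chroma import ChromaContainer

with ChromaContainer() as chroma:
    client = chromadb.HttpClient(
        host=chroma.get_container_host_ip(),
        port=int(chroma.get_exposed_port(8000)),
    )
    # heartbeat() returns a nanosecond timestamp; any positive value
    # proves the server is reachable through the exposed port
    assert client.heartbeat() > 0
```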
+ +## Adding this module to your project dependencies + +Please run the following command to add the Chroma module to your python dependencies: + +```bash +pip install testcontainers[chroma] chromadb requests +``` + +## Usage example + + + +[Creating a Chroma container](../../modules/chroma/example_basic.py) + + + +## Features + +- Vector similarity search +- Document storage and retrieval +- Metadata filtering +- Collection management +- Embedding storage +- Distance metrics +- Batch operations +- REST API support + +## Configuration + +The Chroma container can be configured with the following parameters: + +- `port`: Port to expose (default: 8000) +- `version`: Chroma version to use (default: "latest") +- `persist_directory`: Directory to persist data (default: None) +- `allow_reset`: Whether to allow collection reset (default: True) diff --git a/docs/modules/clickhouse.md b/docs/modules/clickhouse.md new file mode 100644 index 000000000..ed86c3f32 --- /dev/null +++ b/docs/modules/clickhouse.md @@ -0,0 +1,44 @@ +# ClickHouse + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for ClickHouse. + +## Adding this module to your project dependencies + +Please run the following command to add the ClickHouse module to your python dependencies: + +```bash +pip install testcontainers[clickhouse] clickhouse-driver +``` + +## Usage example + + + +[Creating a ClickHouse container](../../modules/clickhouse/example_basic.py) + + + +## Features + +- Column-oriented storage +- High-performance analytics +- Real-time data processing +- SQL support +- Data compression +- Parallel processing +- Distributed queries +- Integration with pandas for data analysis + +## Configuration + +The ClickHouse container can be configured with the following parameters: + +- `port`: Port to expose (default: 9000) +- `version`: ClickHouse version to use (default: "latest") +- `user`: Database username (default: "default") +- `password`: Database password (default: "") +- `database`: Database name (default: "default") diff --git a/docs/modules/cockroachdb.md b/docs/modules/cockroachdb.md new file mode 100644 index 000000000..285ffc92d --- /dev/null +++ b/docs/modules/cockroachdb.md @@ -0,0 +1,44 @@ +# CockroachDB + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for CockroachDB. 
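As a quick orientation before the example, a minimal connectivity sketch under the assumption that `CockroachDBContainer` follows the `get_connection_url()` convention of the other SQL-based modules:

```python
import sqlalchemy
from testcontainers.cockroachdb import CockroachDBContainer

with CockroachDBContainer() as crdb:
    # The connection URL points at the randomly mapped host port
    engine = sqlalchemy.create_engine(crdb.get_connection_url())
    with engine.connect() as connection:
        assert connection.execute(sqlalchemy.text("SELECT 1")).scalar() == 1
```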
+ +## Adding this module to your project dependencies + +Please run the following command to add the CockroachDB module to your python dependencies: + +```bash +pip install testcontainers[cockroachdb] sqlalchemy psycopg2 +``` + +## Usage example + + + +[Creating a CockroachDB container](../../modules/cockroachdb/example_basic.py) + + + +## Features + +- Distributed SQL database +- ACID transactions +- Strong consistency +- Horizontal scaling +- Built-in replication +- Automatic sharding +- SQL compatibility +- Integration with pandas for data analysis + +## Configuration + +The CockroachDB container can be configured with the following parameters: + +- `username`: Database username (default: "root") +- `password`: Database password (default: "") +- `database`: Database name (default: "postgres") +- `port`: Port to expose (default: 26257) +- `version`: CockroachDB version to use (default: "latest") diff --git a/docs/modules/cosmosdb.md b/docs/modules/cosmosdb.md new file mode 100644 index 000000000..3aadbe6b3 --- /dev/null +++ b/docs/modules/cosmosdb.md @@ -0,0 +1,43 @@ +# CosmosDB + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for Azure Cosmos DB. + +## Adding this module to your project dependencies + +Please run the following command to add the CosmosDB module to your python dependencies: + +```bash +pip install testcontainers[cosmosdb] pymongo azure-cosmos +``` + +## Usage example + + + +[Creating a CosmosDB container](../../modules/cosmosdb/example_basic.py) + + + +## Features + +- Multi-model database support (document, key-value, wide-column, graph) +- SQL-like query language +- Automatic indexing +- Partitioning support +- Global distribution +- Built-in aggregation functions +- Container management +- Document CRUD operations + +## Configuration + +The CosmosDB container can be configured with the following parameters: + +- `port`: Port to expose (default: 8081) +- `version`: CosmosDB Emulator version to use (default: "latest") +- `ssl_verify`: Whether to verify SSL certificates (default: False) +- `emulator_key`: Emulator key for authentication (default: "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") diff --git a/docs/modules/db2.md b/docs/modules/db2.md new file mode 100644 index 000000000..59b6a4493 --- /dev/null +++ b/docs/modules/db2.md @@ -0,0 +1,43 @@ +# DB2 + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for IBM Db2. 
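A minimal connectivity sketch; the `Db2Container` entry point and the `get_connection_url()` helper are assumptions based on the conventions of the other SQL-based modules:

```python
import sqlalchemy
from testcontainers.db2 import Db2Container  # assumed entry point

with Db2Container() as db2:
    engine = sqlalchemy.create_engine(db2.get_connection_url())
    with engine.connect() as connection:
        assert connection.execute(sqlalchemy.text("SELECT 1 FROM SYSIBM.SYSDUMMY1")).scalar() == 1
```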
+ +## Adding this module to your project dependencies + +Please run the following command to add the DB2 module to your python dependencies: + +```bash +pip install testcontainers[db2] sqlalchemy ibm-db +``` + +## Usage example + + + +[Creating a DB2 container](../../modules/db2/example_basic.py) + + + +## Features + +- Full SQL support +- Transaction management +- Stored procedures +- User-defined functions +- Advanced analytics +- JSON support +- Integration with pandas for data analysis + +## Configuration + +The DB2 container can be configured with the following parameters: + +- `username`: Database username (default: "db2inst1") +- `password`: Database password (default: "password") +- `database`: Database name (default: "testdb") +- `port`: Port to expose (default: 50000) +- `version`: DB2 version to use (default: "latest") diff --git a/docs/modules/elasticsearch.md b/docs/modules/elasticsearch.md new file mode 100644 index 000000000..e95e3beb5 --- /dev/null +++ b/docs/modules/elasticsearch.md @@ -0,0 +1,23 @@ +# Elasticsearch + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Elasticsearch. + +## Adding this module to your project dependencies + +Please run the following command to add the Elasticsearch module to your python dependencies: + +```bash +pip install testcontainers[elasticsearch] +``` + +## Usage example + + + +[Creating an Elasticsearch container](../../modules/elasticsearch/example_basic.py) + + diff --git a/docs/modules/generic.md b/docs/modules/generic.md new file mode 100644 index 000000000..87d1209b7 --- /dev/null +++ b/docs/modules/generic.md @@ -0,0 +1,23 @@ +# Generic + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for running generic containers with various configurations and features. + +## Adding this module to your project dependencies + +Please run the following command to add the Generic module to your python dependencies: + +``` +pip install testcontainers[generic] +``` + +## Usage example + + + +[Creating a Generic container](../../modules/generic/example_basic.py) + + diff --git a/docs/modules/google.md b/docs/modules/google.md new file mode 100644 index 000000000..f228e6c99 --- /dev/null +++ b/docs/modules/google.md @@ -0,0 +1,23 @@ +# Google + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Google Cloud services. + +## Adding this module to your project dependencies + +Please run the following command to add the Google module to your python dependencies: + +```bash +pip install testcontainers[google] google-cloud-datastore google-cloud-pubsub +``` + +## Usage example + + + +[Creating a Google container](../../modules/google/example_basic.py) + + diff --git a/docs/modules/influxdb.md b/docs/modules/influxdb.md new file mode 100644 index 000000000..9541db7a4 --- /dev/null +++ b/docs/modules/influxdb.md @@ -0,0 +1,27 @@ +# InfluxDB + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for InfluxDB. 
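A minimal 2.x health-check sketch; the `InfluxDb2Container` import path is an assumption (adjust it to the InfluxDB version and module layout you actually use), and `requests` is only used here for the unauthenticated `/health` endpoint:

```python
import requests
from testcontainers.influxdb2 import InfluxDb2Container  # assumed import path

with InfluxDb2Container() as influxdb:
    url = f"http://{influxdb.get_container_host_ip()}:{influxdb.get_exposed_port(8086)}"
    # InfluxDB 2.x exposes an unauthenticated health endpoint
    assert requests.get(f"{url}/health").json()["status"] == "pass"
```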
+ +## Adding this module to your project dependencies + +Please run the following command to add the InfluxDB module to your python dependencies: + +```bash +# For InfluxDB 1.x +pip install testcontainers[influxdb] influxdb + +# For InfluxDB 2.x +pip install testcontainers[influxdb] influxdb-client +``` + +## Usage example + + + +[Creating an InfluxDB container](../../modules/influxdb/example_basic.py) + + diff --git a/docs/modules/k3s.md b/docs/modules/k3s.md new file mode 100644 index 000000000..66d26d0fc --- /dev/null +++ b/docs/modules/k3s.md @@ -0,0 +1,23 @@ +# K3s + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for K3s. + +## Adding this module to your project dependencies + +Please run the following command to add the K3s module to your python dependencies: + +```bash +pip install testcontainers[k3s] kubernetes pyyaml +``` + +## Usage example + + + +[Creating a K3s container](../../modules/k3s/example_basic.py) + + diff --git a/docs/modules/kafka.md b/docs/modules/kafka.md new file mode 100644 index 000000000..3a206bb3d --- /dev/null +++ b/docs/modules/kafka.md @@ -0,0 +1,23 @@ +# Kafka + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Kafka. + +## Adding this module to your project dependencies + +Please run the following command to add the Kafka module to your python dependencies: + +```bash +pip install testcontainers[kafka] +``` + +## Usage example + + + +[Creating a Kafka container](../../modules/kafka/example_basic.py) + + diff --git a/docs/modules/keycloak.md b/docs/modules/keycloak.md new file mode 100644 index 000000000..98b638380 --- /dev/null +++ b/docs/modules/keycloak.md @@ -0,0 +1,23 @@ +# Keycloak + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Keycloak. + +## Adding this module to your project dependencies + +Please run the following command to add the Keycloak module to your python dependencies: + +```bash +pip install testcontainers[keycloak] python-keycloak requests +``` + +## Usage example + + + +[Creating a Keycloak container](../../modules/keycloak/example_basic.py) + + diff --git a/docs/modules/localstack.md b/docs/modules/localstack.md new file mode 100644 index 000000000..6c67d6696 --- /dev/null +++ b/docs/modules/localstack.md @@ -0,0 +1,23 @@ +# LocalStack + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for LocalStack. + +## Adding this module to your project dependencies + +Please run the following command to add the LocalStack module to your python dependencies: + +```bash +pip install testcontainers[localstack] boto3 +``` + +## Usage example + + + +[Creating a LocalStack container](../../modules/localstack/example_basic.py) + + diff --git a/docs/modules/mailpit.md b/docs/modules/mailpit.md new file mode 100644 index 000000000..ca7d49364 --- /dev/null +++ b/docs/modules/mailpit.md @@ -0,0 +1,23 @@ +# Mailpit + +Since testcontainers-python :material-tag: v4.7.1 + +## Introduction + +The Testcontainers module for Mailpit. 
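A minimal sketch that delivers one message over Mailpit's default SMTP port 1025, assuming `MailpitContainer` is the module's entry point:

```python
import smtplib
from testcontainers.mailpit import MailpitContainer  # assumed entry point

with MailpitContainer() as mailpit:
    host = mailpit.get_container_host_ip()
    port = int(mailpit.get_exposed_port(1025))  # Mailpit's default SMTP port
    with smtplib.SMTP(host, port) as smtp:
        smtp.sendmail(
            "from@example.com",
            ["to@example.com"],
            b"Subject: hello\r\n\r\nDelivered through the test container",
        )
```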
+ +## Adding this module to your project dependencies + +Please run the following command to add the Mailpit module to your python dependencies: + +```bash +pip install testcontainers[mailpit] cryptography +``` + +## Usage example + + + +[Creating a Mailpit container](../../modules/mailpit/example_basic.py) + + diff --git a/docs/modules/memcached.md b/docs/modules/memcached.md new file mode 100644 index 000000000..5d18fafc0 --- /dev/null +++ b/docs/modules/memcached.md @@ -0,0 +1,23 @@ +# Memcached + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for Memcached. + +## Adding this module to your project dependencies + +Please run the following command to add the Memcached module to your python dependencies: + +```bash +pip install testcontainers[memcached] pymemcache +``` + +## Usage example + + + +[Creating a Memcached container](../../modules/memcached/example_basic.py) + + diff --git a/docs/modules/milvus.md b/docs/modules/milvus.md new file mode 100644 index 000000000..9c7beda6d --- /dev/null +++ b/docs/modules/milvus.md @@ -0,0 +1,23 @@ +# Milvus + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for Milvus. + +## Adding this module to your project dependencies + +Please run the following command to add the Milvus module to your python dependencies: + +```bash +pip install testcontainers[milvus] requests +``` + +## Usage example + + + +[Creating a Milvus container](../../modules/milvus/example_basic.py) + + diff --git a/docs/modules/minio.md b/docs/modules/minio.md new file mode 100644 index 000000000..15ea1b6ef --- /dev/null +++ b/docs/modules/minio.md @@ -0,0 +1,23 @@ +# MinIO + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for MinIO. + +## Adding this module to your project dependencies + +Please run the following command to add the MinIO module to your python dependencies: + +```bash +pip install testcontainers[minio] minio requests +``` + +## Usage example + + + +[Creating a MinIO container](../../modules/minio/example_basic.py) + + diff --git a/docs/modules/mongodb.md b/docs/modules/mongodb.md new file mode 100644 index 000000000..0c2d2d75d --- /dev/null +++ b/docs/modules/mongodb.md @@ -0,0 +1,23 @@ +# MongoDB + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for MongoDB. + +## Adding this module to your project dependencies + +Please run the following command to add the MongoDB module to your python dependencies: + +```bash +pip install testcontainers[mongodb] pymongo +``` + +## Usage example + + + +[Creating a MongoDB container](../../modules/mongodb/example_basic.py) + + diff --git a/docs/modules/mqtt.md b/docs/modules/mqtt.md new file mode 100644 index 000000000..c290532fd --- /dev/null +++ b/docs/modules/mqtt.md @@ -0,0 +1,23 @@ +# MQTT + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for MQTT. 
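A minimal publish sketch, assuming `MosquittoContainer` is the module's entry point and that the container is configured to allow anonymous clients on the default port 1883:

```python
import paho.mqtt.client as mqtt
from testcontainers.mqtt import MosquittoContainer  # assumed entry point

with MosquittoContainer() as broker:
    client = mqtt.Client()  # paho-mqtt 1.x constructor; 2.x also takes a CallbackAPIVersion
    client.connect(broker.get_container_host_ip(), int(broker.get_exposed_port(1883)))
    client.loop_start()
    # wait_for_publish() blocks until the broker has acknowledged the message
    client.publish("demo/topic", "hello").wait_for_publish()
    client.loop_stop()
    client.disconnect()
```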
+ +## Adding this module to your project dependencies + +Please run the following command to add the MQTT module to your python dependencies: + +```bash +pip install testcontainers[mqtt] paho-mqtt +``` + +## Usage example + + + +[Creating an MQTT container](../../modules/mqtt/example_basic.py) + + diff --git a/docs/modules/mssql.md b/docs/modules/mssql.md new file mode 100644 index 000000000..effac8c75 --- /dev/null +++ b/docs/modules/mssql.md @@ -0,0 +1,23 @@ +# MSSQL + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Microsoft SQL Server. + +## Adding this module to your project dependencies + +Please run the following command to add the MSSQL module to your python dependencies: + +```bash +pip install testcontainers[mssql] pymssql +``` + +## Usage example + + + +[Creating an MSSQL container](../../modules/mssql/example_basic.py) + + diff --git a/docs/modules/mysql.md b/docs/modules/mysql.md new file mode 100644 index 000000000..e3ca18ae7 --- /dev/null +++ b/docs/modules/mysql.md @@ -0,0 +1,23 @@ +# MySQL + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for MySQL. + +## Adding this module to your project dependencies + +Please run the following command to add the MySQL module to your python dependencies: + +```bash +pip install testcontainers[mysql] sqlalchemy pymysql +``` + +## Usage example + + + +[Creating a MySQL container](../../modules/mysql/example_basic.py) + + diff --git a/docs/modules/nats.md b/docs/modules/nats.md new file mode 100644 index 000000000..e3616e490 --- /dev/null +++ b/docs/modules/nats.md @@ -0,0 +1,23 @@ +# NATS + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for NATS. + +## Adding this module to your project dependencies + +Please run the following command to add the NATS module to your python dependencies: + +```bash +pip install testcontainers[nats] nats-py +``` + +## Usage example + + + +[Creating a NATS container](../../modules/nats/example_basic.py) + + diff --git a/docs/modules/neo4j.md b/docs/modules/neo4j.md new file mode 100644 index 000000000..047dd1de3 --- /dev/null +++ b/docs/modules/neo4j.md @@ -0,0 +1,23 @@ +# Neo4j + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Neo4j. + +## Adding this module to your project dependencies + +Please run the following command to add the Neo4j module to your python dependencies: + +```bash +pip install testcontainers[neo4j] neo4j +``` + +## Usage example + + + +[Creating a Neo4j container](../../modules/neo4j/example_basic.py) + + diff --git a/docs/modules/nginx.md b/docs/modules/nginx.md new file mode 100644 index 000000000..6781c1a88 --- /dev/null +++ b/docs/modules/nginx.md @@ -0,0 +1,23 @@ +# Nginx + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Nginx. + +## Adding this module to your project dependencies + +Please run the following command to add the Nginx module to your python dependencies: + +```bash +pip install testcontainers[nginx] +``` + +## Usage example + + + +[Creating a Nginx container](../../modules/nginx/example_basic.py) + + diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md new file mode 100644 index 000000000..c9db6e14f --- /dev/null +++ b/docs/modules/ollama.md @@ -0,0 +1,23 @@ +# Ollama + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for Ollama. 
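A minimal sketch that talks to Ollama's REST API on its default port 11434; the `/api/tags` endpoint lists the models available inside the container:

```python
import requests
from testcontainers.ollama import OllamaContainer

with OllamaContainer() as ollama:
    base = f"http://{ollama.get_container_host_ip()}:{ollama.get_exposed_port(11434)}"
    # A fresh container has no models pulled yet, so the list may be empty
    models = requests.get(f"{base}/api/tags").json()["models"]
    print(models)
```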
+ +## Adding this module to your project dependencies + +Please run the following command to add the Ollama module to your python dependencies: + +```bash +pip install testcontainers[ollama] requests +``` + +## Usage example + + + +[Creating an Ollama container](../../modules/ollama/example_basic.py) + + diff --git a/docs/modules/opensearch.md b/docs/modules/opensearch.md new file mode 100644 index 000000000..d57ee45a7 --- /dev/null +++ b/docs/modules/opensearch.md @@ -0,0 +1,23 @@ +# OpenSearch + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for OpenSearch. + +## Adding this module to your project dependencies + +Please run the following command to add the OpenSearch module to your python dependencies: + +```bash +pip install testcontainers[opensearch] opensearch-py +``` + +## Usage example + + + +[Creating an OpenSearch container](../../modules/opensearch/example_basic.py) + + diff --git a/docs/modules/oracle-free.md b/docs/modules/oracle-free.md new file mode 100644 index 000000000..a0b68d18d --- /dev/null +++ b/docs/modules/oracle-free.md @@ -0,0 +1,23 @@ +# Oracle Free + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Oracle Free. + +## Adding this module to your project dependencies + +Please run the following command to add the Oracle Free module to your python dependencies: + +```bash +pip install testcontainers[oracle-free] +``` + +## Usage example + + + +[Creating an Oracle Free container](../../modules/oracle-free/example_basic.py) + + diff --git a/docs/modules/postgres.md b/docs/modules/postgres.md new file mode 100644 index 000000000..4b381753f --- /dev/null +++ b/docs/modules/postgres.md @@ -0,0 +1,23 @@ +# PostgreSQL + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for PostgreSQL. + +## Adding this module to your project dependencies + +Please run the following command to add the PostgreSQL module to your python dependencies: + +```bash +pip install testcontainers[postgres] sqlalchemy psycopg2 +``` + +## Usage example + + + +[Creating a PostgreSQL container](../../modules/postgres/example_basic.py) + + diff --git a/docs/modules/qdrant.md b/docs/modules/qdrant.md new file mode 100644 index 000000000..c4eb7310f --- /dev/null +++ b/docs/modules/qdrant.md @@ -0,0 +1,23 @@ +# Qdrant + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Qdrant. + +## Adding this module to your project dependencies + +Please run the following command to add the Qdrant module to your python dependencies: + +```bash +pip install testcontainers[qdrant] +``` + +## Usage example + + + +[Creating a Qdrant container](../../modules/qdrant/example_basic.py) + + diff --git a/docs/modules/rabbitmq.md b/docs/modules/rabbitmq.md new file mode 100644 index 000000000..850b2739f --- /dev/null +++ b/docs/modules/rabbitmq.md @@ -0,0 +1,23 @@ +# RabbitMQ + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for RabbitMQ. 
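+
+To give a feel for the API before the setup steps, the sketch below publishes a single message with `pika`. It assumes the container provides a `get_connection_params()` helper that returns ready-made `pika.ConnectionParameters`; see the linked example for the exact API.
+
+```python
+import pika
+
+from testcontainers.rabbitmq import RabbitMqContainer
+
+# Start RabbitMQ, open a blocking pika connection with the parameters
+# the container hands back, and publish one message to a queue.
+with RabbitMqContainer() as rabbitmq:
+    connection = pika.BlockingConnection(rabbitmq.get_connection_params())
+    channel = connection.channel()
+    channel.queue_declare(queue="hello")
+    channel.basic_publish(exchange="", routing_key="hello", body=b"ping")
+    connection.close()
+```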
+ +## Adding this module to your project dependencies + +Please run the following command to add the RabbitMQ module to your python dependencies: + +```bash +pip install testcontainers[rabbitmq] pika +``` + +## Usage example + + + +[Creating a RabbitMQ container](../../modules/rabbitmq/example_basic.py) + + diff --git a/docs/modules/redis.md b/docs/modules/redis.md new file mode 100644 index 000000000..16f8566e2 --- /dev/null +++ b/docs/modules/redis.md @@ -0,0 +1,23 @@ +# Redis + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Redis. + +## Adding this module to your project dependencies + +Please run the following command to add the Redis module to your python dependencies: + +```bash +pip install testcontainers[redis] redis +``` + +## Usage example + + + +[Creating a Redis container](../../modules/redis/example_basic.py) + + diff --git a/docs/modules/registry.md b/docs/modules/registry.md new file mode 100644 index 000000000..b00380d8c --- /dev/null +++ b/docs/modules/registry.md @@ -0,0 +1,23 @@ +# Registry + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Registry. + +## Adding this module to your project dependencies + +Please run the following command to add the Registry module to your python dependencies: + +```bash +pip install testcontainers[registry] +``` + +## Usage example + + + +[Creating a Registry container](../../modules/registry/example_basic.py) + + diff --git a/docs/modules/scylla.md b/docs/modules/scylla.md new file mode 100644 index 000000000..c1001a425 --- /dev/null +++ b/docs/modules/scylla.md @@ -0,0 +1,23 @@ +# Scylla + +Since testcontainers-python :material-tag: v4.8.0 + +## Introduction + +The Testcontainers module for Scylla. + +## Adding this module to your project dependencies + +Please run the following command to add the Scylla module to your python dependencies: + +```bash +pip install testcontainers[scylla] +``` + +## Usage example + + + +[Creating a Scylla container](../../modules/scylla/example_basic.py) + + diff --git a/docs/modules/selenium.md b/docs/modules/selenium.md new file mode 100644 index 000000000..68b6174a7 --- /dev/null +++ b/docs/modules/selenium.md @@ -0,0 +1,23 @@ +# Selenium + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Selenium. + +## Adding this module to your project dependencies + +Please run the following command to add the Selenium module to your python dependencies: + +```bash +pip install testcontainers[selenium] selenium urllib3 +``` + +## Usage example + + + +[Creating a Selenium container](../../modules/selenium/example_basic.py) + + diff --git a/docs/modules/sftp.md b/docs/modules/sftp.md new file mode 100644 index 000000000..8fe7ecc5c --- /dev/null +++ b/docs/modules/sftp.md @@ -0,0 +1,23 @@ +# SFTP + +Since testcontainers-python :material-tag: v4.7.1 + +## Introduction + +The Testcontainers module for SFTP. 
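+
+For orientation before the setup steps, the sketch below connects with `paramiko`. The `SFTPContainer` class name, the exposed port 22, and the demo credentials are all illustrative assumptions; the linked example shows the real API, including how users are configured on the container.
+
+```python
+import paramiko
+
+from testcontainers.sftp import SFTPContainer  # class name assumed
+
+# Start an SFTP server and list its root directory over paramiko.
+with SFTPContainer() as sftp:
+    host = sftp.get_container_host_ip()
+    port = int(sftp.get_exposed_port(22))  # 22 = standard SSH/SFTP port
+    transport = paramiko.Transport((host, port))
+    transport.connect(username="demo", password="demo")  # placeholder credentials
+    client = paramiko.SFTPClient.from_transport(transport)
+    print(client.listdir("."))
+    transport.close()
+```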
+ +## Adding this module to your project dependencies + +Please run the following command to add the SFTP module to your python dependencies: + +```bash +pip install testcontainers[sftp] paramiko cryptography +``` + +## Usage example + + + +[Creating an SFTP container](../../modules/sftp/example_basic.py) + + diff --git a/docs/modules/test_module_import.md b/docs/modules/test_module_import.md new file mode 100644 index 000000000..ed5472756 --- /dev/null +++ b/docs/modules/test_module_import.md @@ -0,0 +1,100 @@ +# Test Module Import + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for testing Python module imports and package management. This module provides a containerized environment for testing various aspects of Python module imports, including: + +- Basic module and package imports +- Module reloading +- Version-specific imports +- Dependencies and environment variables +- Advanced features like custom loaders and namespace packages + +## Adding this module to your project dependencies + +Please run the following command to add the Test Module Import module to your python dependencies: + +```bash +pip install testcontainers[test_module_import] +``` + +## Usage examples + +The module provides several examples demonstrating different use cases: + +### Basic Module Imports + +This example demonstrates the fundamental capabilities of the TestModuleImportContainer: + +- Importing a basic Python module and accessing its attributes +- Importing and using submodules +- Importing and working with packages +- Proper cleanup of imported modules + + + +[Basic module imports](../../modules/test_module_import/examples/01_basic_import.py) + + + +### Module Reloading + +This example shows how to work with module reloading functionality: + +- Importing a module and accessing its initial state +- Reloading the module to pick up changes +- Handling reloading errors gracefully +- Managing module state during reloads + + + +[Module reloading](../../modules/test_module_import/examples/02_module_reloading.py) + + + +### Version-Specific Imports + +This example demonstrates handling version-specific module imports: + +- Importing specific versions of modules +- Managing version compatibility +- Accessing and verifying version information +- Working with version-specific features + + + +[Version-specific imports](../../modules/test_module_import/examples/03_version_specific.py) + + + +### Dependencies and Environment Variables + +This example shows how to handle module dependencies and environment requirements: + +- Importing modules with external dependencies +- Managing required dependency versions +- Setting up and accessing environment variables +- Handling environment-specific configurations + + + +[Dependencies and environment variables](../../modules/test_module_import/examples/04_dependencies_and_env.py) + + + +### Advanced Features + +This example demonstrates advanced module import capabilities: + +- Using custom module loaders for specialized import scenarios +- Working with namespace packages +- Managing entry points +- Handling complex module configurations + + + +[Advanced features](../../modules/test_module_import/examples/05_advanced_features.py) + + diff --git a/docs/modules/trino.md b/docs/modules/trino.md new file mode 100644 index 000000000..3ceda1445 --- /dev/null +++ b/docs/modules/trino.md @@ -0,0 +1,23 @@ +# Trino + +Since testcontainers-python :material-tag: v4.7.2 + +## Introduction + +The Testcontainers module for Trino.
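+
+As a rough preview before the setup steps, the sketch below runs a query through the `trino` DB-API client. The default HTTP port 8080 and the generic host/port helpers are assumptions; the linked example is the authoritative reference.
+
+```python
+from trino.dbapi import connect
+
+from testcontainers.trino import TrinoContainer
+
+# Start Trino and run a trivial query via the DB-API interface.
+with TrinoContainer() as trino_container:
+    conn = connect(
+        host=trino_container.get_container_host_ip(),
+        port=int(trino_container.get_exposed_port(8080)),  # 8080 = default Trino port
+        user="test",
+    )
+    cur = conn.cursor()
+    cur.execute("SELECT 1")
+    print(cur.fetchall())
+```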
+ +## Adding this module to your project dependencies + +Please run the following command to add the Trino module to your python dependencies: + +```bash +pip install testcontainers[trino] trino +``` + +## Usage example + + + +[Creating a Trino container](../../modules/trino/example_basic.py) + + diff --git a/docs/modules/vault.md b/docs/modules/vault.md new file mode 100644 index 000000000..7dc4d1260 --- /dev/null +++ b/docs/modules/vault.md @@ -0,0 +1,23 @@ +# Vault + +Since testcontainers-python :material-tag: v4.7.0 + +## Introduction + +The Testcontainers module for Vault. + +## Adding this module to your project dependencies + +Please run the following command to add the Vault module to your python dependencies: + +```bash +pip install testcontainers[vault] hvac +``` + +## Usage example + + + +[Creating a Vault container](../../modules/vault/example_basic.py) + + diff --git a/docs/modules/weaviate.md b/docs/modules/weaviate.md new file mode 100644 index 000000000..90fec975a --- /dev/null +++ b/docs/modules/weaviate.md @@ -0,0 +1,23 @@ +# Weaviate + +Since testcontainers-python :material-tag: v4.6.0 + +## Introduction + +The Testcontainers module for Weaviate. + +## Adding this module to your project dependencies + +Please run the following command to add the Weaviate module to your python dependencies: + +```bash +pip install testcontainers[weaviate] weaviate-client +``` + +## Usage example + + + +[Creating a Weaviate container](../../modules/weaviate/example_basic.py) + + diff --git a/docs/poetry.lock b/docs/poetry.lock new file mode 100644 index 000000000..bb4f10cd7 --- /dev/null +++ b/docs/poetry.lock @@ -0,0 +1,829 @@ +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. + +[[package]] +name = "babel" +version = "2.17.0" +description = "Internationalization utilities" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2"}, + {file = "babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d"}, +] + +[package.extras] +dev = ["backports.zoneinfo ; python_version < \"3.9\"", "freezegun (>=1.0,<2.0)", "jinja2 (>=3.0)", "pytest (>=6.0)", "pytest-cov", "pytz", "setuptools", "tzdata ; sys_platform == \"win32\""] + +[[package]] +name = "backrefs" +version = "5.8" +description = "A wrapper around re and regex that adds additional back references." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "backrefs-5.8-py310-none-any.whl", hash = "sha256:c67f6638a34a5b8730812f5101376f9d41dc38c43f1fdc35cb54700f6ed4465d"}, + {file = "backrefs-5.8-py311-none-any.whl", hash = "sha256:2e1c15e4af0e12e45c8701bd5da0902d326b2e200cafcd25e49d9f06d44bb61b"}, + {file = "backrefs-5.8-py312-none-any.whl", hash = "sha256:bbef7169a33811080d67cdf1538c8289f76f0942ff971222a16034da88a73486"}, + {file = "backrefs-5.8-py313-none-any.whl", hash = "sha256:e3a63b073867dbefd0536425f43db618578528e3896fb77be7141328642a1585"}, + {file = "backrefs-5.8-py39-none-any.whl", hash = "sha256:a66851e4533fb5b371aa0628e1fee1af05135616b86140c9d787a2ffdf4b8fdc"}, + {file = "backrefs-5.8.tar.gz", hash = "sha256:2cab642a205ce966af3dd4b38ee36009b31fa9502a35fd61d59ccc116e40a6bd"}, +] + +[package.extras] +extras = ["regex"] + +[[package]] +name = "bracex" +version = "2.5.post1" +description = "Bash style brace expander." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "bracex-2.5.post1-py3-none-any.whl", hash = "sha256:13e5732fec27828d6af308628285ad358047cec36801598368cb28bc631dbaf6"}, + {file = "bracex-2.5.post1.tar.gz", hash = "sha256:12c50952415bfa773d2d9ccb8e79651b8cdb1f31a42f6091b804f6ba2b4a66b6"}, +] + +[[package]] +name = "certifi" +version = "2025.4.26" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, + {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, + {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, 
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, + {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, 
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, + {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, + {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, + 
{file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, + {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, + {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, 
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, + {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, + {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, + {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, +] + +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version == \"3.9\"" +files = [ + {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, + {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, +] + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "markdown" +version = "3.8" +description = "Python implementation of John Gruber's Markdown." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc"}, + {file = "markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mkdocs" +version = "1.6.1" +description = "Project documentation with Markdown." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, + {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} +jinja2 = ">=2.11.1" +markdown = ">=3.3.6" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +mkdocs-get-deps = ">=0.2.0" +packaging = ">=20.5" +pathspec = ">=0.11.1" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4) ; platform_system == \"Windows\"", "ghp-import (==1.0)", "importlib-metadata (==4.4) ; python_version < \"3.10\"", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-codeinclude-plugin" +version = "0.2.1" +description = "A plugin to include code snippets into mkdocs pages" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "mkdocs-codeinclude-plugin-0.2.1.tar.gz", hash = "sha256:305387f67a885f0e36ec1cf977324fe1fe50d31301147194b63631d0864601b1"}, + {file = "mkdocs_codeinclude_plugin-0.2.1-py3-none-any.whl", hash = "sha256:172a917c9b257fa62850b669336151f85d3cd40312b2b52520cbcceab557ea6c"}, +] + +[package.dependencies] +mkdocs = ">=1.2" +pygments = ">=2.9.0" + +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} +mergedeep = ">=1.3.4" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" + +[[package]] +name = "mkdocs-include-markdown-plugin" +version = "7.1.5" +description = "Mkdocs Markdown includer plugin." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "mkdocs_include_markdown_plugin-7.1.5-py3-none-any.whl", hash = "sha256:d0b96edee45e7fda5eb189e63331cfaf1bf1fbdbebbd08371f1daa77045d3ae9"}, + {file = "mkdocs_include_markdown_plugin-7.1.5.tar.gz", hash = "sha256:a986967594da6789226798e3c41c70bc17130fadb92b4313f42bd3defdac0adc"}, +] + +[package.dependencies] +mkdocs = ">=1.4" +wcmatch = "*" + +[package.extras] +cache = ["platformdirs"] + +[[package]] +name = "mkdocs-markdownextradata-plugin" +version = "0.2.6" +description = "A MkDocs plugin that injects the mkdocs.yml extra variables into the markdown template" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "mkdocs_markdownextradata_plugin-0.2.6-py3-none-any.whl", hash = "sha256:34dd40870781784c75809596b2d8d879da783815b075336d541de1f150c94242"}, + {file = "mkdocs_markdownextradata_plugin-0.2.6.tar.gz", hash = "sha256:4aed9b43b8bec65b02598387426ca4809099ea5f5aa78bf114f3296fd46686b5"}, +] + +[package.dependencies] +mkdocs = "*" +pyyaml = "*" + +[[package]] +name = "mkdocs-material" +version = "9.6.13" +description = "Documentation that simply works" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_material-9.6.13-py3-none-any.whl", hash = "sha256:3730730314e065f422cc04eacbc8c6084530de90f4654a1482472283a38e30d3"}, + {file = "mkdocs_material-9.6.13.tar.gz", hash = "sha256:7bde7ebf33cfd687c1c86c08ed8f6470d9a5ba737bd89e7b3e5d9f94f8c72c16"}, +] + +[package.dependencies] +babel = ">=2.10,<3.0" +backrefs = ">=5.7.post1,<6.0" +colorama = ">=0.4,<1.0" +jinja2 = ">=3.1,<4.0" +markdown = ">=3.2,<4.0" +mkdocs = ">=1.6,<2.0" +mkdocs-material-extensions = ">=1.3,<2.0" +paginate = ">=0.5,<1.0" +pygments = ">=2.16,<3.0" +pymdown-extensions = ">=10.2,<11.0" +requests = ">=2.26,<3.0" + +[package.extras] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<3)", "mkdocs-git-revision-date-localized-plugin (>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] +recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] + +[[package]] +name = "mkdocs-material-extensions" +version = "1.3.1" +description = "Extension pack for Python Markdown and MkDocs Material." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "paginate" +version = "0.5.7" +description = "Divides large result sets into pages for easier browsing" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591"}, + {file = "paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945"}, +] + +[package.extras] +dev = ["pytest", "tox"] +lint = ["black"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, + {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "pygments" +version = "2.19.1" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, + {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymdown-extensions" +version = "10.15" +description = "Extension pack for Python Markdown." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pymdown_extensions-10.15-py3-none-any.whl", hash = "sha256:46e99bb272612b0de3b7e7caf6da8dd5f4ca5212c0b273feb9304e236c484e5f"}, + {file = "pymdown_extensions-10.15.tar.gz", hash = "sha256:0e5994e32155f4b03504f939e501b981d306daf7ec2aa1cd2eb6bd300784f8f7"}, +] + +[package.dependencies] +markdown = ">=3.6" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.19.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = 
"PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "pyyaml-env-tag" +version = "1.0" +description = "A custom YAML tag for referencing environment variables in YAML files." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pyyaml_env_tag-1.0-py3-none-any.whl", hash = "sha256:37f081041b8dca44ed8eb931ce0056f97de17251450f0ed08773dc2bcaf9e683"}, + {file = "pyyaml_env_tag-1.0.tar.gz", hash = "sha256:bc952534a872b583f66f916e2dd83e7a7b9087847f4afca6d9c957c48b258ed2"}, +] + +[package.dependencies] +pyyaml = "*" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "watchdog" +version = "6.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = 
"watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[[package]] +name = "wcmatch" +version = "10.0" +description = "Wildcard/glob file name matcher." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "wcmatch-10.0-py3-none-any.whl", hash = "sha256:0dd927072d03c0a6527a20d2e6ad5ba8d0380e60870c383bc533b71744df7b7a"}, + {file = "wcmatch-10.0.tar.gz", hash = "sha256:e72f0de09bba6a04e0de70937b0cf06e55f36f37b3deb422dfaf854b867b840a"}, +] + +[package.dependencies] +bracex = ">=2.1.1" + +[[package]] +name = "zipp" +version = "3.21.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version == \"3.9\"" +files = [ + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.9,<4.0" +content-hash = "3f78e6a27c59513925f871a6a742fd5dc51049c5624a5dfe1377235cc2b7bda1" diff --git a/docs/pyproject.toml b/docs/pyproject.toml new file mode 100644 index 000000000..565d77714 --- /dev/null +++ b/docs/pyproject.toml @@ -0,0 +1,25 @@ +[tool.poetry] +name = "testcontainers-docs" +version = "0.1.0" +description = "Documentation site for testcontainers-python" +authors = ["Sergey Pirogov "] +maintainers = [ + "Balint Bartha ", + "David Ankin ", + "Vemund Santi ", +] + +[tool.poetry.dependencies] +python = ">=3.9,<4.0" +mkdocs = "^1.5.3" +mkdocs-material = "^9.5.0" +mkdocs-markdownextradata-plugin = "^0.2.6" +mkdocs-codeinclude-plugin = "^0.2.1" +mkdocs-include-markdown-plugin = "^7.1.5" + +[tool.poetry.requires-plugins] +poetry-plugin-export = ">=1.8" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 000000000..83b0454ff --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,92 @@ +_Testcontainers for Python_ integrates seamlessly with Python testing frameworks like [pytest](https://docs.pytest.org/en/stable/). + +It's ideal for integration and end-to-end tests, allowing you to easily manage dependencies using Docker. + +## 1. System requirements + +Before you begin, review the [system requirements](system_requirements/index.md). + +## 2. Install _Testcontainers for Python_ + +Install testcontainers-python with pip: + +```bash +pip install testcontainers +``` + +## 3. Spin up Redis + +```python +import pytest +from testcontainers.redis import RedisContainer +import redis + +def test_with_redis(): + with RedisContainer() as redis_container: + # Get connection parameters + host = redis_container.get_container_host_ip() + port = redis_container.get_exposed_port(redis_container.port) + + # Create Redis client + client = redis.Redis(host=host, port=port, decode_responses=True) + + # Test Redis connection + client.set("test_key", "Hello, Redis!") + value = client.get("test_key") + assert value == "Hello, Redis!" 
+```
+
+The `RedisContainer` class makes it easy to start a Redis container for testing:
+
+- The container starts automatically when entering the context manager (`with` statement).
+- It stops and removes itself when exiting the context.
+- `get_container_host_ip()` returns the host IP.
+- `get_exposed_port()` returns the mapped host port.
+
+When using `get_exposed_port()`, think of it as running `docker run -p <port>`: `dockerd` maps the container's internal port to a random available port on your host.
+
+In the example above, the default Redis port (6379) is exposed for TCP traffic. This setup allows code running outside the container to connect to Redis, and it supports parallel test execution: each test gets its own Redis container on a unique, random port.
+
+The context manager (`with` statement) ensures containers are cleaned up after tests, so no containers are left running.
+
+!!!tip
+
+    See [the garbage collector](features/garbage_collector.md) for another way to clean up resources.
+
+## 4. Connect your code to the container
+
+Typically, Python applications use the [redis-py](https://github.com/redis/redis-py) client. The following code retrieves the endpoint from the container and configures the client.
+
+```python
+import redis
+
+from testcontainers.redis import RedisContainer
+
+
+def test_redis_operations():
+    with RedisContainer() as redis_container:
+        # Get connection parameters
+        host = redis_container.get_container_host_ip()
+        port = redis_container.get_exposed_port(redis_container.port)
+
+        # Create Redis client
+        client = redis.Redis(host=host, port=port, decode_responses=True)
+
+        # Test various Redis operations
+        # String operations
+        client.set("greeting", "Hello, Redis!")
+        value = client.get("greeting")
+        assert value == "Hello, Redis!"
+
+        # List operations
+        client.lpush("tasks", "task1", "task2", "task3")
+        tasks = client.lrange("tasks", 0, -1)
+        assert tasks == ["task3", "task2", "task1"]
+```
+
+## 5. Run the test
+
+You can run the test via `pytest`:
+
+```bash
+pytest test_redis.py
+```
+
+## 6. Want to go deeper with Redis?
+
+You can find a more detailed Redis example in our [examples section](./modules/redis.md).
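+
+If you share one Redis instance across many tests, a session-scoped pytest fixture keeps container startup cost down. A minimal sketch (the fixture name and scope are illustrative choices, not part of the library API):
+
+```python
+import pytest
+import redis
+
+from testcontainers.redis import RedisContainer
+
+
+@pytest.fixture(scope="session")
+def redis_client():
+    # One container for the whole test session; removed when the session ends.
+    with RedisContainer() as container:
+        yield redis.Redis(
+            host=container.get_container_host_ip(),
+            port=container.get_exposed_port(container.port),
+            decode_responses=True,
+        )
+
+
+def test_ping(redis_client):
+    assert redis_client.ping()
+```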
diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 000000000..a46ce6fff --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,36 @@ +babel==2.17.0 ; python_version >= "3.9" and python_version < "4.0" +backrefs==5.8 ; python_version >= "3.9" and python_version < "4.0" +bracex==2.5.post1 ; python_version >= "3.9" and python_version < "4.0" +certifi==2025.4.26 ; python_version >= "3.9" and python_version < "4.0" +charset-normalizer==3.4.2 ; python_version >= "3.9" and python_version < "4.0" +click==8.1.8 ; python_version >= "3.9" and python_version < "4.0" +colorama==0.4.6 ; python_version >= "3.9" and python_version < "4.0" +ghp-import==2.1.0 ; python_version >= "3.9" and python_version < "4.0" +idna==3.10 ; python_version >= "3.9" and python_version < "4.0" +importlib-metadata==8.7.0 ; python_version == "3.9" +jinja2==3.1.6 ; python_version >= "3.9" and python_version < "4.0" +markdown==3.8 ; python_version >= "3.9" and python_version < "4.0" +markupsafe==3.0.2 ; python_version >= "3.9" and python_version < "4.0" +mergedeep==1.3.4 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-codeinclude-plugin==0.2.1 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-get-deps==0.2.0 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-include-markdown-plugin==7.1.5 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-markdownextradata-plugin==0.2.6 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-material-extensions==1.3.1 ; python_version >= "3.9" and python_version < "4.0" +mkdocs-material==9.6.13 ; python_version >= "3.9" and python_version < "4.0" +mkdocs==1.6.1 ; python_version >= "3.9" and python_version < "4.0" +packaging==25.0 ; python_version >= "3.9" and python_version < "4.0" +paginate==0.5.7 ; python_version >= "3.9" and python_version < "4.0" +pathspec==0.12.1 ; python_version >= "3.9" and python_version < "4.0" +platformdirs==4.3.8 ; python_version >= "3.9" and python_version < "4.0" +pygments==2.19.1 ; python_version >= "3.9" and python_version < "4.0" +pymdown-extensions==10.15 ; python_version >= "3.9" and python_version < "4.0" +python-dateutil==2.9.0.post0 ; python_version >= "3.9" and python_version < "4.0" +pyyaml-env-tag==1.0 ; python_version >= "3.9" and python_version < "4.0" +pyyaml==6.0.2 ; python_version >= "3.9" and python_version < "4.0" +requests==2.32.3 ; python_version >= "3.9" and python_version < "4.0" +six==1.17.0 ; python_version >= "3.9" and python_version < "4.0" +urllib3==2.4.0 ; python_version >= "3.9" and python_version < "4.0" +watchdog==6.0.0 ; python_version >= "3.9" and python_version < "4.0" +wcmatch==10.0 ; python_version >= "3.9" and python_version < "4.0" +zipp==3.21.0 ; python_version == "3.9" diff --git a/docs/system_requirements/docker.md b/docs/system_requirements/docker.md new file mode 100644 index 000000000..7bc3c55de --- /dev/null +++ b/docs/system_requirements/docker.md @@ -0,0 +1,11 @@ +# General Docker requirements + +Testcontainers requires a Docker-API compatible container runtime. +During development, Testcontainers is actively tested against recent versions of Docker on Linux, as well as against Docker Desktop on Mac and Windows. +These Docker environments are automatically detected and used by Testcontainers without any additional configuration being necessary. + +It is possible to configure Testcontainers to work for other Docker setups, such as a remote Docker host or Docker alternatives. 
+However, these are not actively tested in the main development workflow, so not all Testcontainers features might be available and additional manual configuration might be necessary. Please see the [Docker host detection](../features/configuration.md#docker-host-detection) section for more information.
+
+If you have further questions about configuration details for your setup or whether it supports running Testcontainers-based tests,
+please contact the Testcontainers team and other users from the Testcontainers community on [Slack](https://slack.testcontainers.org/).
diff --git a/docs/system_requirements/index.md b/docs/system_requirements/index.md
new file mode 100644
index 000000000..74e0464da
--- /dev/null
+++ b/docs/system_requirements/index.md
@@ -0,0 +1,183 @@
+# Python versions
+
+The library supports Python >= 3.9, < 4.0.
+
+## Updating your Python version
+
+There are several common approaches for managing and isolating your Python environment when using Testcontainers (or any Python project). Each has its own trade-offs in terms of reproducibility, ease of use, and integration with tooling:
+
+### venv (built-in virtual environments)
+
+#### What it is
+
+Python’s built-in way to create lightweight environments.
+
+#### How to use
+
+```bash
+python3 -m venv .venv        # create an env in ".venv"
+source .venv/bin/activate    # on Unix/macOS
+.venv\Scripts\activate       # on Windows
+pip install -r requirements.txt
+```
+
+| Pros | Cons |
+| ----------------------------------------- | -------------------------------------------------- |
+| No extra dependencies | You still manage `requirements.txt` by hand |
+| Very lightweight | Doesn’t provide lockfiles or dependency resolution |
+| Works everywhere Python 3.3+ is installed | |
+
+### virtualenv (stand-alone)
+
+#### What it is
+
+A more mature alternative to venv, sometimes faster and with more features.
+
+#### How to use
+
+```bash
+pip install virtualenv
+virtualenv .env
+source .env/bin/activate
+pip install -r requirements.txt
+```
+
+| Pros | Cons |
+| ---------------------------------------------------------------------------- | ----------------------------------------------------- |
+| Slightly more flexible than `venv` (e.g. can target different interpreters) | Still manual management of versions and dependencies |
+
+### pipenv
+
+#### What it is
+
+A higher-level tool combining environment creation with Pipfile dependency management.
+
+#### How to use
+
+```bash
+pip install pipenv
+pipenv install --dev testcontainers
+pipenv shell
+```
+
+Dependencies live in Pipfile; exact versions locked in Pipfile.lock.
+
+| Pros | Cons |
+| ----------------------------------------- | ---------------------------------------------------- |
+| Automatic creation of a virtualenv | Can be slower, historically some performance quirks |
+| Lockfile for reproducible installs | |
+| `pipenv run …` to avoid activating shells | |
+
+### poetry
+
+#### What it is
+
+A modern dependency manager and packaging tool, with built-in virtualenv support.
+
+#### How to use
+
+```bash
+curl -sSL https://install.python-poetry.org | python3 -
+poetry init                       # walks you through pyproject.toml creation
+poetry add --dev testcontainers
+poetry shell
+```
+
+Your Python version constraints and dependencies are in pyproject.toml; lockfile is poetry.lock.
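+
+As an illustration, a minimal sketch of the relevant pyproject.toml sections (the group layout follows recent Poetry; version pins are illustrative):
+
+```toml
+[tool.poetry.dependencies]
+python = ">=3.9,<4.0"
+
+[tool.poetry.group.dev.dependencies]
+testcontainers = "*"
+pytest = "*"
+```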
+
+| Pros | Cons |
+| --------------------------------------------------- | ------------------------------------------------------ |
+| Elegant TOML-based config | A bit of a learning curve if you’re used to plain Pip |
+| Creates truly reproducible environments | |
+| Supports publishing packages to PyPI out of the box | |
+
+### conda / mamba
+
+#### What it is
+
+Cross-language environment and package manager (Python/R/C++).
+
+#### How to use
+
+```bash
+conda create -n tc-env python=3.10
+conda activate tc-env
+conda install pip
+pip install testcontainers
+```
+
+Or with Mamba for faster solves:
+
+```bash
+mamba install pip
+mamba install -c conda-forge testcontainers
+```
+
+| Pros | Cons |
+| ---------------------------------------------------------------- | ---------------------------- |
+| Manages non-Python dependencies easily (e.g., system libraries) | Larger disk footprint |
+| Reproducible YAML environment files (`environment.yml`) | Less “pure” Python workflow |
+
+### Docker-based environments
+
+#### What it is
+
+Run your tests inside a Docker image, so everything (even Python itself) is containerized.
+
+#### How to use
+
+```dockerfile
+FROM python:3.10-slim
+WORKDIR /app
+COPY pyproject.toml poetry.lock ./
+RUN pip install poetry && poetry install --no-root
+COPY . .
+CMD ["pytest", "--maxfail=1", "--disable-warnings", "-q"]
+```
+
+| Pros | Cons |
+| ----------------------------------------------------- | ---------------------------------------------------- |
+| True isolation from host machine (including OS libs) | Slower startup/testing cycle |
+| Easy to share exact environment via Dockerfile | Extra complexity if you’re not already Docker-savvy |
+
+### tox for multi-env testing
+
+#### What it is
+
+A tool to automate testing across multiple Python versions/environments.
+
+#### How to use
+
+```ini
+# tox.ini
+
+[tox]
+envlist = py39,py310,py311
+
+[testenv]
+deps =
+    pytest
+    testcontainers
+commands = pytest
+```
+
+| Pros | Cons |
+| ---------------------------------------------------------- | ----------------------------- |
+| Ensures compatibility across multiple Python interpreters | Adds another layer of config |
+| Isolates each test run in its own venv | |
+
+## Choosing the Right Tool
+
+| Tool | Lockfile? | Built-in Env | Cross-Platform | Non-Python Deps | Reproducibility |
+| ------------ | --------- | ------------ | -------------- | --------------- | --------------- |
+| `venv` | No | Yes | Yes | No | Low |
+| `virtualenv` | No | Yes | Yes | No | Low |
+| `pipenv` | Yes | Yes | Yes | No | Medium |
+| `poetry` | Yes | Yes | Yes | No | High |
+| `conda` | Yes (YML) | Yes | Yes | Yes | High |
+| Docker | – | Container | Yes | Yes | Very High |
+
+## Next Steps
+
+With any of these, once your environment is set up you can simply `pip install testcontainers` (or use Poetry’s `poetry add --dev testcontainers`) and begin writing your container-backed tests in Python.
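+
+Before writing real tests, a quick smoke test can confirm that Docker and the library are wired up correctly. A minimal sketch (the image and the log line waited on are illustrative assumptions, not requirements of the library):
+
+```python
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_for_logs
+
+
+def test_environment_is_ready():
+    # Start a throwaway nginx container and wait until it logs readiness.
+    with DockerContainer("nginx:alpine").with_exposed_ports(80) as container:
+        wait_for_logs(container, "start worker process")
+        assert container.get_exposed_port(80) is not None
+```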
+ +See the [General Docker Requirements](docker.md) to continue diff --git a/docs/testcontainers-logo.svg b/docs/testcontainers-logo.svg new file mode 100644 index 000000000..cc5fb6188 --- /dev/null +++ b/docs/testcontainers-logo.svg @@ -0,0 +1,22 @@ + + + Testcontainers + + + + + + + + + + + + + + + + + + + diff --git a/docs/theme/main.html b/docs/theme/main.html new file mode 100644 index 000000000..1c0823892 --- /dev/null +++ b/docs/theme/main.html @@ -0,0 +1,10 @@ +{% extends "base.html" %} + +{% block analytics %} + +{% endblock %} + +{% block extrahead %} + + +{% endblock %} diff --git a/docs/theme/partials/header.html b/docs/theme/partials/header.html new file mode 100644 index 000000000..2c59cbb3d --- /dev/null +++ b/docs/theme/partials/header.html @@ -0,0 +1,140 @@ + + + +{% set class = "md-header" %} {% if "navigation.tabs.sticky" in features %} {% +set class = class ~ " md-header--shadow md-header--lifted" %} {% elif +"navigation.tabs" not in features %} {% set class = class ~ " md-header--shadow" +%} {% endif %} {% include "partials/tc-header.html" %} + + +
+ + + + {% if "navigation.tabs.sticky" in features %} {% if "navigation.tabs" in + features %} {% include "partials/tabs.html" %} {% endif %} {% endif %} +
diff --git a/docs/theme/partials/nav.html b/docs/theme/partials/nav.html new file mode 100644 index 000000000..90dcdc2ef --- /dev/null +++ b/docs/theme/partials/nav.html @@ -0,0 +1,79 @@ + + + +{% import "partials/nav-item.html" as item with context %} +{% set class = "md-nav md-nav--primary" %} +{% if "navigation.tabs" in features %} +{% set class = class ~ " md-nav--lifted" %} +{% endif %} +{% if "toc.integrate" in features %} +{% set class = class ~ " md-nav--integrated" %} +{% endif %} + + + diff --git a/docs/theme/partials/tc-header.html b/docs/theme/partials/tc-header.html new file mode 100644 index 000000000..246e9ff52 --- /dev/null +++ b/docs/theme/partials/tc-header.html @@ -0,0 +1,157 @@ +{% set header = ({ + "siteUrl": "https://testcontainers.com/", + "menuItems": [ + { + "label": "Desktop NEW", + "url": "https://testcontainers.com/desktop/" + }, + { + "label": "Cloud", + "url": "https://testcontainers.com/cloud/" + }, + { + "label": "Getting Started", + "url": "https://testcontainers.com/getting-started/" + }, + { + "label": "Guides", + "url": "https://testcontainers.com/guides/" + }, + { + "label": "Modules", + "url": "https://testcontainers.com/modules/" + }, + { + "label": "Docs", + "children": [ + { + "label": "Testcontainers for Java", + "url": "https://java.testcontainers.org/", + "image": "/language-logos/java.svg", + }, + { + "label": "Testcontainers for Go", + "url": "https://golang.testcontainers.org/", + "image": "/language-logos/go.svg", + }, + { + "label": "Testcontainers for .NET", + "url": "https://dotnet.testcontainers.org/", + "image": "/language-logos/dotnet.svg", + }, + { + "label": "Testcontainers for Node.js", + "url": "https://node.testcontainers.org/", + "image": "/language-logos/nodejs.svg", + }, + { + "label": "Testcontainers for Python", + "url": "https://testcontainers-python.readthedocs.io/en/latest/", + "image": "/language-logos/python.svg", + "external": true, + }, + { + "label": "Testcontainers for Rust", + "url": "https://docs.rs/testcontainers/latest/testcontainers/", + "image": "/language-logos/rust.svg", + "external": true, + }, + { + "label": "Testcontainers for Haskell", + "url": "https://github.com/testcontainers/testcontainers-hs", + "image": "/language-logos/haskell.svg", + "external": true, + }, + { + "label": "Testcontainers for Ruby", + "url": "https://github.com/testcontainers/testcontainers-ruby", + "image": "/language-logos/ruby.svg", + "external": true, + }, + ] + }, + { + "label": "Slack", + "url": "https://slack.testcontainers.org/", + "icon": "icon-slack", + }, + { + "label": "GitHub", + "url": "https://github.com/testcontainers", + "icon": "icon-github", + }, + ] +}) %} + + + + + + + + + + + diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..aca8281b7 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,105 @@ +site_name: Testcontainers for Python +site_url: https://python.testcontainers.org +plugins: + - search + - codeinclude + - include-markdown + - markdownextradata +theme: + name: material + custom_dir: docs/theme + palette: + scheme: testcontainers + font: + text: Roboto + code: Roboto Mono + logo: logo.svg + favicon: favicon.ico +extra_css: + - "css/extra.css" + - "css/tc-header.css" +repo_name: "testcontainers-python" +repo_url: "https://github.com/testcontainers/testcontainers-python" +markdown_extensions: + - admonition + - codehilite: + linenums: false + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.snippets + - toc: + permalink: true + - attr_list + - pymdownx.emoji: + 
emoji_generator: !!python/name:material.extensions.emoji.to_svg + emoji_index: !!python/name:material.extensions.emoji.twemoji +nav: + - Home: index.md + - Quickstart: quickstart.md + - Features: + - Creating Containers: features/creating_container.md + - Configuration: features/configuration.md + - Authentication: features/authentication.md + - Executing Commands: features/executing_commands.md + - Container Logs: features/container_logs.md + - Building Images: features/building_images.md + - Copying Data: features/copying_data.md + - Wait Strategies: features/wait_strategies.md + - Docker Compose: features/docker_compose.md + - Networking: features/networking.md + - Garbage Collector: features/garbage_collector.md + - Advanced Features: features/advanced_features.md + - Modules: + - Databases: + - modules/arangodb.md + - modules/cassandra.md + - modules/chroma.md + - modules/clickhouse.md + - modules/cockroachdb.md + - modules/cosmosdb.md + - modules/db2.md + - modules/elasticsearch.md + - modules/influxdb.md + - modules/mongodb.md + - modules/mssql.md + - modules/mysql.md + - modules/neo4j.md + - modules/opensearch.md + - modules/oracle-free.md + - modules/postgres.md + - modules/qdrant.md + - modules/redis.md + - modules/scylla.md + - modules/trino.md + - modules/weaviate.md + - modules/aws.md + - modules/azurite.md + - modules/generic.md + - modules/google.md + - modules/k3s.md + - modules/keycloak.md + - modules/kafka.md + - modules/localstack.md + - modules/mailpit.md + - modules/memcached.md + - modules/milvus.md + - modules/minio.md + - modules/mqtt.md + - modules/nats.md + - modules/nginx.md + - modules/ollama.md + - modules/rabbitmq.md + - modules/registry.md + - modules/selenium.md + - modules/sftp.md + - modules/test_module_import.md + - modules/vault.md + - System Requirements: + - system_requirements/index.md + - system_requirements/docker.md + - Contributing: contributing.md + - Getting Help: getting_help.md +edit_uri: edit/main/docs/ +extra: + latest_version: 4.10.0 diff --git a/modules/arangodb/example_basic.py b/modules/arangodb/example_basic.py new file mode 100644 index 000000000..e75467610 --- /dev/null +++ b/modules/arangodb/example_basic.py @@ -0,0 +1,91 @@ +import json + +from arango import ArangoClient + +from testcontainers.arangodb import ArangoDbContainer + + +def basic_example(): + with ArangoDbContainer() as arango: + # Get connection parameters + host = arango.get_container_host_ip() + port = arango.get_exposed_port(arango.port) + username = arango.username + password = arango.password + + # Create ArangoDB client + client = ArangoClient(hosts=f"http://{host}:{port}") + db = client.db("_system", username=username, password=password) + print("Connected to ArangoDB") + + # Create a test database + db_name = "test_db" + if not db.has_database(db_name): + db.create_database(db_name) + print(f"Created database: {db_name}") + + # Switch to test database + test_db = client.db(db_name, username=username, password=password) + + # Create a test collection + collection_name = "test_collection" + if not test_db.has_collection(collection_name): + test_db.create_collection(collection_name) + print(f"Created collection: {collection_name}") + + collection = test_db.collection(collection_name) + + # Insert test documents + test_docs = [ + {"_key": "1", "name": "test1", "value": 100, "category": "A"}, + {"_key": "2", "name": "test2", "value": 200, "category": "B"}, + {"_key": "3", "name": "test3", "value": 300, "category": "A"}, + ] + + collection.import_bulk(test_docs) + 
print("Inserted test documents") + + # Query documents + cursor = test_db.aql.execute(""" + FOR doc IN test_collection + FILTER doc.category == "A" + RETURN doc + """) + + print("\nQuery results:") + for doc in cursor: + print(json.dumps(doc, indent=2)) + + # Execute a more complex query + cursor = test_db.aql.execute(""" + FOR doc IN test_collection + COLLECT category = doc.category + AGGREGATE + count = COUNT(1), + avg_value = AVG(doc.value), + min_value = MIN(doc.value), + max_value = MAX(doc.value) + RETURN { + category: category, + count: count, + avg_value: avg_value, + min_value: min_value, + max_value: max_value + } + """) + + print("\nAggregation results:") + for result in cursor: + print(json.dumps(result, indent=2)) + + # Get collection info + collection_info = collection.properties() + print("\nCollection properties:") + print(f"Name: {collection_info['name']}") + print(f"Type: {collection_info['type']}") + print(f"Status: {collection_info['status']}") + print(f"Count: {collection.count()}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/aws/example_basic.py b/modules/aws/example_basic.py new file mode 100644 index 000000000..64410ed23 --- /dev/null +++ b/modules/aws/example_basic.py @@ -0,0 +1,117 @@ +import json +from datetime import datetime + +import boto3 + +from testcontainers.aws import AwsContainer + + +def basic_example(): + with AwsContainer() as aws: + # Get connection parameters + host = aws.get_container_host_ip() + port = aws.get_exposed_port(aws.port) + access_key = aws.access_key + secret_key = aws.secret_key + region = aws.region + + # Initialize AWS clients + s3 = boto3.client( + "s3", + endpoint_url=f"http://{host}:{port}", + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + region_name=region, + ) + + dynamodb = boto3.resource( + "dynamodb", + endpoint_url=f"http://{host}:{port}", + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + region_name=region, + ) + + sqs = boto3.client( + "sqs", + endpoint_url=f"http://{host}:{port}", + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + region_name=region, + ) + + print("Connected to AWS services") + + # Test S3 + bucket_name = f"test-bucket-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}" + s3.create_bucket(Bucket=bucket_name) + print(f"\nCreated S3 bucket: {bucket_name}") + + # Upload a file + s3.put_object(Bucket=bucket_name, Key="test.txt", Body="Hello, S3!") + print("Uploaded test file") + + # List objects + objects = s3.list_objects(Bucket=bucket_name) + print("\nObjects in bucket:") + for obj in objects.get("Contents", []): + print(f"- {obj['Key']}") + + # Test DynamoDB + table_name = "test_table" + table = dynamodb.create_table( + TableName=table_name, + KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + print(f"\nCreated DynamoDB table: {table_name}") + + # Wait for table to be created + table.meta.client.get_waiter("table_exists").wait(TableName=table_name) + + # Insert items + table.put_item(Item={"id": "1", "name": "Test Item", "value": 42, "timestamp": datetime.utcnow().isoformat()}) + print("Inserted test item") + + # Query items + response = table.scan() + print("\nDynamoDB items:") + for item in response["Items"]: + print(json.dumps(item, indent=2)) + + # Test SQS + queue_name = "test-queue" + queue = sqs.create_queue(QueueName=queue_name) + queue_url = queue["QueueUrl"] 
+ print(f"\nCreated SQS queue: {queue_name}") + + # Send message + response = sqs.send_message(QueueUrl=queue_url, MessageBody="Hello, SQS!") + print(f"Sent message: {response['MessageId']}") + + # Receive message + messages = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=1) + print("\nReceived messages:") + for message in messages.get("Messages", []): + print(json.dumps(message, indent=2)) + + # Clean up + # Delete S3 bucket and its contents + objects = s3.list_objects(Bucket=bucket_name) + for obj in objects.get("Contents", []): + s3.delete_object(Bucket=bucket_name, Key=obj["Key"]) + s3.delete_bucket(Bucket=bucket_name) + print("\nDeleted S3 bucket") + + # Delete DynamoDB table + table.delete() + print("Deleted DynamoDB table") + + # Delete SQS queue + sqs.delete_queue(QueueUrl=queue_url) + print("Deleted SQS queue") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/azurite/example_basic.py b/modules/azurite/example_basic.py new file mode 100644 index 000000000..872046e97 --- /dev/null +++ b/modules/azurite/example_basic.py @@ -0,0 +1,73 @@ +import json + +from azure.storage.blob import BlobServiceClient +from azure.storage.queue import QueueServiceClient + +from testcontainers.azurite import AzuriteContainer + + +def basic_example(): + with AzuriteContainer() as azurite: + # Get connection string + connection_string = azurite.get_connection_string() + + # Create BlobServiceClient + blob_service_client = BlobServiceClient.from_connection_string(connection_string) + + # Create QueueServiceClient + queue_service_client = QueueServiceClient.from_connection_string(connection_string) + + # Create a test container + container_name = "test-container" + container_client = blob_service_client.create_container(container_name) + print(f"Created container: {container_name}") + + # Upload test blobs + test_data = [ + {"name": "test1", "value": 100, "category": "A"}, + {"name": "test2", "value": 200, "category": "B"}, + {"name": "test3", "value": 300, "category": "A"}, + ] + + for i, data in enumerate(test_data, 1): + blob_name = f"test{i}.json" + blob_client = container_client.get_blob_client(blob_name) + blob_client.upload_blob(json.dumps(data), overwrite=True) + print(f"Uploaded blob: {blob_name}") + + # List blobs + print("\nBlobs in container:") + for blob in container_client.list_blobs(): + print(f"Name: {blob.name}, Size: {blob.size} bytes") + + # Download and read a blob + blob_client = container_client.get_blob_client("test1.json") + blob_data = blob_client.download_blob() + content = json.loads(blob_data.readall()) + print("\nBlob content:") + print(json.dumps(content, indent=2)) + + # Create a test queue + queue_name = "test-queue" + queue_client = queue_service_client.create_queue(queue_name) + print(f"\nCreated queue: {queue_name}") + + # Send test messages + test_messages = ["Hello Azurite!", "This is a test message", "Queue is working!"] + + for msg in test_messages: + queue_client.send_message(msg) + print(f"Sent message: {msg}") + + # Receive messages + print("\nReceived messages:") + for _ in range(len(test_messages)): + message = queue_client.receive_message() + if message: + print(f"Message: {message.content}") + queue_client.delete_message(message.id, message.pop_receipt) + print("Deleted message") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/cassandra/example_basic.py b/modules/cassandra/example_basic.py new file mode 100644 index 000000000..54cee6f64 --- /dev/null +++ b/modules/cassandra/example_basic.py @@ -0,0 
+1,153 @@ +import json +from datetime import datetime + +from cassandra.auth import PlainTextAuthProvider +from cassandra.cluster import Cluster + +from testcontainers.cassandra import CassandraContainer + + +def basic_example(): + with CassandraContainer() as cassandra: + # Get connection parameters + host = cassandra.get_container_host_ip() + port = cassandra.get_exposed_port(cassandra.port) + username = cassandra.username + password = cassandra.password + + # Create Cassandra client + auth_provider = PlainTextAuthProvider(username=username, password=password) + cluster = Cluster([host], port=port, auth_provider=auth_provider) + session = cluster.connect() + print("Connected to Cassandra") + + # Create keyspace + session.execute(""" + CREATE KEYSPACE IF NOT EXISTS test_keyspace + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} + """) + print("Created keyspace") + + # Use keyspace + session.set_keyspace("test_keyspace") + + # Create table + session.execute(""" + CREATE TABLE IF NOT EXISTS test_table ( + id UUID PRIMARY KEY, + name text, + value int, + category text, + created_at timestamp + ) + """) + print("Created table") + + # Insert test data + test_data = [ + { + "id": "550e8400-e29b-41d4-a716-446655440000", + "name": "test1", + "value": 100, + "category": "A", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440001", + "name": "test2", + "value": 200, + "category": "B", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440002", + "name": "test3", + "value": 300, + "category": "A", + "created_at": datetime.utcnow(), + }, + ] + + insert_stmt = session.prepare(""" + INSERT INTO test_table (id, name, value, category, created_at) + VALUES (uuid(), ?, ?, ?, ?) 
+ """) + + for data in test_data: + session.execute(insert_stmt, (data["name"], data["value"], data["category"], data["created_at"])) + print("Inserted test data") + + # Query data + print("\nQuery results:") + rows = session.execute("SELECT * FROM test_table WHERE category = 'A' ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create materialized view + session.execute(""" + CREATE MATERIALIZED VIEW IF NOT EXISTS test_view AS + SELECT category, name, value, created_at + FROM test_table + WHERE category IS NOT NULL AND name IS NOT NULL + PRIMARY KEY (category, name) + """) + print("\nCreated materialized view") + + # Query materialized view + print("\nMaterialized view results:") + rows = session.execute("SELECT * FROM test_view WHERE category = 'A'") + for row in rows: + print( + json.dumps( + { + "category": row.category, + "name": row.name, + "value": row.value, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create secondary index + session.execute("CREATE INDEX IF NOT EXISTS ON test_table (value)") + print("\nCreated secondary index") + + # Query using secondary index + print("\nQuery using secondary index:") + rows = session.execute("SELECT * FROM test_table WHERE value > 150 ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Get table metadata + table_meta = session.cluster.metadata.keyspaces["test_keyspace"].tables["test_table"] + print("\nTable metadata:") + print(f"Columns: {[col.name for col in table_meta.columns.values()]}") + print(f"Partition key: {[col.name for col in table_meta.partition_key]}") + print(f"Clustering key: {[col.name for col in table_meta.clustering_key]}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/chroma/example_basic.py b/modules/chroma/example_basic.py new file mode 100644 index 000000000..3d22c01c7 --- /dev/null +++ b/modules/chroma/example_basic.py @@ -0,0 +1,65 @@ +import chromadb +from chromadb.config import Settings + +from testcontainers.chroma import ChromaContainer + + +def basic_example(): + with ChromaContainer() as chroma: + # Get connection URL + connection_url = chroma.get_connection_url() + + # Create Chroma client + client = chromadb.HttpClient(host=connection_url, settings=Settings(allow_reset=True)) + + # Create a collection + collection_name = "test_collection" + collection = client.create_collection(name=collection_name) + print(f"Created collection: {collection_name}") + + # Add documents and embeddings + documents = [ + "This is a test document about AI", + "Machine learning is a subset of AI", + "Deep learning uses neural networks", + ] + + embeddings = [ + [0.1, 0.2, 0.3], # Simple example embeddings + [0.2, 0.3, 0.4], + [0.3, 0.4, 0.5], + ] + + ids = ["doc1", "doc2", "doc3"] + metadatas = [ + {"source": "test1", "category": "AI"}, + {"source": "test2", "category": "ML"}, + {"source": "test3", "category": "DL"}, + ] + + collection.add(documents=documents, embeddings=embeddings, ids=ids, metadatas=metadatas) + print("Added documents to collection") + + # Query the collection + results = collection.query(query_embeddings=[[0.1, 0.2, 0.3]], n_results=2) + + print("\nQuery results:") + print(f"Documents: 
{results['documents'][0]}") + print(f"Distances: {results['distances'][0]}") + print(f"Metadatas: {results['metadatas'][0]}") + + # Get collection info + collection_info = client.get_collection(collection_name) + print("\nCollection info:") + print(f"Name: {collection_info.name}") + print(f"Count: {collection_info.count()}") + + # List all collections + collections = client.list_collections() + print("\nAvailable collections:") + for coll in collections: + print(f"- {coll.name}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/clickhouse/example_basic.py b/modules/clickhouse/example_basic.py new file mode 100644 index 000000000..1b4eb5c8d --- /dev/null +++ b/modules/clickhouse/example_basic.py @@ -0,0 +1,76 @@ +from datetime import datetime, timedelta + +import pandas as pd +from clickhouse_driver import Client + +from testcontainers.clickhouse import ClickHouseContainer + + +def basic_example(): + with ClickHouseContainer() as clickhouse: + # Get connection parameters + host = clickhouse.get_container_host_ip() + port = clickhouse.get_exposed_port(clickhouse.port) + + # Create ClickHouse client + client = Client(host=host, port=port) + + # Create a test table + client.execute(""" + CREATE TABLE IF NOT EXISTS test_table ( + id UInt32, + name String, + value Float64, + timestamp DateTime + ) ENGINE = MergeTree() + ORDER BY (id, timestamp) + """) + print("Created test table") + + # Generate test data + now = datetime.now() + data = [ + (1, "test1", 100.0, now), + (2, "test2", 200.0, now + timedelta(hours=1)), + (3, "test3", 300.0, now + timedelta(hours=2)), + ] + + # Insert data + client.execute("INSERT INTO test_table (id, name, value, timestamp) VALUES", data) + print("Inserted test data") + + # Query data + result = client.execute(""" + SELECT * + FROM test_table + ORDER BY id + """) + + print("\nQuery results:") + for row in result: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}, Timestamp: {row[3]}") + + # Execute a more complex query + result = client.execute(""" + SELECT + name, + avg(value) as avg_value, + min(value) as min_value, + max(value) as max_value + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + + print("\nAggregation results:") + for row in result: + print(f"Name: {row[0]}, Avg: {row[1]:.2f}, Min: {row[2]:.2f}, Max: {row[3]:.2f}") + + # Convert to pandas DataFrame + df = pd.DataFrame(result, columns=["name", "avg_value", "min_value", "max_value"]) + print("\nDataFrame:") + print(df) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/cockroachdb/example_basic.py b/modules/cockroachdb/example_basic.py new file mode 100644 index 000000000..9da3f219c --- /dev/null +++ b/modules/cockroachdb/example_basic.py @@ -0,0 +1,90 @@ +import pandas as pd +import sqlalchemy +from sqlalchemy import text + +from testcontainers.cockroachdb import CockroachContainer + + +def basic_example(): + with CockroachContainer() as cockroach: + # Get connection URL + connection_url = cockroach.get_connection_url() + + # Create SQLAlchemy engine + engine = sqlalchemy.create_engine(connection_url) + + # Create a test table + with engine.begin() as conn: + conn.execute( + text(""" + CREATE TABLE IF NOT EXISTS test_table ( + id SERIAL PRIMARY KEY, + name VARCHAR(50), + value DECIMAL(10,2), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """) + ) + print("Created test table") + + # Insert test data + test_data = [(1, "test1", 100.0), (2, "test2", 200.0), (3, "test3", 300.0)] + + conn.execute( + text(""" + INSERT INTO test_table 
(id, name, value) + VALUES (:id, :name, :value) + """), + [{"id": item_id, "name": name, "value": value} for item_id, name, value in test_data], + ) + print("Inserted test data") + + # Query data + with engine.connect() as conn: + result = conn.execute( + text(""" + SELECT * + FROM test_table + ORDER BY id + """) + ) + + print("\nQuery results:") + for row in result: + print(f"ID: {row.id}, Name: {row.name}, Value: {row.value}, Created: {row.created_at}") + + # Execute a more complex query + with engine.connect() as conn: + result = conn.execute( + text(""" + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count, + MIN(created_at) as first_created, + MAX(created_at) as last_created + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + ) + + print("\nAggregation results:") + for row in result: + print( + f"Name: {row.name}, " + f"Avg: {row.avg_value:.2f}, " + f"Count: {row.count}, " + f"First: {row.first_created}, " + f"Last: {row.last_created}" + ) + + # Convert to pandas DataFrame + with engine.connect() as conn: + df = pd.read_sql("SELECT * FROM test_table ORDER BY id", conn) + print("\nDataFrame:") + print(df) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/cosmosdb/example_basic.py b/modules/cosmosdb/example_basic.py new file mode 100644 index 000000000..c836a1409 --- /dev/null +++ b/modules/cosmosdb/example_basic.py @@ -0,0 +1,75 @@ +import json + +from azure.cosmos import CosmosClient, PartitionKey + +from testcontainers.cosmosdb import CosmosDbContainer + + +def basic_example(): + with CosmosDbContainer() as cosmos: + # Get connection parameters + endpoint = cosmos.get_connection_url() + key = cosmos.get_primary_key() + + # Create CosmosDB client + client = CosmosClient(endpoint, key) + + # Create a database + database_name = "test_database" + database = client.create_database_if_not_exists(id=database_name) + print(f"Created database: {database_name}") + + # Create a container + container_name = "test_container" + container = database.create_container_if_not_exists( + id=container_name, partition_key=PartitionKey(path="/category") + ) + print(f"Created container: {container_name}") + + # Insert test items + test_items = [ + {"id": "1", "category": "test1", "name": "Item 1", "value": 100}, + {"id": "2", "category": "test2", "name": "Item 2", "value": 200}, + {"id": "3", "category": "test1", "name": "Item 3", "value": 300}, + ] + + for item in test_items: + container.create_item(body=item) + print("Inserted test items") + + # Query items + query = "SELECT * FROM c WHERE c.category = 'test1'" + items = list(container.query_items(query=query, enable_cross_partition_query=True)) + + print("\nQuery results:") + for item in items: + print(json.dumps(item, indent=2)) + + # Execute a more complex query + query = """ + SELECT + c.category, + COUNT(1) as count, + AVG(c.value) as avg_value, + MIN(c.value) as min_value, + MAX(c.value) as max_value + FROM c + GROUP BY c.category + """ + + results = list(container.query_items(query=query, enable_cross_partition_query=True)) + + print("\nAggregation results:") + for result in results: + print(json.dumps(result, indent=2)) + + # Get container info + container_properties = container.read() + print("\nContainer properties:") + print(f"ID: {container_properties['id']}") + print(f"Partition Key: {container_properties['partitionKey']}") + print(f"Indexing Policy: {json.dumps(container_properties['indexingPolicy'], indent=2)}") + + +if __name__ == "__main__": + basic_example() diff --git 
a/modules/cosmosdb/tests/test_cosmosdb_emulator.py b/modules/cosmosdb/tests/test_cosmosdb_emulator.py
index 542ddd11c..41653cd4a 100644
--- a/modules/cosmosdb/tests/test_cosmosdb_emulator.py
+++ b/modules/cosmosdb/tests/test_cosmosdb_emulator.py
@@ -1,7 +1,10 @@
 import pytest
 
 from testcontainers.cosmosdb._emulator import CosmosDBEmulatorContainer
+from testcontainers.core.utils import is_arm
+
+@pytest.mark.skipif(is_arm(), reason="cosmosdb emulator container not available for ARM")
 def test_runs():
     with CosmosDBEmulatorContainer(partition_count=1, bind_ports=False) as emulator:
         assert emulator.server_certificate_pem is not None
diff --git a/modules/cosmosdb/tests/test_cosmosdb_mongodb.py b/modules/cosmosdb/tests/test_cosmosdb_mongodb.py
index a50ee82ea..3c10ee19f 100644
--- a/modules/cosmosdb/tests/test_cosmosdb_mongodb.py
+++ b/modules/cosmosdb/tests/test_cosmosdb_mongodb.py
@@ -1,7 +1,10 @@
 import pytest
 
 from testcontainers.cosmosdb import CosmosDBMongoEndpointContainer
+from testcontainers.core.utils import is_arm
+
+@pytest.mark.skipif(is_arm(), reason="cosmosdb emulator container not available for ARM")
 def test_requires_a_version():
     with pytest.raises(AssertionError, match="A MongoDB version is required"):
         CosmosDBMongoEndpointContainer(mongodb_version=None)
@@ -10,6 +13,7 @@ def test_requires_a_version():
     CosmosDBMongoEndpointContainer(mongodb_version="4.0")
 
+@pytest.mark.skipif(is_arm(), reason="cosmosdb emulator container not available for ARM")
 def test_runs():
     with CosmosDBMongoEndpointContainer(mongodb_version="4.0", partition_count=1, bind_ports=False) as emulator:
         assert emulator.env["AZURE_COSMOS_EMULATOR_ENABLE_MONGODB_ENDPOINT"] == "4.0"
diff --git a/modules/cosmosdb/tests/test_cosmosdb_nosql.py b/modules/cosmosdb/tests/test_cosmosdb_nosql.py
index a9460a1b0..a48a52ac8 100644
--- a/modules/cosmosdb/tests/test_cosmosdb_nosql.py
+++ b/modules/cosmosdb/tests/test_cosmosdb_nosql.py
@@ -1,7 +1,10 @@
 import pytest
 
 from testcontainers.cosmosdb import CosmosDBNoSQLEndpointContainer
+from testcontainers.core.utils import is_arm
+
+@pytest.mark.skipif(is_arm(), reason="cosmosdb emulator container not available for ARM")
 def test_runs():
     with CosmosDBNoSQLEndpointContainer(partition_count=1, bind_ports=False) as emulator:
         assert emulator.get_exposed_port(8081) is not None, "The NoSQL endpoint's port should be exposed"
diff --git a/modules/db2/example_basic.py b/modules/db2/example_basic.py
new file mode 100644
index 000000000..97b5d65f5
--- /dev/null
+++ b/modules/db2/example_basic.py
@@ -0,0 +1,89 @@
+import ibm_db
+import ibm_db_dbi
+import pandas as pd
+
+from testcontainers.db2 import Db2Container
+
+
+def basic_example():
+    with Db2Container() as db2:
+        # Get connection parameters
+        host = db2.get_container_host_ip()
+        port = db2.get_exposed_port(db2.port)
+        database = db2.database
+        username = db2.username
+        password = db2.password
+
+        # Create connection string
+        conn_str = f"DATABASE={database};HOSTNAME={host};PORT={port};PROTOCOL=TCPIP;UID={username};PWD={password}"
+
+        # Connect to DB2
+        conn = ibm_db.connect(conn_str, "", "")
+        print("Connected to DB2")
+
+        # Create a test table
+        create_table_sql = """
+        CREATE TABLE test_table (
+            id INTEGER NOT NULL PRIMARY KEY,
+            name VARCHAR(50),
+            value DECIMAL(10,2),
+            created_at TIMESTAMP DEFAULT CURRENT TIMESTAMP
+        )
+        """
+
+        try:
+            ibm_db.exec_immediate(conn, create_table_sql)
+            print("Created test table")
+        except Exception as e:
+            print(f"Table might already exist: {e}")
+
+        # Insert test data
+        test_data = [(1, "test1", 100.0), (2, "test2", 200.0), (3, "test3", 300.0)]
+
+        
insert_sql = "INSERT INTO test_table (id, name, value) VALUES (?, ?, ?)" + stmt = ibm_db.prepare(conn, insert_sql) + + for row in test_data: + ibm_db.execute(stmt, row) + print("Inserted test data") + + # Query data using ibm_db_dbi + conn_dbi = ibm_db_dbi.Connection(conn) + cursor = conn_dbi.cursor() + + cursor.execute("SELECT * FROM test_table ORDER BY id") + rows = cursor.fetchall() + + print("\nQuery results:") + for row in rows: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}, Created: {row[3]}") + + # Execute a more complex query + cursor.execute(""" + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count, + MIN(created_at) as first_created, + MAX(created_at) as last_created + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + + print("\nAggregation results:") + for row in cursor.fetchall(): + print(f"Name: {row[0]}, Avg: {row[1]:.2f}, Count: {row[2]}, First: {row[3]}, Last: {row[4]}") + + # Convert to pandas DataFrame + df = pd.read_sql("SELECT * FROM test_table ORDER BY id", conn_dbi) + print("\nDataFrame:") + print(df) + + # Clean up + cursor.close() + ibm_db.close(conn) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/db2/tests/test_db2.py b/modules/db2/tests/test_db2.py index 7b6ea844a..c354832ff 100644 --- a/modules/db2/tests/test_db2.py +++ b/modules/db2/tests/test_db2.py @@ -26,6 +26,7 @@ def test_docker_run_db2(version: str): # - sqlserver # - mongodb # - db2 +@pytest.mark.skipif(is_arm(), reason="db2 container not available for ARM") def test_quoted_password(): user = "db2inst1" dbname = "testdb" diff --git a/modules/elasticsearch/example_basic.py b/modules/elasticsearch/example_basic.py new file mode 100644 index 000000000..1b3ed4077 --- /dev/null +++ b/modules/elasticsearch/example_basic.py @@ -0,0 +1,105 @@ +import json +from datetime import datetime + +from elasticsearch import Elasticsearch + +from testcontainers.elasticsearch import ElasticsearchContainer + + +def basic_example(): + with ElasticsearchContainer() as elasticsearch: + # Get connection parameters + host = elasticsearch.get_container_host_ip() + port = elasticsearch.get_exposed_port(elasticsearch.port) + + # Create Elasticsearch client + es = Elasticsearch(f"http://{host}:{port}") + print("Connected to Elasticsearch") + + # Create index + index_name = "test_index" + index_settings = { + "settings": {"number_of_shards": 1, "number_of_replicas": 0}, + "mappings": { + "properties": { + "name": {"type": "text"}, + "value": {"type": "integer"}, + "category": {"type": "keyword"}, + "created_at": {"type": "date"}, + } + }, + } + + if not es.indices.exists(index=index_name): + es.indices.create(index=index_name, body=index_settings) + print(f"Created index: {index_name}") + + # Insert test documents + test_docs = [ + {"name": "test1", "value": 100, "category": "A", "created_at": datetime.utcnow()}, + {"name": "test2", "value": 200, "category": "B", "created_at": datetime.utcnow()}, + {"name": "test3", "value": 300, "category": "A", "created_at": datetime.utcnow()}, + ] + + for i, doc in enumerate(test_docs, 1): + es.index(index=index_name, id=i, document=doc) + print("Inserted test documents") + + # Refresh index + es.indices.refresh(index=index_name) + + # Search documents + search_query = {"query": {"bool": {"must": [{"term": {"category": "A"}}]}}} + + print("\nSearch results:") + response = es.search(index=index_name, body=search_query) + for hit in response["hits"]["hits"]: + print(json.dumps(hit["_source"], default=str, indent=2)) + + # Execute aggregation 
+ agg_query = { + "size": 0, + "aggs": { + "categories": { + "terms": {"field": "category"}, + "aggs": { + "avg_value": {"avg": {"field": "value"}}, + "min_value": {"min": {"field": "value"}}, + "max_value": {"max": {"field": "value"}}, + }, + } + }, + } + + print("\nAggregation results:") + response = es.search(index=index_name, body=agg_query) + for bucket in response["aggregations"]["categories"]["buckets"]: + print(f"\nCategory: {bucket['key']}") + print(f"Count: {bucket['doc_count']}") + print(f"Avg value: {bucket['avg_value']['value']:.2f}") + print(f"Min value: {bucket['min_value']['value']}") + print(f"Max value: {bucket['max_value']['value']}") + + # Update document + update_body = {"doc": {"value": 150, "updated_at": datetime.utcnow()}} + es.update(index=index_name, id=1, body=update_body) + print("\nUpdated document") + + # Get document + doc = es.get(index=index_name, id=1) + print("\nUpdated document:") + print(json.dumps(doc["_source"], default=str, indent=2)) + + # Delete document + es.delete(index=index_name, id=2) + print("\nDeleted document") + + # Get index stats + stats = es.indices.stats(index=index_name) + print("\nIndex stats:") + print(f"Documents: {stats['indices'][index_name]['total']['docs']['count']}") + print(f"Size: {stats['indices'][index_name]['total']['store']['size_in_bytes']} bytes") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/elasticsearch/tests/test_elasticsearch.py b/modules/elasticsearch/tests/test_elasticsearch.py index 661a550c6..5108bb40f 100644 --- a/modules/elasticsearch/tests/test_elasticsearch.py +++ b/modules/elasticsearch/tests/test_elasticsearch.py @@ -3,11 +3,13 @@ import pytest +from testcontainers.core.utils import is_arm from testcontainers.elasticsearch import ElasticSearchContainer # The versions below should reflect the latest stable releases @pytest.mark.parametrize("version", ["7.17.18", "8.12.2"]) +@pytest.mark.skipif(is_arm(), reason="elasticsearch container not available for ARM") def test_docker_run_elasticsearch(version): with ElasticSearchContainer(f"elasticsearch:{version}", mem_limit="3G") as es: resp = urllib.request.urlopen(es.get_url()) diff --git a/modules/generic/example_basic.py b/modules/generic/example_basic.py new file mode 100644 index 000000000..107bcc7c2 --- /dev/null +++ b/modules/generic/example_basic.py @@ -0,0 +1,115 @@ +import requests + +from testcontainers.generic import GenericContainer + + +def basic_example(): + # Example 1: Nginx container + with GenericContainer("nginx:latest") as nginx: + # Get connection parameters + host = nginx.get_container_host_ip() + port = nginx.get_exposed_port(80) + + # Test Nginx + response = requests.get(f"http://{host}:{port}") + print("\nNginx response:") + print(f"Status code: {response.status_code}") + print(f"Content type: {response.headers.get('content-type')}") + + # Example 2: Redis container with custom configuration + with GenericContainer("redis:latest") as redis_container: + # Get connection parameters + host = redis_container.get_container_host_ip() + port = redis_container.get_exposed_port(6379)
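+ + # Wait until the server is ready before connecting (a minimal sketch: it + # assumes the stock redis image prints this log line on startup) + from testcontainers.core.waiting_utils import wait_for_logs + + wait_for_logs(redis_container, "Ready to accept connections") + + # Test Redis + import redis + + r = redis.Redis(host=host, port=port) + r.set("test_key", "Hello, Redis!") + value = r.get("test_key") + print("\nRedis test:") + print(f"Retrieved value: {value.decode()}") + + # Example 3: PostgreSQL container with environment variables + with GenericContainer( + "postgres:latest", + environment={"POSTGRES_USER": "testuser", "POSTGRES_PASSWORD": "testpass", "POSTGRES_DB": "testdb"}, + ) as postgres: + # Get connection parameters + host = 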
postgres.get_container_host_ip() + port = postgres.get_exposed_port(5432) + + # Test PostgreSQL + import psycopg2 + + conn = psycopg2.connect(host=host, port=port, user="testuser", password="testpass", database="testdb") + cur = conn.cursor() + cur.execute("SELECT version();") + version = cur.fetchone() + print("\nPostgreSQL test:") + print(f"Version: {version[0]}") + cur.close() + conn.close() + + # Example 4: Custom container with volume mounting + with GenericContainer("python:3.9-slim", volumes={"/tmp/test": {"bind": "/app", "mode": "rw"}}) as python: + # Get container ID + container_id = python.get_container_id() + print(f"\nPython container ID: {container_id}") + + # Execute command in container + exit_code, output = python.exec_run("python -c 'print(\"Hello from container!\")'") + print(f"Command output: {output.decode()}") + + # Example 5: Container with health check + with GenericContainer( + "mongo:latest", + healthcheck={ + "test": ["CMD", "mongosh", "--eval", "db.adminCommand('ping')"], + "interval": 1000000000, # 1 second + "timeout": 3000000000, # 3 seconds + "retries": 3, + }, + ) as mongo: + # Get connection parameters + host = mongo.get_container_host_ip() + port = mongo.get_exposed_port(27017) + + # Test MongoDB + from pymongo import MongoClient + + client = MongoClient(f"mongodb://{host}:{port}") + db = client.test_db + collection = db.test_collection + collection.insert_one({"test": "Hello, MongoDB!"}) + result = collection.find_one() + print("\nMongoDB test:") + print(f"Retrieved document: {result}") + + # Example 6: Container with network + with GenericContainer("nginx:latest", network="test_network") as nginx_network: + # Get network info + network_info = nginx_network.get_network_info() + print("\nNetwork test:") + print(f"Network name: {network_info['Name']}") + print(f"Network ID: {network_info['Id']}") + + # Example 7: Container with resource limits + with GenericContainer("nginx:latest", mem_limit="512m", cpu_period=100000, cpu_quota=50000) as nginx_limits: + # Get container stats + stats = nginx_limits.get_stats() + print("\nResource limits test:") + print(f"Memory limit: {stats['memory_stats']['limit']}") + print(f"CPU usage: {stats['cpu_stats']['cpu_usage']['total_usage']}") + + # Example 8: Container with custom command + with GenericContainer("python:3.9-slim", command=["python", "-c", "print('Custom command test')"]) as python_cmd: + # Get logs + logs = python_cmd.get_logs() + print("\nCustom command test:") + print(f"Container logs: {logs.decode()}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/google/example_basic.py b/modules/google/example_basic.py new file mode 100644 index 000000000..323b25817 --- /dev/null +++ b/modules/google/example_basic.py @@ -0,0 +1,127 @@ +import json +from datetime import datetime + +from google.cloud import bigquery, datastore, pubsub, storage + +from testcontainers.google import GoogleContainer + + +def basic_example(): + with GoogleContainer() as google: + # Get connection parameters + project_id = google.project_id + + # Initialize clients + storage_client = storage.Client(project=project_id) + pubsub_client = pubsub.PublisherClient() + bigquery_client = bigquery.Client(project=project_id) + datastore_client = datastore.Client(project=project_id) + + print("Connected to Google Cloud services") + + # Test Cloud Storage + bucket_name = f"test-bucket-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}" + bucket = storage_client.create_bucket(bucket_name) + print(f"\nCreated bucket: {bucket_name}") + + # 
Upload a file + blob = bucket.blob("test.txt") + blob.upload_from_string("Hello, Google Cloud Storage!") + print("Uploaded test file") + + # List files + blobs = list(bucket.list_blobs()) + print("\nFiles in bucket:") + for blob in blobs: + print(f"- {blob.name}") + + # Test Pub/Sub + topic_name = f"projects/{project_id}/topics/test-topic" + pubsub_client.create_topic(name=topic_name) + print(f"\nCreated topic: {topic_name}") + + # Create subscription + subscription_name = f"projects/{project_id}/subscriptions/test-subscription" + pubsub_client.create_subscription(name=subscription_name, topic=topic_name) + print(f"Created subscription: {subscription_name}") + + # Publish message + message = "Hello, Pub/Sub!" + future = pubsub_client.publish(topic_name, message.encode()) + message_id = future.result() + print(f"Published message: {message_id}") + + # Test BigQuery + dataset_id = "test_dataset" + bigquery_client.create_dataset(dataset_id) + print(f"\nCreated dataset: {dataset_id}") + + # Create table + table_id = f"{project_id}.{dataset_id}.test_table" + schema = [ + bigquery.SchemaField("name", "STRING"), + bigquery.SchemaField("age", "INTEGER"), + bigquery.SchemaField("city", "STRING"), + ] + table = bigquery_client.create_table(bigquery.Table(table_id, schema=schema)) + print(f"Created table: {table_id}") + + # Insert data + rows_to_insert = [ + {"name": "John", "age": 30, "city": "New York"}, + {"name": "Jane", "age": 25, "city": "Los Angeles"}, + {"name": "Bob", "age": 35, "city": "Chicago"}, + ] + errors = bigquery_client.insert_rows_json(table, rows_to_insert) + if not errors: + print("Inserted test data") + else: + print(f"Encountered errors: {errors}") + + # Query data + query = f"SELECT * FROM `{table_id}` WHERE age > 30" + query_job = bigquery_client.query(query) + results = query_job.result() + print("\nQuery results:") + for row in results: + print(json.dumps(dict(row), indent=2)) + + # Test Datastore + kind = "test_entity" + key = datastore_client.key(kind) + entity = datastore.Entity(key=key) + entity.update({"name": "Test Entity", "value": 42, "timestamp": datetime.utcnow()}) + datastore_client.put(entity) + print(f"\nCreated {kind} entity") + + # Query entities + query = datastore_client.query(kind=kind) + results = list(query.fetch()) + print("\nDatastore entities:") + for entity in results: + print(json.dumps(dict(entity), indent=2)) + + # Clean up + # Delete bucket and its contents + bucket.delete(force=True) + print("\nDeleted bucket") + + # Delete topic and subscription + pubsub_client.delete_subscription(subscription_name) + pubsub_client.delete_topic(topic_name) + print("Deleted Pub/Sub topic and subscription") + + # Delete BigQuery dataset and table + bigquery_client.delete_table(table_id) + bigquery_client.delete_dataset(dataset_id, delete_contents=True) + print("Deleted BigQuery dataset and table") + + # Delete Datastore entities + query = datastore_client.query(kind=kind) + keys = [entity.key for entity in query.fetch()] + datastore_client.delete_multi(keys) + print("Deleted Datastore entities") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/google/tests/test_google.py b/modules/google/tests/test_google.py index c91793741..e2dd55014 100644 --- a/modules/google/tests/test_google.py +++ b/modules/google/tests/test_google.py @@ -1,6 +1,7 @@ from queue import Queue from google.cloud.datastore import Entity +import time from testcontainers.core.waiting_utils import wait_for_logs from testcontainers.google import PubSubContainer, 
DatastoreContainer @@ -25,7 +26,8 @@ def test_pubsub_container(): # Receive the message queue = Queue() subscriber.subscribe(subscription_path, queue.put) - message = queue.get(timeout=1) + # timeout 10 is needed to account for slower arm machines + message = queue.get(timeout=10) assert message.data == b"Hello world!" message.ack() diff --git a/modules/influxdb/example_basic.py b/modules/influxdb/example_basic.py new file mode 100644 index 000000000..94154b034 --- /dev/null +++ b/modules/influxdb/example_basic.py @@ -0,0 +1,170 @@ +import json +from datetime import datetime, timedelta + +from influxdb_client import InfluxDBClient, Point +from influxdb_client.client.write_api import SYNCHRONOUS + +from testcontainers.influxdb import InfluxDBContainer + + +def basic_example(): + with InfluxDBContainer() as influxdb: + # Get connection parameters + host = influxdb.get_container_host_ip() + port = influxdb.get_exposed_port(influxdb.port) + token = influxdb.token + org = influxdb.org + bucket = influxdb.bucket + + # Create InfluxDB client + client = InfluxDBClient(url=f"http://{host}:{port}", token=token, org=org) + print("Connected to InfluxDB") + + # Create write API + write_api = client.write_api(write_options=SYNCHRONOUS) + + # Create test data points + points = [] + for i in range(3): + point = ( + Point("test_measurement") + .tag("location", f"location_{i}") + .tag("device", f"device_{i}") + .field("temperature", 20 + i) + .field("humidity", 50 + i) + .time(datetime.utcnow() + timedelta(minutes=i)) + ) + points.append(point) + + # Write points + write_api.write(bucket=bucket, record=points) + print("Wrote test data points") + + # Create query API + query_api = client.query_api() + + # Query data + query = f'from(bucket: "{bucket}") |> range(start: -1h) |> filter(fn: (r) => r["_measurement"] == "test_measurement")' + + result = query_api.query(query) + print("\nQuery results:") + for table in result: + for record in table.records: + record_data = { + "measurement": record.get_measurement(), + "time": record.get_time().isoformat(), + "location": record.values.get("location"), + "device": record.values.get("device"), + "field": record.get_field(), + "value": record.get_value(), + } + print(json.dumps(record_data, indent=2)) + + # Create aggregation query + agg_query = f'from(bucket: "{bucket}") |> range(start: -1h) |> filter(fn: (r) => r["_measurement"] == "test_measurement") |> group(columns: ["location"]) |> mean()' + + agg_result = query_api.query(agg_query) + print("\nAggregation results:") + for table in agg_result: + for record in table.records: + record_data = { + "location": record.values.get("location"), + "field": record.get_field(), + "mean": record.get_value(), + } + print(json.dumps(record_data, indent=2)) + + # Create window query + window_query = f'from(bucket: "{bucket}") |> range(start: -1h) |> filter(fn: (r) => r["_measurement"] == "test_measurement") |> window(every: 5m) |> mean()' + + window_result = query_api.query(window_query) + print("\nWindow results:") + for table in window_result: + for record in table.records: + record_data = { + "window_start": record.get_start().isoformat(), + "window_stop": record.get_stop().isoformat(), + "field": record.get_field(), + "mean": record.get_value(), + } + print(json.dumps(record_data, indent=2)) + + # Create task + task_flux = ( + "option task = {\n" + ' name: "test_task",\n' + " every: 1h\n" + "}\n\n" + f'from(bucket: "{bucket}")\n' + " |> range(start: -1h)\n" + ' |> filter(fn: (r) => r["_measurement"] == "test_measurement")\n' 
+ " |> mean()\n" + f' |> to(bucket: "{bucket}", measurement: "test_measurement_agg")' + ) + + tasks_api = client.tasks_api() + task = tasks_api.create_task(name="test_task", flux=task_flux, org=org) + print("\nCreated task") + + # Get task info + task_info = tasks_api.find_task_by_id(task.id) + print("\nTask info:") + task_data = { + "id": task_info.id, + "name": task_info.name, + "status": task_info.status, + "every": task_info.every, + } + print(json.dumps(task_data, indent=2)) + + # Create dashboard + dashboards_api = client.dashboards_api() + dashboard = dashboards_api.create_dashboard(name="test_dashboard", org=org) + print("\nCreated dashboard") + + # Add cell to dashboard + dashboards_api.create_dashboard_cell( + dashboard_id=dashboard.id, name="test_cell", x=0, y=0, w=6, h=4, query=query + ) + print("Added cell to dashboard") + + # Get dashboard info + dashboard_info = dashboards_api.find_dashboard_by_id(dashboard.id) + print("\nDashboard info:") + dashboard_data = { + "id": dashboard_info.id, + "name": dashboard_info.name, + "cells": len(dashboard_info.cells), + } + print(json.dumps(dashboard_data, indent=2)) + + # Create bucket + buckets_api = client.buckets_api() + new_bucket = buckets_api.create_bucket(bucket_name="test_bucket_2", org=org) + print("\nCreated new bucket") + + # Get bucket info + bucket_info = buckets_api.find_bucket_by_id(new_bucket.id) + print("\nBucket info:") + bucket_data = { + "id": bucket_info.id, + "name": bucket_info.name, + "org_id": bucket_info.org_id, + } + print(json.dumps(bucket_data, indent=2)) + + # Clean up + tasks_api.delete_task(task.id) + print("\nDeleted task") + + dashboards_api.delete_dashboard(dashboard.id) + print("Deleted dashboard") + + buckets_api.delete_bucket(new_bucket.id) + print("Deleted bucket") + + client.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/k3s/example_basic.py b/modules/k3s/example_basic.py new file mode 100644 index 000000000..75550f0b6 --- /dev/null +++ b/modules/k3s/example_basic.py @@ -0,0 +1,179 @@ +import json +import time + +import yaml +from kubernetes import client, config +from kubernetes.client.rest import ApiException + +from testcontainers.k3s import K3sContainer + + +def basic_example(): + with K3sContainer() as k3s: + # Get kubeconfig + kubeconfig = k3s.get_kubeconfig() + + # Load kubeconfig + config.load_kube_config_from_dict(yaml.safe_load(kubeconfig)) + print("Loaded kubeconfig") + + # Create API clients + v1 = client.CoreV1Api() + apps_v1 = client.AppsV1Api() + + # Create namespace + namespace = "test-namespace" + try: + v1.create_namespace(client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace))) + print(f"Created namespace: {namespace}") + except ApiException as e: + if e.status == 409: # Already exists + print(f"Namespace {namespace} already exists") + else: + raise + + # Create ConfigMap + configmap = client.V1ConfigMap( + metadata=client.V1ObjectMeta(name="test-config", namespace=namespace), data={"config.yaml": "key: value"} + ) + v1.create_namespaced_config_map(namespace=namespace, body=configmap) + print("Created ConfigMap") + + # Create Secret + secret = client.V1Secret( + metadata=client.V1ObjectMeta(name="test-secret", namespace=namespace), + type="Opaque", + data={"username": "dGVzdA==", "password": "cGFzc3dvcmQ="}, # base64 encoded + ) + v1.create_namespaced_secret(namespace=namespace, body=secret) + print("Created Secret") + + # Create Deployment + deployment = client.V1Deployment( + metadata=client.V1ObjectMeta(name="test-deployment", 
namespace=namespace), + spec=client.V1DeploymentSpec( + replicas=2, + selector=client.V1LabelSelector(match_labels={"app": "test-app"}), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta(labels={"app": "test-app"}), + spec=client.V1PodSpec( + containers=[ + client.V1Container( + name="nginx", image="nginx:latest", ports=[client.V1ContainerPort(container_port=80)] + ) + ] + ), + ), + ), + ) + apps_v1.create_namespaced_deployment(namespace=namespace, body=deployment) + print("Created Deployment") + + # Create Service + service = client.V1Service( + metadata=client.V1ObjectMeta(name="test-service", namespace=namespace), + spec=client.V1ServiceSpec( + selector={"app": "test-app"}, ports=[client.V1ServicePort(port=80, target_port=80)], type="ClusterIP" + ), + ) + v1.create_namespaced_service(namespace=namespace, body=service) + print("Created Service") + + # Wait for pods to be ready + print("\nWaiting for pods to be ready...") + time.sleep(10) # Give some time for pods to start + + # List pods + pods = v1.list_namespaced_pod(namespace=namespace) + print("\nPods:") + for pod in pods.items: + print(json.dumps({"name": pod.metadata.name, "phase": pod.status.phase, "ip": pod.status.pod_ip}, indent=2)) + + # Get deployment status + deployment_status = apps_v1.read_namespaced_deployment_status(name="test-deployment", namespace=namespace) + print("\nDeployment status:") + print( + json.dumps( + { + "name": deployment_status.metadata.name, + "replicas": deployment_status.spec.replicas, + "available_replicas": deployment_status.status.available_replicas, + "ready_replicas": deployment_status.status.ready_replicas, + }, + indent=2, + ) + ) + + # Get service details + service_details = v1.read_namespaced_service(name="test-service", namespace=namespace) + print("\nService details:") + print( + json.dumps( + { + "name": service_details.metadata.name, + "type": service_details.spec.type, + "cluster_ip": service_details.spec.cluster_ip, + "ports": [{"port": p.port, "target_port": p.target_port} for p in service_details.spec.ports], + }, + indent=2, + ) + ) + + # Create Ingress + ingress = client.V1Ingress( + metadata=client.V1ObjectMeta( + name="test-ingress", + namespace=namespace, + annotations={"nginx.ingress.kubernetes.io/rewrite-target": "/"}, + ), + spec=client.V1IngressSpec( + rules=[ + client.V1IngressRule( + host="test.local", + http=client.V1HTTPIngressRuleValue( + paths=[ + client.V1HTTPIngressPath( + path="/", + path_type="Prefix", + backend=client.V1IngressBackend( + service=client.V1IngressServiceBackend( + name="test-service", port=client.V1ServiceBackendPort(number=80) + ) + ), + ) + ] + ), + ) + ] + ), + ) + networking_v1 = client.NetworkingV1Api() + networking_v1.create_namespaced_ingress(namespace=namespace, body=ingress) + print("\nCreated Ingress") + + # Get ingress details + ingress_details = networking_v1.read_namespaced_ingress(name="test-ingress", namespace=namespace) + print("\nIngress details:") + print( + json.dumps( + { + "name": ingress_details.metadata.name, + "host": ingress_details.spec.rules[0].host, + "path": ingress_details.spec.rules[0].http.paths[0].path, + }, + indent=2, + ) + ) + + # Clean up + print("\nCleaning up resources...") + networking_v1.delete_namespaced_ingress(name="test-ingress", namespace=namespace) + v1.delete_namespaced_service(name="test-service", namespace=namespace) + apps_v1.delete_namespaced_deployment(name="test-deployment", namespace=namespace) + v1.delete_namespaced_secret(name="test-secret", namespace=namespace) + 
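+ # Note: deleting the namespace at the end of this cleanup also removes any + # remaining namespaced resources; the explicit deletes just make teardown deterministic.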
v1.delete_namespaced_config_map(name="test-config", namespace=namespace) + v1.delete_namespace(name=namespace) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/kafka/example_basic.py b/modules/kafka/example_basic.py new file mode 100644 index 000000000..37b9a32d0 --- /dev/null +++ b/modules/kafka/example_basic.py @@ -0,0 +1,80 @@ +import json +import time +from datetime import datetime +from threading import Thread + +from kafka import KafkaConsumer, KafkaProducer + +from testcontainers.kafka import KafkaContainer + + +def basic_example(): + with KafkaContainer() as kafka: + # Get connection parameters + bootstrap_servers = kafka.get_bootstrap_server() + + # Create Kafka producer + producer = KafkaProducer( + bootstrap_servers=bootstrap_servers, value_serializer=lambda v: json.dumps(v).encode("utf-8") + ) + print("Created Kafka producer") + + # Create Kafka consumer + consumer = KafkaConsumer( + bootstrap_servers=bootstrap_servers, + value_deserializer=lambda v: json.loads(v.decode("utf-8")), + auto_offset_reset="earliest", + group_id="test_group", + ) + print("Created Kafka consumer") + + # Define topics + topics = ["test_topic1", "test_topic2"] + + # Subscribe to topics + consumer.subscribe(topics) + print(f"Subscribed to topics: {topics}") + + # Start consuming in a separate thread + def consume_messages(): + for message in consumer: + print(f"\nReceived message from {message.topic}:") + print(json.dumps(message.value, indent=2)) + + consumer_thread = Thread(target=consume_messages) + consumer_thread.daemon = True + consumer_thread.start() + + # Produce test messages + test_messages = [ + { + "topic": "test_topic1", + "message": {"id": 1, "content": "Message for topic 1", "timestamp": datetime.utcnow().isoformat()}, + }, + { + "topic": "test_topic2", + "message": {"id": 2, "content": "Message for topic 2", "timestamp": datetime.utcnow().isoformat()}, + }, + ] + + for msg in test_messages: + producer.send(msg["topic"], msg["message"]) + print(f"Sent message to {msg['topic']}") + + # Wait for messages to be processed + time.sleep(2) + + # Get topic information + print("\nTopic information:") + for topic in topics: + partitions = consumer.partitions_for_topic(topic) + print(f"{topic}:") + print(f" Partitions: {partitions}") + + # Clean up + producer.close() + consumer.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/keycloak/example_basic.py b/modules/keycloak/example_basic.py new file mode 100644 index 000000000..f4299f989 --- /dev/null +++ b/modules/keycloak/example_basic.py @@ -0,0 +1,171 @@ +import json + +from keycloak import KeycloakAdmin, KeycloakOpenID + +from testcontainers.keycloak import KeycloakContainer + + +def basic_example(): + with KeycloakContainer() as keycloak: + # Get connection parameters + host = keycloak.get_container_host_ip() + port = keycloak.get_exposed_port(keycloak.port) + admin_username = keycloak.admin_username + admin_password = keycloak.admin_password + + # Create admin client + admin = KeycloakAdmin( + server_url=f"http://{host}:{port}/", + username=admin_username, + password=admin_password, + realm_name="master", + verify=False, + ) + print("Connected to Keycloak as admin") + + # Create realm + realm_name = "test-realm" + admin.create_realm(payload={"realm": realm_name, "enabled": True}) + print(f"\nCreated realm: {realm_name}") + + # Switch to new realm + admin.realm_name = realm_name + + # Create client + client_id = "test-client" + admin.create_client( + payload={ + "clientId": client_id, + 
"publicClient": True, + "redirectUris": ["http://localhost:8080/*"], + "webOrigins": ["http://localhost:8080"], + } + ) + print(f"Created client: {client_id}") + + # Get client details + client = admin.get_client(client_id=client_id) + print("\nClient details:") + print( + json.dumps( + { + "client_id": client["clientId"], + "public_client": client["publicClient"], + "redirect_uris": client["redirectUris"], + }, + indent=2, + ) + ) + + # Create user + username = "testuser" + admin.create_user( + payload={ + "username": username, + "email": "test@example.com", + "enabled": True, + "credentials": [{"type": "password", "value": "password", "temporary": False}], + } + ) + print(f"\nCreated user: {username}") + + # Get user details + user = admin.get_user(user_id=username) + print("\nUser details:") + print(json.dumps({"username": user["username"], "email": user["email"], "enabled": user["enabled"]}, indent=2)) + + # Create role + role_name = "test-role" + admin.create_realm_role(payload={"name": role_name, "description": "Test role"}) + print(f"\nCreated role: {role_name}") + + # Assign role to user + role = admin.get_realm_role(role_name=role_name) + admin.assign_realm_roles(user_id=user["id"], roles=[role]) + print(f"Assigned role {role_name} to user {username}") + + # Create group + group_name = "test-group" + admin.create_group(payload={"name": group_name}) + print(f"\nCreated group: {group_name}") + + # Add user to group + group = admin.get_group_by_path(path=f"/{group_name}") + admin.group_user_add(user_id=user["id"], group_id=group["id"]) + print(f"Added user {username} to group {group_name}") + + # Create OpenID client + openid = KeycloakOpenID( + server_url=f"http://{host}:{port}/", client_id=client_id, realm_name=realm_name, verify=False + ) + + # Get token + token = openid.token(username=username, password="password") + print("\nToken details:") + print( + json.dumps( + { + "access_token": token["access_token"][:20] + "...", + "refresh_token": token["refresh_token"][:20] + "...", + "expires_in": token["expires_in"], + }, + indent=2, + ) + ) + + # Get user info + userinfo = openid.userinfo(token["access_token"]) + print("\nUser info:") + print(json.dumps(userinfo, indent=2)) + + # Get realm roles + roles = admin.get_realm_roles() + print("\nRealm roles:") + for role in roles: + print(f"- {role['name']}") + + # Get user roles + user_roles = admin.get_realm_roles_of_user(user_id=user["id"]) + print("\nUser roles:") + for role in user_roles: + print(f"- {role['name']}") + + # Get groups + groups = admin.get_groups() + print("\nGroups:") + for group in groups: + print(f"- {group['name']}") + + # Get group members + group_members = admin.get_group_members(group_id=group["id"]) + print("\nGroup members:") + for member in group_members: + print(f"- {member['username']}") + + # Update user + admin.update_user(user_id=user["id"], payload={"firstName": "Test", "lastName": "User"}) + print("\nUpdated user") + + # Update client + admin.update_client(client_id=client["id"], payload={"description": "Updated test client"}) + print("Updated client") + + # Clean up + admin.delete_user(user_id=user["id"]) + print(f"\nDeleted user: {username}") + + admin.delete_client(client_id=client["id"]) + print(f"Deleted client: {client_id}") + + admin.delete_realm_role(role_name=role_name) + print(f"Deleted role: {role_name}") + + admin.delete_group(group_id=group["id"]) + print(f"Deleted group: {group_name}") + + admin.delete_realm(realm_name=realm_name) + print(f"Deleted realm: {realm_name}") + + +if __name__ 
== "__main__": + basic_example() diff --git a/modules/localstack/example_basic.py b/modules/localstack/example_basic.py new file mode 100644 index 000000000..8c622f223 --- /dev/null +++ b/modules/localstack/example_basic.py @@ -0,0 +1,72 @@ +import json + +import boto3 + +from testcontainers.localstack import LocalStackContainer + + +def basic_example(): + with LocalStackContainer() as localstack: + # Get endpoint URL + endpoint_url = localstack.get_endpoint_url() + + # Create S3 client + s3 = boto3.client( + "s3", + endpoint_url=endpoint_url, + aws_access_key_id="test", + aws_secret_access_key="test", + region_name="us-east-1", + ) + + # Create SQS client + sqs = boto3.client( + "sqs", + endpoint_url=endpoint_url, + aws_access_key_id="test", + aws_secret_access_key="test", + region_name="us-east-1", + ) + + # Create S3 bucket + bucket_name = "test-bucket" + s3.create_bucket(Bucket=bucket_name) + print(f"Created S3 bucket: {bucket_name}") + + # Upload file to S3 + test_data = {"message": "Hello from LocalStack!", "timestamp": "2024-01-01"} + s3.put_object(Bucket=bucket_name, Key="test.json", Body=json.dumps(test_data)) + print("Uploaded test.json to S3") + + # Create SQS queue + queue_name = "test-queue" + queue = sqs.create_queue(QueueName=queue_name) + queue_url = queue["QueueUrl"] + print(f"Created SQS queue: {queue_name}") + + # Send message to SQS + message = {"message": "Test message", "number": 42} + sqs.send_message(QueueUrl=queue_url, MessageBody=json.dumps(message)) + print("Sent message to SQS") + + # Receive message from SQS + response = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=1) + + if "Messages" in response: + received_message = json.loads(response["Messages"][0]["Body"]) + print("\nReceived message from SQS:") + print(json.dumps(received_message, indent=2)) + + # Delete message + sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=response["Messages"][0]["ReceiptHandle"]) + print("Deleted message from queue") + + # List S3 objects + objects = s3.list_objects(Bucket=bucket_name) + print("\nS3 bucket contents:") + for obj in objects.get("Contents", []): + print(f"Key: {obj['Key']}, Size: {obj['Size']} bytes") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mailpit/example_basic.py b/modules/mailpit/example_basic.py new file mode 100644 index 000000000..ef97ab906 --- /dev/null +++ b/modules/mailpit/example_basic.py @@ -0,0 +1,62 @@ +import smtplib +import time +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +import requests + +from testcontainers.mailpit import MailpitContainer + + +def basic_example(): + with MailpitContainer() as mailpit: + # Get SMTP and API endpoints + smtp_host = mailpit.get_container_host_ip() + smtp_port = mailpit.get_exposed_smtp_port() + api_url = mailpit.get_base_api_url() + + # Create email message + msg = MIMEMultipart() + msg["From"] = "sender@example.com" + msg["To"] = "recipient@example.com" + msg["Subject"] = "Test Email" + + body = "This is a test email sent to Mailpit." 
+ msg.attach(MIMEText(body, "plain")) + + # Send email using SMTP + with smtplib.SMTP(smtp_host, smtp_port) as server: + server.send_message(msg) + print("Email sent successfully") + + # Wait for email to be processed + time.sleep(1) + + # Check received emails using API + response = requests.get(f"{api_url}/api/v1/messages") + messages = response.json() + + print("\nReceived emails:") + for message in messages["messages"]: + print(f"From: {message['From']['Address']}") + print(f"To: {message['To'][0]['Address']}") + print(f"Subject: {message['Subject']}") + print(f"Body: {message['Text']}") + print("---") + + # Get specific email details + if messages["messages"]: + first_message = messages["messages"][0] + message_id = first_message["ID"] + + response = requests.get(f"{api_url}/api/v1/messages/{message_id}") + message_details = response.json() + + print("\nDetailed message info:") + print(f"Size: {message_details['Size']} bytes") + print(f"Created: {message_details['Created']}") + print(f"Attachments: {len(message_details['Attachments'])}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/memcached/example_basic.py b/modules/memcached/example_basic.py new file mode 100644 index 000000000..01e52dea8 --- /dev/null +++ b/modules/memcached/example_basic.py @@ -0,0 +1,135 @@ +import json +import pickle + +import memcache + +from testcontainers.memcached import MemcachedContainer + + +def basic_example(): + with MemcachedContainer() as memcached: + # Get connection parameters + host = memcached.get_container_host_ip() + port = memcached.get_exposed_port(memcached.port) + + # Create Memcached client + client = memcache.Client([f"{host}:{port}"]) + print("Connected to Memcached") + + # Store simple values + client.set("string_key", "Hello from Memcached") + client.set("int_key", 42) + client.set("float_key", 3.14) + print("Stored simple values") + + # Store complex data + complex_data = {"name": "test", "values": [1, 2, 3], "nested": {"key": "value"}} + client.set("complex_key", json.dumps(complex_data)) + print("Stored complex data") + + # Store with expiration + client.set("expiring_key", "This will expire", time=5) + print("Stored value with expiration") + + # Store with pickle + class TestObject: + def __init__(self, name, value): + self.name = name + self.value = value + + test_obj = TestObject("test", 123) + client.set("object_key", pickle.dumps(test_obj)) + print("Stored pickled object") + + # Retrieve values + print("\nRetrieved values:") + print(f"string_key: {client.get('string_key')}") + print(f"int_key: {client.get('int_key')}") + print(f"float_key: {client.get('float_key')}") + + # Retrieve complex data + complex_value = json.loads(client.get("complex_key")) + print("\nComplex data:") + print(json.dumps(complex_value, indent=2)) + + # Retrieve pickled object + obj_data = pickle.loads(client.get("object_key")) + print("\nPickled object:") + print(f"name: {obj_data.name}") + print(f"value: {obj_data.value}") + + # Check expiration + print("\nChecking expiring key:") + print(f"expiring_key: {client.get('expiring_key')}") + print("Waiting for key to expire...") + import time + + time.sleep(6) + print(f"expiring_key after expiration: {client.get('expiring_key')}") + + # Store multiple values + multi_data = {"key1": "value1", "key2": "value2", "key3": "value3"} + client.set_multi(multi_data) + print("\nStored multiple values") + + # Retrieve multiple values + multi_keys = ["key1", "key2", "key3"] + multi_values = client.get_multi(multi_keys) + print("\nMultiple 
values:") + print(json.dumps(multi_values, indent=2)) + + # Increment and decrement + client.set("counter", 0) + client.incr("counter") + client.incr("counter", 2) + print("\nCounter after increment:") + print(f"counter: {client.get('counter')}") + + client.decr("counter") + print("Counter after decrement:") + print(f"counter: {client.get('counter')}") + + # Store with flags + client.set("flagged_key", "value", flags=1) + print("\nStored value with flags") + + # Get stats + stats = client.get_stats() + print("\nMemcached stats:") + for server, server_stats in stats: + print(f"\nServer: {server}") + print(json.dumps(dict(server_stats), indent=2)) + + # Delete values + client.delete("string_key") + client.delete_multi(["key1", "key2", "key3"]) + print("\nDeleted values") + + # Check deleted values + print("\nChecking deleted values:") + print(f"string_key: {client.get('string_key')}") + print(f"key1: {client.get('key1')}") + + # Store with CAS + client.set("cas_key", "initial") + cas_value = client.gets("cas_key") + print("\nCAS value:") + print(f"value: {cas_value}") + + # Update with CAS + success = client.cas("cas_key", "updated", cas_value[1]) + print(f"CAS update success: {success}") + print(f"Updated value: {client.get('cas_key')}") + + # Try to update with invalid CAS + success = client.cas("cas_key", "failed", 0) + print(f"Invalid CAS update success: {success}") + print(f"Value after failed update: {client.get('cas_key')}") + + # Clean up + client.flush_all() + print("\nFlushed all values") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/milvus/example_basic.py b/modules/milvus/example_basic.py new file mode 100644 index 000000000..776aa11b3 --- /dev/null +++ b/modules/milvus/example_basic.py @@ -0,0 +1,138 @@ +import json +from datetime import datetime + +import numpy as np +from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections, utility + +from testcontainers.milvus import MilvusContainer + + +def basic_example(): + with MilvusContainer() as milvus: + # Get connection parameters + host = milvus.get_container_host_ip() + port = milvus.get_exposed_port(milvus.port) + + # Connect to Milvus + connections.connect(alias="default", host=host, port=port) + print("Connected to Milvus") + + # Create collection + collection_name = "test_collection" + dim = 128 + + fields = [ + FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True), + FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=dim), + FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=500), + FieldSchema(name="category", dtype=DataType.VARCHAR, max_length=100), + FieldSchema(name="tags", dtype=DataType.JSON), + FieldSchema(name="timestamp", dtype=DataType.VARCHAR, max_length=50), + ] + + schema = CollectionSchema(fields=fields, description="Test collection") + collection = Collection(name=collection_name, schema=schema) + print(f"Created collection: {collection_name}") + + # Create index + index_params = {"metric_type": "COSINE", "index_type": "IVF_FLAT", "params": {"nlist": 1024}} + collection.create_index(field_name="vector", index_params=index_params) + print("Created index on vector field") + + # Generate test data + num_entities = 5 + vectors = np.random.rand(num_entities, dim).tolist() + + texts = [ + "AI and machine learning are transforming industries", + "New study reveals benefits of meditation", + "Global warming reaches critical levels", + "Stock market shows strong growth", + "New restaurant opens in downtown", + ] + + categories = 
["Technology", "Health", "Environment", "Finance", "Food"] + + tags = [ + ["AI", "ML", "innovation"], + ["wellness", "mental health"], + ["climate", "sustainability"], + ["investing", "markets"], + ["dining", "local"], + ] + + timestamps = [datetime.utcnow().isoformat() for _ in range(num_entities)] + + # Insert data + entities = [vectors, texts, categories, tags, timestamps] + + collection.insert(entities) + print("Inserted test data") + + # Flush collection + collection.flush() + print("Flushed collection") + + # Load collection + collection.load() + print("Loaded collection") + + # Search vectors + search_params = {"metric_type": "COSINE", "params": {"nprobe": 10}} + + results = collection.search( + data=[vectors[0]], + anns_field="vector", + param=search_params, + limit=3, + output_fields=["text", "category", "tags"], + ) + + print("\nSearch results:") + for hits in results: + for hit in hits: + print(json.dumps({"id": hit.id, "distance": hit.distance, "entity": hit.entity}, indent=2)) + + # Query with filter + filter_expr = 'category == "Technology"' + query_results = collection.query(expr=filter_expr, output_fields=["text", "category", "tags"]) + + print("\nQuery results with filter:") + print(json.dumps(query_results, indent=2)) + + # Get collection stats + stats = collection.get_statistics() + print("\nCollection statistics:") + print(json.dumps(stats, indent=2)) + + # Create partition + partition_name = "test_partition" + collection.create_partition(partition_name) + print(f"\nCreated partition: {partition_name}") + + # List partitions + partitions = collection.partitions + print("\nPartitions:") + for partition in partitions: + print( + json.dumps( + {"name": partition.name, "is_empty": partition.is_empty, "num_entities": partition.num_entities}, + indent=2, + ) + ) + + # Delete partition + collection.drop_partition(partition_name) + print(f"Deleted partition: {partition_name}") + + # Clean up + utility.drop_collection(collection_name) + print("\nDropped collection") + + # Disconnect + connections.disconnect("default") + print("Disconnected from Milvus") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/minio/example_basic.py b/modules/minio/example_basic.py new file mode 100644 index 000000000..5318679be --- /dev/null +++ b/modules/minio/example_basic.py @@ -0,0 +1,120 @@ +import io +import json +from datetime import timedelta + +from minio import Minio + +from testcontainers.minio import MinioContainer + + +def basic_example(): + with MinioContainer() as minio: + # Get connection parameters + host = minio.get_container_host_ip() + port = minio.get_exposed_port(minio.port) + access_key = minio.access_key + secret_key = minio.secret_key + + # Create MinIO client + client = Minio(f"{host}:{port}", access_key=access_key, secret_key=secret_key, secure=False) + print("Connected to MinIO") + + # Create bucket + bucket_name = "test-bucket" + client.make_bucket(bucket_name) + print(f"Created bucket: {bucket_name}") + + # List buckets + buckets = client.list_buckets() + print("\nBuckets:") + for bucket in buckets: + print(f"- {bucket.name} (created: {bucket.creation_date})") + + # Upload test files + test_files = {"test1.txt": "Hello from test1", "test2.txt": "Hello from test2", "test3.txt": "Hello from test3"} + + for filename, content in test_files.items(): + data = io.BytesIO(content.encode()) + client.put_object(bucket_name, filename, data, len(content.encode()), content_type="text/plain") + print(f"Uploaded {filename}") + + # List objects + objects = 
client.list_objects(bucket_name) + print("\nObjects in bucket:") + for obj in objects: + print(f"- {obj.object_name} (size: {obj.size} bytes)") + + # Get object + print("\nObject contents:") + for filename in test_files: + response = client.get_object(bucket_name, filename) + content = response.read().decode() + print(f"{filename}: {content}") + + # Create directory structure + client.put_object( + bucket_name, "folder1/test4.txt", io.BytesIO(b"Hello from test4"), 15, content_type="text/plain" + ) + print("\nCreated directory structure") + + # List objects with prefix + objects = client.list_objects(bucket_name, prefix="folder1/") + print("\nObjects in folder1:") + for obj in objects: + print(f"- {obj.object_name}") + + # Copy object + client.copy_object(bucket_name, "test1.txt", f"{bucket_name}/folder1/test1_copy.txt") + print("\nCopied object") + + # Get object metadata + stat = client.stat_object(bucket_name, "test1.txt") + print("\nObject metadata:") + print( + json.dumps( + { + "name": stat.object_name, + "size": stat.size, + "content_type": stat.content_type, + "last_modified": stat.last_modified.isoformat(), + }, + indent=2, + ) + ) + + # Generate presigned URL + url = client.presigned_get_object(bucket_name, "test1.txt", expires=timedelta(hours=1)) + print(f"\nPresigned URL: {url}") + + # Set bucket policy + policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"AWS": "*"}, + "Action": ["s3:GetObject"], + "Resource": [f"arn:aws:s3:::{bucket_name}/*"], + } + ], + } + client.set_bucket_policy(bucket_name, json.dumps(policy)) + print("\nSet bucket policy") + + # Get bucket policy + policy = client.get_bucket_policy(bucket_name) + print("\nBucket policy:") + print(json.dumps(json.loads(policy), indent=2)) + + # Remove objects + for filename in test_files: + client.remove_object(bucket_name, filename) + print(f"Removed {filename}") + + # Remove bucket + client.remove_bucket(bucket_name) + print(f"\nRemoved bucket: {bucket_name}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mongodb/example_basic.py b/modules/mongodb/example_basic.py new file mode 100644 index 000000000..8fde30c65 --- /dev/null +++ b/modules/mongodb/example_basic.py @@ -0,0 +1,85 @@ +import json +from datetime import datetime + +from pymongo import MongoClient + +from testcontainers.mongodb import MongoDbContainer + + +def basic_example(): + with MongoDbContainer() as mongodb: + # Get connection URL + connection_url = mongodb.get_connection_url() + + # Create MongoDB client + client = MongoClient(connection_url) + print("Connected to MongoDB") + + # Get database and collection + db = client.test_db + collection = db.test_collection + + # Insert test documents + test_docs = [ + {"name": "test1", "value": 100, "category": "A", "created_at": datetime.utcnow()}, + {"name": "test2", "value": 200, "category": "B", "created_at": datetime.utcnow()}, + {"name": "test3", "value": 300, "category": "A", "created_at": datetime.utcnow()}, + ] + + result = collection.insert_many(test_docs) + print(f"Inserted {len(result.inserted_ids)} documents") + + # Query documents + print("\nQuery results:") + for doc in collection.find({"category": "A"}): + print(json.dumps(doc, default=str, indent=2)) + + # Execute aggregation pipeline + pipeline = [ + { + "$group": { + "_id": "$category", + "avg_value": {"$avg": "$value"}, + "count": {"$sum": 1}, + "min_value": {"$min": "$value"}, + "max_value": {"$max": "$value"}, + } + }, + {"$sort": {"avg_value": -1}}, + ] + + 
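+ # $group buckets documents by category and computes count/avg/min/max per bucket; + # $sort then orders the buckets by average value, descending.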
print("\nAggregation results:") + for result in collection.aggregate(pipeline): + print(json.dumps(result, default=str, indent=2)) + + # Create indexes + collection.create_index("name") + collection.create_index([("category", 1), ("value", -1)]) + print("\nCreated indexes") + + # List indexes + print("\nIndexes:") + for index in collection.list_indexes(): + print(json.dumps(index, default=str, indent=2)) + + # Update documents + result = collection.update_many({"category": "A"}, {"$set": {"updated": True}}) + print(f"\nUpdated {result.modified_count} documents") + + # Find updated documents + print("\nUpdated documents:") + for doc in collection.find({"updated": True}): + print(json.dumps(doc, default=str, indent=2)) + + # Delete documents + result = collection.delete_many({"category": "B"}) + print(f"\nDeleted {result.deleted_count} documents") + + # Get collection stats + stats = db.command("collstats", "test_collection") + print("\nCollection stats:") + print(json.dumps(stats, default=str, indent=2)) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mqtt/example_basic.py b/modules/mqtt/example_basic.py new file mode 100644 index 000000000..dc6de9fe3 --- /dev/null +++ b/modules/mqtt/example_basic.py @@ -0,0 +1,51 @@ +import time + +import paho.mqtt.client as mqtt + +from testcontainers.mqtt import MqttContainer + + +def basic_example(): + with MqttContainer() as mqtt_container: + # Get connection parameters + host = mqtt_container.get_container_host_ip() + port = mqtt_container.get_exposed_port(mqtt_container.port) + + # Create MQTT client + client = mqtt.Client() + + # Define callback functions + def on_connect(client, userdata, flags, rc): + print(f"Connected with result code {rc}") + # Subscribe to topics + client.subscribe("test/topic") + + def on_message(client, userdata, msg): + print(f"Received message on topic {msg.topic}: {msg.payload.decode()}") + + # Set callbacks + client.on_connect = on_connect + client.on_message = on_message + + # Connect to broker + client.connect(host, port) + client.loop_start() + + # Publish test messages + test_messages = ["Hello MQTT!", "This is a test message", "MQTT is working!"] + + for msg in test_messages: + client.publish("test/topic", msg) + print(f"Published message: {msg}") + time.sleep(1) # Wait a bit between messages + + # Wait for messages to be processed + time.sleep(2) + + # Clean up + client.loop_stop() + client.disconnect() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mssql/example_basic.py b/modules/mssql/example_basic.py new file mode 100644 index 000000000..f42e541d1 --- /dev/null +++ b/modules/mssql/example_basic.py @@ -0,0 +1,161 @@ +import pymssql + +from testcontainers.mssql import MsSqlContainer + + +def basic_example(): + with MsSqlContainer() as mssql: + # Get connection parameters + host = mssql.get_container_host_ip() + port = mssql.get_exposed_port(mssql.port) + username = mssql.username + password = mssql.password + database = mssql.database + + # Connect to MSSQL + connection = pymssql.connect(server=host, port=port, user=username, password=password, database=database) + print("Connected to MSSQL") + + # Create cursor + cursor = connection.cursor() + + # Create test table + cursor.execute(""" + CREATE TABLE test_table ( + id INT IDENTITY(1,1) PRIMARY KEY, + name NVARCHAR(50), + value INT, + category NVARCHAR(10), + created_at DATETIME2 DEFAULT GETDATE() + ) + """) + print("Created test table") + + # Insert test data + test_data = [("test1", 100, "A"), ("test2", 200, "B"), 
("test3", 300, "A")] + + cursor.executemany( + """ + INSERT INTO test_table (name, value, category) + VALUES (%s, %s, %s) + """, + test_data, + ) + print("Inserted test data") + + # Commit changes + connection.commit() + + # Query data + print("\nQuery results:") + cursor.execute("SELECT * FROM test_table WHERE category = 'A'") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Create view + cursor.execute(""" + CREATE OR ALTER VIEW test_view AS + SELECT category, COUNT(*) as count, AVG(value) as avg_value + FROM test_table + GROUP BY category + """) + print("\nCreated view") + + # Query view + print("\nView results:") + cursor.execute("SELECT * FROM test_view") + for row in cursor: + print({"category": row[0], "count": row[1], "avg_value": float(row[2])}) + + # Create index + cursor.execute("CREATE INDEX test_idx ON test_table (value)") + print("\nCreated index") + + # Query using index + print("\nQuery using index:") + cursor.execute("SELECT * FROM test_table WHERE value > 150") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Get table metadata + cursor.execute(""" + SELECT + c.name as column_name, + t.name as data_type, + c.max_length, + c.is_nullable + FROM sys.columns c + JOIN sys.types t ON c.user_type_id = t.user_type_id + WHERE OBJECT_ID = OBJECT_ID('test_table') + ORDER BY c.column_id + """) + print("\nTable metadata:") + for row in cursor: + print({"column": row[0], "type": row[1], "length": row[2], "nullable": row[3]}) + + # Create stored procedure + cursor.execute(""" + CREATE OR ALTER PROCEDURE test_proc + @category NVARCHAR(10), + @count INT OUTPUT + AS + BEGIN + SELECT @count = COUNT(*) + FROM test_table + WHERE category = @category + END + """) + print("\nCreated stored procedure") + + # Execute stored procedure + cursor.execute(""" + DECLARE @count INT + EXEC test_proc @category = 'A', @count = @count OUTPUT + SELECT @count as count + """) + count = cursor.fetchone()[0] + print(f"Count for category A: {count}") + + # Create function + cursor.execute(""" + CREATE OR ALTER FUNCTION test_func(@category NVARCHAR(10)) + RETURNS TABLE + AS + RETURN + ( + SELECT name, value + FROM test_table + WHERE category = @category + ) + """) + print("\nCreated function") + + # Use function + print("\nFunction results:") + cursor.execute("SELECT * FROM test_func('A')") + for row in cursor: + print({"name": row[0], "value": row[1]}) + + # Create trigger + cursor.execute(""" + CREATE OR ALTER TRIGGER test_trigger + ON test_table + AFTER INSERT + AS + BEGIN + PRINT 'New row inserted' + END + """) + print("\nCreated trigger") + + # Test trigger + cursor.execute("INSERT INTO test_table (name, value, category) VALUES ('test4', 400, 'B')") + connection.commit() + + # Clean up + cursor.close() + connection.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/mysql/example_basic.py b/modules/mysql/example_basic.py new file mode 100644 index 000000000..ba3418b28 --- /dev/null +++ b/modules/mysql/example_basic.py @@ -0,0 +1,16 @@ +import sqlalchemy + +from testcontainers.mysql import MySqlContainer + + +def basic_example(): + config = MySqlContainer("mysql:8.3.0", dialect="pymysql") + + with config as mysql: + connection_url = mysql.get_connection_url() + + engine = sqlalchemy.create_engine(connection_url) + with engine.begin() as connection: + result = connection.execute(sqlalchemy.text("select 
version()")) + for row in result: + print(f"MySQL version: {row[0]}") diff --git a/modules/nats/example_basic.py b/modules/nats/example_basic.py new file mode 100644 index 000000000..9e941bf9b --- /dev/null +++ b/modules/nats/example_basic.py @@ -0,0 +1,152 @@ +import asyncio +import json + +from nats.aio.client import Client as NATS +from nats.aio.msg import Msg + +from testcontainers.nats import NatsContainer + + +async def message_handler(msg: Msg): + subject = msg.subject + data = msg.data.decode() + print(f"Received message on {subject}: {data}") + + +async def basic_example(): + with NatsContainer() as nats_container: + # Get connection parameters + host = nats_container.get_container_host_ip() + port = nats_container.get_exposed_port(nats_container.port) + + # Create NATS client + nc = NATS() + await nc.connect(f"nats://{host}:{port}") + print("Connected to NATS") + + # Create JetStream context + js = nc.jetstream() + + # Create stream + stream = await js.add_stream(name="test-stream", subjects=["test.>"]) + print(f"\nCreated stream: {stream.config.name}") + + # Create consumer + consumer = await js.add_consumer(stream_name="test-stream", durable_name="test-consumer") + print(f"Created consumer: {consumer.name}") + + # Subscribe to subjects + subjects = ["test.1", "test.2", "test.3"] + for subject in subjects: + await nc.subscribe(subject, cb=message_handler) + print(f"Subscribed to {subject}") + + # Publish messages + messages = {"test.1": "Hello from test.1", "test.2": "Hello from test.2", "test.3": "Hello from test.3"} + + for subject, message in messages.items(): + await nc.publish(subject, message.encode()) + print(f"Published to {subject}") + + # Publish with headers + headers = {"header1": "value1", "header2": "value2"} + await nc.publish("test.headers", b"Message with headers", headers=headers) + print("\nPublished message with headers") + + # Publish with reply + reply_subject = "test.reply" + await nc.subscribe(reply_subject, cb=message_handler) + print(f"Subscribed to {reply_subject}") + + response = await nc.request("test.request", b"Request message", timeout=1) + print(f"Received reply: {response.data.decode()}") + + # Publish to JetStream + for subject, message in messages.items(): + ack = await js.publish(subject, message.encode()) + print(f"Published to JetStream {subject}: {ack.stream}") + + # Get stream info + stream_info = await js.stream_info("test-stream") + print("\nStream info:") + print( + json.dumps( + { + "name": stream_info.config.name, + "subjects": stream_info.config.subjects, + "messages": stream_info.state.messages, + "bytes": stream_info.state.bytes, + }, + indent=2, + ) + ) + + # Get consumer info + consumer_info = await js.consumer_info("test-stream", "test-consumer") + print("\nConsumer info:") + print( + json.dumps( + { + "name": consumer_info.name, + "stream_name": consumer_info.stream_name, + "delivered": consumer_info.delivered.stream_seq, + "ack_floor": consumer_info.ack_floor.stream_seq, + }, + indent=2, + ) + ) + + # Create key-value store + kv = await js.create_key_value(bucket="test-kv", history=5, ttl=3600) + print("\nCreated key-value store") + + # Put values + await kv.put("key1", b"value1") + await kv.put("key2", b"value2") + print("Put values in key-value store") + + # Get values + entry = await kv.get("key1") + print(f"Got value: {entry.value.decode()}") + + # List keys + keys = await kv.keys() + print("\nKeys in store:") + for key in keys: + print(f"- {key}") + + # Delete key + await kv.delete("key1") + print("Deleted key1") + + # 
Create object store + os = await js.create_object_store(bucket="test-os", ttl=3600) + print("\nCreated object store") + + # Put object + await os.put("test.txt", b"Hello from object store") + print("Put object in store") + + # Get object + obj = await os.get("test.txt") + print(f"Got object: {obj.data.decode()}") + + # List objects + objects = await os.list() + print("\nObjects in store:") + for obj in objects: + print(f"- {obj.name}") + + # Delete object + await os.delete("test.txt") + print("Deleted object") + + # Clean up + await js.delete_stream("test-stream") + print("\nDeleted stream") + + await nc.close() + + +if __name__ == "__main__": + asyncio.run(basic_example()) diff --git a/modules/neo4j/example_basic.py b/modules/neo4j/example_basic.py new file mode 100644 index 000000000..c6114bc70 --- /dev/null +++ b/modules/neo4j/example_basic.py @@ -0,0 +1,198 @@ +import json + +from neo4j import GraphDatabase + +from testcontainers.neo4j import Neo4jContainer + + +def basic_example(): + with Neo4jContainer() as neo4j: + # Get connection parameters + host = neo4j.get_container_host_ip() + port = neo4j.get_exposed_port(neo4j.port) + username = neo4j.username + password = neo4j.password + + # Create Neo4j driver + driver = GraphDatabase.driver(f"bolt://{host}:{port}", auth=(username, password)) + print("Connected to Neo4j") + + # Create session + with driver.session() as session: + # Create nodes + create_nodes_query = """ + CREATE (p1:Person {name: 'Alice', age: 30}) + CREATE (p2:Person {name: 'Bob', age: 35}) + CREATE (p3:Person {name: 'Charlie', age: 25}) + CREATE (c1:Company {name: 'Tech Corp', founded: 2000}) + CREATE (c2:Company {name: 'Data Inc', founded: 2010}) + """ + session.run(create_nodes_query) + print("Created nodes") + + # Create relationships + create_rels_query = """ + MATCH (p1:Person {name: 'Alice'}), (c1:Company {name: 'Tech Corp'}) + CREATE (p1)-[:WORKS_AT {since: 2015}]->(c1) + + MATCH (p2:Person {name: 'Bob'}), (c1:Company {name: 'Tech Corp'}) + CREATE (p2)-[:WORKS_AT {since: 2018}]->(c1) + + MATCH (p3:Person {name: 'Charlie'}), (c2:Company {name: 'Data Inc'}) + CREATE (p3)-[:WORKS_AT {since: 2020}]->(c2) + + MATCH (p1:Person {name: 'Alice'}), (p2:Person {name: 'Bob'}) + CREATE (p1)-[:KNOWS {since: 2016}]->(p2) + """ + session.run(create_rels_query) + print("Created relationships") + + # Query nodes + query_nodes = """ + MATCH (n) + RETURN n + """ + result = session.run(query_nodes) + print("\nAll nodes:") + for record in result: + node = record["n"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Query relationships + query_rels = """ + MATCH (n)-[r]->(m) + RETURN n, r, m + """ + result = session.run(query_rels) + print("\nAll relationships:") + for record in result: + print( + json.dumps( + { + "from": {"labels": list(record["n"].labels), "properties": dict(record["n"])}, + "relationship": {"type": record["r"].type, "properties": dict(record["r"])}, + "to": {"labels": list(record["m"].labels), "properties": dict(record["m"])}, + }, + indent=2, + ) + ) + + # Create index + create_index = """ + CREATE INDEX person_name IF NOT EXISTS + FOR (p:Person) + ON (p.name) + """ + session.run(create_index) + print("\nCreated index on Person.name") + + # Query using index + query_indexed = """ + MATCH (p:Person) + WHERE p.name = 'Alice' + RETURN p + """ + result = session.run(query_indexed) + print("\nQuery using index:") + for record in result: + node = record["p"] + print(json.dumps({"labels": list(node.labels), "properties": 
dict(node)}, indent=2)) + + # Create constraint + create_constraint = """ + CREATE CONSTRAINT company_name IF NOT EXISTS + FOR (c:Company) + REQUIRE c.name IS UNIQUE + """ + session.run(create_constraint) + print("\nCreated constraint on Company.name") + + # Create full-text index + create_ft_index = """ + CALL db.index.fulltext.createNodeIndex( + "personSearch", + ["Person"], + ["name"] + ) + """ + session.run(create_ft_index) + print("Created full-text index") + + # Query using full-text index + query_ft = """ + CALL db.index.fulltext.queryNodes( + "personSearch", + "Alice" + ) + YIELD node + RETURN node + """ + result = session.run(query_ft) + print("\nFull-text search results:") + for record in result: + node = record["node"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Create stored procedure + create_proc = """ + CALL apoc.custom.asProcedure( + 'getCompanyEmployees', + 'MATCH (p:Person)-[:WORKS_AT]->(c:Company {name: $companyName}) + RETURN p', + 'READ', + [['p', 'NODE']], + [['companyName', 'STRING']] + ) + """ + session.run(create_proc) + print("\nCreated stored procedure") + + # Call stored procedure + call_proc = """ + CALL custom.getCompanyEmployees('Tech Corp') + YIELD p + RETURN p + """ + result = session.run(call_proc) + print("\nStored procedure results:") + for record in result: + node = record["p"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Create trigger + create_trigger = """ + CALL apoc.trigger.add( + 'setTimestamp', + 'UNWIND apoc.trigger.nodesByLabel($assignedLabels, "Person") AS n + SET n.updated_at = datetime()', + {phase: 'after'} + ) + """ + session.run(create_trigger) + print("\nCreated trigger") + + # Test trigger + test_trigger = """ + MATCH (p:Person {name: 'Alice'}) + SET p.age = 31 + RETURN p + """ + result = session.run(test_trigger) + print("\nTrigger test results:") + for record in result: + node = record["p"] + print(json.dumps({"labels": list(node.labels), "properties": dict(node)}, indent=2)) + + # Clean up + cleanup = """ + MATCH (n) + DETACH DELETE n + """ + session.run(cleanup) + print("\nCleaned up database") + + driver.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/nginx/example_basic.py b/modules/nginx/example_basic.py new file mode 100644 index 000000000..d7aaec122 --- /dev/null +++ b/modules/nginx/example_basic.py @@ -0,0 +1,116 @@ +import json +import os +from pathlib import Path + +import requests + +from testcontainers.nginx import NginxContainer + + +def basic_example(): + with NginxContainer() as nginx: + # Get connection parameters + host = nginx.get_container_host_ip() + port = nginx.get_exposed_port(nginx.port) + nginx_url = f"http://{host}:{port}" + print(f"Nginx URL: {nginx_url}") + + # Create test HTML file + test_html = """ + + + + Test Page + + +

+ <h1>Hello from Nginx!</h1>
+ <p>This is a test page.</p>

+ + + """ + + # Create test directory and file + test_dir = Path("/tmp/nginx_test") + test_dir.mkdir(exist_ok=True) + test_file = test_dir / "index.html" + test_file.write_text(test_html) + + # Copy test file to container + nginx.get_container().copy_to_container(test_file, "/usr/share/nginx/html/") + print("Copied test file to container") + + # Test basic HTTP request + response = requests.get(nginx_url) + print(f"\nBasic request status: {response.status_code}") + print(f"Content type: {response.headers.get('content-type')}") + print(f"Content length: {response.headers.get('content-length')}") + + # Test HEAD request + head_response = requests.head(nginx_url) + print("\nHEAD request headers:") + print(json.dumps(dict(head_response.headers), indent=2)) + + # Create test configuration + test_config = """ + server { + listen 80; + server_name test.local; + + location /test { + return 200 'Test location'; + } + + location /redirect { + return 301 /test; + } + + location /error { + return 404 'Not Found'; + } + } + """ + + # Write and copy configuration + config_file = test_dir / "test.conf" + config_file.write_text(test_config) + nginx.get_container().copy_to_container(config_file, "/etc/nginx/conf.d/") + print("\nCopied test configuration") + + # Reload Nginx configuration + nginx.get_container().exec_run("nginx -s reload") + print("Reloaded Nginx configuration") + + # Test custom location + test_response = requests.get(f"{nginx_url}/test") + print(f"\nTest location response: {test_response.text}") + + # Test redirect + redirect_response = requests.get(f"{nginx_url}/redirect", allow_redirects=False) + print(f"\nRedirect status: {redirect_response.status_code}") + print(f"Redirect location: {redirect_response.headers.get('location')}") + + # Test error + error_response = requests.get(f"{nginx_url}/error") + print(f"\nError status: {error_response.status_code}") + print(f"Error response: {error_response.text}") + + # Get Nginx version + version_response = requests.get(nginx_url) + server = version_response.headers.get("server") + print(f"\nNginx version: {server}") + + # Test with different HTTP methods + methods = ["GET", "POST", "PUT", "DELETE", "OPTIONS"] + print("\nHTTP method tests:") + for method in methods: + response = requests.request(method, nginx_url) + print(f"{method}: {response.status_code}") + + # Clean up + os.remove(test_file) + os.remove(config_file) + os.rmdir(test_dir) + + +if __name__ == "__main__": + basic_example() diff --git a/modules/ollama/example_basic.py b/modules/ollama/example_basic.py new file mode 100644 index 000000000..134b636f5 --- /dev/null +++ b/modules/ollama/example_basic.py @@ -0,0 +1,50 @@ +import requests + +from testcontainers.ollama import OllamaContainer + + +def basic_example(): + with OllamaContainer() as ollama: + # Get API endpoint + api_url = ollama.get_api_url() + + # Pull a model + model_name = "llama2" + print(f"Pulling model: {model_name}") + response = requests.post(f"{api_url}/api/pull", json={"name": model_name}) + print(f"Pull response: {response.json()}") + + # Generate text + prompt = "Write a short poem about programming." 
+ print(f"\nGenerating text for prompt: {prompt}") + + response = requests.post( + f"{api_url}/api/generate", json={"model": model_name, "prompt": prompt, "stream": False} + ) + + result = response.json() + print("\nGenerated text:") + print(result["response"]) + + # Embed text + text_to_embed = "The quick brown fox jumps over the lazy dog" + print(f"\nGenerating embedding for: {text_to_embed}") + + response = requests.post(f"{api_url}/api/embeddings", json={"model": model_name, "prompt": text_to_embed}) + + embedding = response.json() + print("\nEmbedding:") + print(f"Length: {len(embedding['embedding'])}") + print(f"First 5 values: {embedding['embedding'][:5]}") + + # List available models + response = requests.get(f"{api_url}/api/tags") + models = response.json() + + print("\nAvailable models:") + for model in models["models"]: + print(f"Name: {model['name']}, Size: {model['size']}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/opensearch/example_basic.py b/modules/opensearch/example_basic.py new file mode 100644 index 000000000..e69de29bb diff --git a/modules/oracle-free/example_basic.py b/modules/oracle-free/example_basic.py new file mode 100644 index 000000000..8abad4d01 --- /dev/null +++ b/modules/oracle-free/example_basic.py @@ -0,0 +1,140 @@ +import oracledb + +from testcontainers.oracle_free import OracleFreeContainer + + +def basic_example(): + with OracleFreeContainer() as oracle: + # Get connection parameters + host = oracle.get_container_host_ip() + port = oracle.get_exposed_port(oracle.port) + username = oracle.username + password = oracle.password + service_name = oracle.service_name + + # Create connection string + dsn = f"{host}:{port}/{service_name}" + + # Connect to Oracle + connection = oracledb.connect(user=username, password=password, dsn=dsn) + print("Connected to Oracle") + + # Create cursor + cursor = connection.cursor() + + # Create test table + cursor.execute(""" + CREATE TABLE test_table ( + id NUMBER GENERATED ALWAYS AS IDENTITY, + name VARCHAR2(50), + value NUMBER, + category VARCHAR2(10), + created_at TIMESTAMP DEFAULT SYSTIMESTAMP + ) + """) + print("Created test table") + + # Insert test data + test_data = [("test1", 100, "A"), ("test2", 200, "B"), ("test3", 300, "A")] + + cursor.executemany( + """ + INSERT INTO test_table (name, value, category) + VALUES (:1, :2, :3) + """, + test_data, + ) + print("Inserted test data") + + # Commit changes + connection.commit() + + # Query data + print("\nQuery results:") + cursor.execute("SELECT * FROM test_table WHERE category = 'A'") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Create view + cursor.execute(""" + CREATE OR REPLACE VIEW test_view AS + SELECT category, COUNT(*) as count, AVG(value) as avg_value + FROM test_table + GROUP BY category + """) + print("\nCreated view") + + # Query view + print("\nView results:") + cursor.execute("SELECT * FROM test_view") + for row in cursor: + print({"category": row[0], "count": row[1], "avg_value": float(row[2])}) + + # Create index + cursor.execute("CREATE INDEX test_idx ON test_table (value)") + print("\nCreated index") + + # Query using index + print("\nQuery using index:") + cursor.execute("SELECT * FROM test_table WHERE value > 150") + for row in cursor: + print({"id": row[0], "name": row[1], "value": row[2], "category": row[3], "created_at": row[4].isoformat()}) + + # Get table metadata + cursor.execute(""" + SELECT column_name, data_type, 
data_length, nullable + FROM user_tab_columns + WHERE table_name = 'TEST_TABLE' + ORDER BY column_id + """) + print("\nTable metadata:") + for row in cursor: + print({"column": row[0], "type": row[1], "length": row[2], "nullable": row[3]}) + + # Create sequence + cursor.execute(""" + CREATE SEQUENCE test_seq + START WITH 1 + INCREMENT BY 1 + NOCACHE + NOCYCLE + """) + print("\nCreated sequence") + + # Use sequence + cursor.execute("SELECT test_seq.NEXTVAL FROM DUAL") + next_val = cursor.fetchone()[0] + print(f"Next sequence value: {next_val}") + + # Create procedure + cursor.execute(""" + CREATE OR REPLACE PROCEDURE test_proc ( + p_category IN VARCHAR2, + p_count OUT NUMBER + ) AS + BEGIN + SELECT COUNT(*) + INTO p_count + FROM test_table + WHERE category = p_category; + END; + """) + print("\nCreated procedure") + + # Execute procedure + cursor.execute(""" + DECLARE + v_count NUMBER; + BEGIN + test_proc('A', v_count); + DBMS_OUTPUT.PUT_LINE('Count for category A: ' || v_count); + END; + """) + + # Clean up + cursor.close() + connection.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/postgres/example_basic.py b/modules/postgres/example_basic.py new file mode 100644 index 000000000..611081023 --- /dev/null +++ b/modules/postgres/example_basic.py @@ -0,0 +1,99 @@ +import pandas as pd +import sqlalchemy +from sqlalchemy import text + +from testcontainers.postgres import PostgresContainer + + +def basic_example(): + with PostgresContainer() as postgres: + # Get connection URL + connection_url = postgres.get_connection_url() + + # Create SQLAlchemy engine + engine = sqlalchemy.create_engine(connection_url) + print("Connected to PostgreSQL") + + # Create a test table + create_table_sql = """ + CREATE TABLE test_table ( + id SERIAL PRIMARY KEY, + name VARCHAR(50), + value DECIMAL(10,2), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + + with engine.begin() as connection: + connection.execute(text(create_table_sql)) + print("Created test table") + + # Insert test data + test_data = [ + {"name": "test1", "value": 100.0}, + {"name": "test2", "value": 200.0}, + {"name": "test3", "value": 300.0}, + ] + + with engine.begin() as connection: + for data in test_data: + connection.execute(text("INSERT INTO test_table (name, value) VALUES (:name, :value)"), data) + print("Inserted test data") + + # Query data + with engine.connect() as connection: + result = connection.execute(text("SELECT * FROM test_table ORDER BY id")) + rows = result.fetchall() + + print("\nQuery results:") + for row in rows: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}, Created: {row[3]}") + + # Execute a more complex query + with engine.connect() as connection: + result = connection.execute( + text(""" + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count, + MIN(created_at) as first_created, + MAX(created_at) as last_created + FROM test_table + GROUP BY name + ORDER BY avg_value DESC + """) + ) + + print("\nAggregation results:") + for row in result: + print(f"Name: {row[0]}, Avg: {row[1]:.2f}, Count: {row[2]}, First: {row[3]}, Last: {row[4]}") + + # Convert to pandas DataFrame + df = pd.read_sql("SELECT * FROM test_table ORDER BY id", engine) + print("\nDataFrame:") + print(df) + + # Create and query a view + create_view_sql = """ + CREATE OR REPLACE VIEW test_view AS + SELECT + name, + AVG(value) as avg_value, + COUNT(*) as count + FROM test_table + GROUP BY name + """ + + with engine.begin() as connection: + connection.execute(text(create_view_sql)) + print("\nCreated 
view") + + result = connection.execute(text("SELECT * FROM test_view")) + print("\nView results:") + for row in result: + print(f"Name: {row[0]}, Avg: {row[1]:.2f}, Count: {row[2]}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/qdrant/example_basic.py b/modules/qdrant/example_basic.py new file mode 100644 index 000000000..589735e1e --- /dev/null +++ b/modules/qdrant/example_basic.py @@ -0,0 +1,149 @@ +import json +from datetime import datetime + +import numpy as np +from qdrant_client import QdrantClient +from qdrant_client.http import models + +from testcontainers.qdrant import QdrantContainer + + +def basic_example(): + with QdrantContainer() as qdrant: + # Get connection parameters + host = qdrant.get_container_host_ip() + port = qdrant.get_exposed_port(qdrant.port) + + # Create Qdrant client + client = QdrantClient(host=host, port=port) + print("Connected to Qdrant") + + # Create collection + collection_name = "test_collection" + vector_size = 128 + + client.create_collection( + collection_name=collection_name, + vectors_config=models.VectorParams(size=vector_size, distance=models.Distance.COSINE), + ) + print(f"Created collection: {collection_name}") + + # Generate test vectors and payloads + num_vectors = 5 + vectors = np.random.rand(num_vectors, vector_size).tolist() + + payloads = [ + { + "text": "AI and machine learning are transforming industries", + "category": "Technology", + "tags": ["AI", "ML", "innovation"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "New study reveals benefits of meditation", + "category": "Health", + "tags": ["wellness", "mental health"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "Global warming reaches critical levels", + "category": "Environment", + "tags": ["climate", "sustainability"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "Stock market shows strong growth", + "category": "Finance", + "tags": ["investing", "markets"], + "timestamp": datetime.utcnow().isoformat(), + }, + { + "text": "New restaurant opens in downtown", + "category": "Food", + "tags": ["dining", "local"], + "timestamp": datetime.utcnow().isoformat(), + }, + ] + + # Upload vectors with payloads + client.upsert( + collection_name=collection_name, + points=models.Batch(ids=list(range(num_vectors)), vectors=vectors, payloads=payloads), + ) + print("Uploaded vectors with payloads") + + # Search vectors + search_result = client.search(collection_name=collection_name, query_vector=vectors[0], limit=3) + print("\nSearch results:") + for scored_point in search_result: + print( + json.dumps( + {"id": scored_point.id, "score": scored_point.score, "payload": scored_point.payload}, indent=2 + ) + ) + + # Filtered search + filter_result = client.search( + collection_name=collection_name, + query_vector=vectors[0], + query_filter=models.Filter( + must=[models.FieldCondition(key="category", match=models.MatchValue(value="Technology"))] + ), + limit=2, + ) + print("\nFiltered search results:") + for scored_point in filter_result: + print( + json.dumps( + {"id": scored_point.id, "score": scored_point.score, "payload": scored_point.payload}, indent=2 + ) + ) + + # Create payload index + client.create_payload_index( + collection_name=collection_name, field_name="category", field_schema=models.PayloadFieldSchema.KEYWORD + ) + print("\nCreated payload index on category field") + + # Create vector index + client.create_payload_index( + collection_name=collection_name, field_name="tags", 
field_schema=models.PayloadFieldSchema.KEYWORD + ) + print("Created payload index on tags field") + + # Scroll through collection + scroll_result = client.scroll(collection_name=collection_name, limit=10, with_payload=True, with_vectors=True) + print("\nScrolled through collection:") + for point in scroll_result[0]: + print(json.dumps({"id": point.id, "payload": point.payload}, indent=2)) + + # Get collection info + collection_info = client.get_collection(collection_name) + print("\nCollection info:") + print( + json.dumps( + { + "name": collection_info.name, + "vectors_count": collection_info.vectors_count, + "points_count": collection_info.points_count, + "status": collection_info.status, + }, + indent=2, + ) + ) + + # Update payload + client.set_payload(collection_name=collection_name, payload={"new_field": "updated value"}, points=[0, 1]) + print("\nUpdated payload for points 0 and 1") + + # Delete points + client.delete(collection_name=collection_name, points_selector=models.PointIdsList(points=[4])) + print("Deleted point with id 4") + + # Clean up + client.delete_collection(collection_name) + print("\nDeleted collection") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/rabbitmq/example_basic.py b/modules/rabbitmq/example_basic.py new file mode 100644 index 000000000..906a0e24f --- /dev/null +++ b/modules/rabbitmq/example_basic.py @@ -0,0 +1,98 @@ +import json +import time +from threading import Thread + +import pika + +from testcontainers.rabbitmq import RabbitMQContainer + + +def basic_example(): + with RabbitMQContainer() as rabbitmq: + # Get connection parameters + host = rabbitmq.get_container_host_ip() + port = rabbitmq.get_exposed_port(rabbitmq.port) + username = rabbitmq.username + password = rabbitmq.password + + # Create connection + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, credentials=credentials) + connection = pika.BlockingConnection(parameters) + channel = connection.channel() + print("Connected to RabbitMQ") + + # Declare exchange + exchange_name = "test_exchange" + channel.exchange_declare(exchange=exchange_name, exchange_type="direct", durable=True) + print(f"Declared exchange: {exchange_name}") + + # Declare queues + queues = {"queue1": "routing_key1", "queue2": "routing_key2"} + + for queue_name, routing_key in queues.items(): + channel.queue_declare(queue=queue_name, durable=True) + channel.queue_bind(exchange=exchange_name, queue=queue_name, routing_key=routing_key) + print(f"Declared and bound queue: {queue_name}") + + # Define message handler + def message_handler(ch, method, properties, body): + message = json.loads(body) + print(f"\nReceived message on {method.routing_key}:") + print(json.dumps(message, indent=2)) + ch.basic_ack(delivery_tag=method.delivery_tag) + + # Start consuming in a separate thread + def consume_messages(): + channel.basic_qos(prefetch_count=1) + for queue_name in queues: + channel.basic_consume(queue=queue_name, on_message_callback=message_handler) + channel.start_consuming() + + consumer_thread = Thread(target=consume_messages) + consumer_thread.daemon = True + consumer_thread.start() + + # Publish messages + test_messages = [ + { + "queue": "queue1", + "routing_key": "routing_key1", + "message": {"id": 1, "content": "Message for queue 1", "timestamp": time.time()}, + }, + { + "queue": "queue2", + "routing_key": "routing_key2", + "message": {"id": 2, "content": "Message for queue 2", "timestamp": time.time()}, + }, + ] + + for msg in 
test_messages: + channel.basic_publish( + exchange=exchange_name, + routing_key=msg["routing_key"], + body=json.dumps(msg["message"]), + properties=pika.BasicProperties( + delivery_mode=2, # make message persistent + content_type="application/json", + ), + ) + print(f"Published message to {msg['queue']}") + + # Wait for messages to be processed + time.sleep(2) + + # Get queue information + print("\nQueue information:") + for queue_name in queues: + queue = channel.queue_declare(queue=queue_name, passive=True) + print(f"{queue_name}:") + print(f" Messages: {queue.method.message_count}") + print(f" Consumers: {queue.method.consumer_count}") + + # Clean up + connection.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/redis/example_basic.py b/modules/redis/example_basic.py new file mode 100644 index 000000000..5fce0a7b7 --- /dev/null +++ b/modules/redis/example_basic.py @@ -0,0 +1,84 @@ +from datetime import timedelta + +import redis + +from testcontainers.redis import RedisContainer + + +def basic_example(): + with RedisContainer() as redis_container: + # Get connection parameters + host = redis_container.get_container_host_ip() + port = redis_container.get_exposed_port(redis_container.port) + + # Create Redis client + client = redis.Redis(host=host, port=port, decode_responses=True) + print("Connected to Redis") + + # String operations + client.set("greeting", "Hello, Redis!") + value = client.get("greeting") + print(f"\nString value: {value}") + + # List operations + client.lpush("tasks", "task1", "task2", "task3") + tasks = client.lrange("tasks", 0, -1) + print("\nTasks list:") + for task in tasks: + print(f"- {task}") + + # Set operations + client.sadd("tags", "python", "redis", "docker", "testing") + tags = client.smembers("tags") + print("\nTags set:") + for tag in tags: + print(f"- {tag}") + + # Hash operations + user_data = {"name": "John Doe", "email": "john@example.com", "age": "30"} + client.hset("user:1", mapping=user_data) + user = client.hgetall("user:1") + print("\nUser hash:") + for field, value in user.items(): + print(f"{field}: {value}") + + # Sorted set operations + scores = {"player1": 100, "player2": 200, "player3": 150} + client.zadd("leaderboard", scores) + leaderboard = client.zrevrange("leaderboard", 0, -1, withscores=True) + print("\nLeaderboard:") + for player, score in leaderboard: + print(f"{player}: {score}") + + # Key expiration + client.setex("temp_key", timedelta(seconds=10), "This will expire") + ttl = client.ttl("temp_key") + print(f"\nTemp key TTL: {ttl} seconds") + + # Pipeline operations + with client.pipeline() as pipe: + pipe.set("pipeline_key1", "value1") + pipe.set("pipeline_key2", "value2") + pipe.set("pipeline_key3", "value3") + pipe.execute() + print("\nPipeline operations completed") + + # Pub/Sub operations + pubsub = client.pubsub() + pubsub.subscribe("test_channel") + + # Publish a message + client.publish("test_channel", "Hello from Redis!") + + # Get the message + message = pubsub.get_message() + if message and message["type"] == "message": + print(f"\nReceived message: {message['data']}") + + # Clean up + pubsub.unsubscribe() + pubsub.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/registry/example_basic.py b/modules/registry/example_basic.py new file mode 100644 index 000000000..0bd136872 --- /dev/null +++ b/modules/registry/example_basic.py @@ -0,0 +1,92 @@ +import json + +import requests + +from testcontainers.registry import RegistryContainer + + +def basic_example(): + with 
RegistryContainer() as registry: + # Get connection parameters + host = registry.get_container_host_ip() + port = registry.get_exposed_port(registry.port) + registry_url = f"http://{host}:{port}" + print(f"Registry URL: {registry_url}") + + # Get registry version + version_response = requests.get(f"{registry_url}/v2/") + print(f"Registry version: {version_response.headers.get('Docker-Distribution-Api-Version')}") + + # List repositories + catalog_response = requests.get(f"{registry_url}/v2/_catalog") + repositories = catalog_response.json()["repositories"] + print("\nRepositories:") + print(json.dumps(repositories, indent=2)) + + # Create test repository + test_repo = "test-repo" + test_tag = "latest" + + # Create a simple manifest + manifest = { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1000, + "digest": "sha256:1234567890abcdef", + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 2000, + "digest": "sha256:abcdef1234567890", + } + ], + } + + # Upload manifest + manifest_url = f"{registry_url}/v2/{test_repo}/manifests/{test_tag}" + headers = {"Content-Type": "application/vnd.docker.distribution.manifest.v2+json"} + manifest_response = requests.put(manifest_url, json=manifest, headers=headers) + print(f"\nUploaded manifest: {manifest_response.status_code}") + + # List tags for repository + tags_url = f"{registry_url}/v2/{test_repo}/tags/list" + tags_response = requests.get(tags_url) + tags = tags_response.json()["tags"] + print("\nTags:") + print(json.dumps(tags, indent=2)) + + # Get manifest + manifest_response = requests.get(manifest_url, headers=headers) + manifest_data = manifest_response.json() + print("\nManifest:") + print(json.dumps(manifest_data, indent=2)) + + # Get manifest digest + digest = manifest_response.headers.get("Docker-Content-Digest") + print(f"\nManifest digest: {digest}") + + # Delete manifest + delete_response = requests.delete(manifest_url) + print(f"\nDeleted manifest: {delete_response.status_code}") + + # Verify deletion + verify_response = requests.get(manifest_url) + print(f"Manifest exists: {verify_response.status_code == 200}") + + # Get registry configuration + config_url = f"{registry_url}/v2/" + config_response = requests.get(config_url) + print("\nRegistry configuration:") + print(json.dumps(dict(config_response.headers), indent=2)) + + # Get registry health + health_url = f"{registry_url}/v2/" + health_response = requests.get(health_url) + print(f"\nRegistry health: {health_response.status_code == 200}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/scylla/example_basic.py b/modules/scylla/example_basic.py new file mode 100644 index 000000000..fa26369cc --- /dev/null +++ b/modules/scylla/example_basic.py @@ -0,0 +1,153 @@ +import json +from datetime import datetime + +from cassandra.auth import PlainTextAuthProvider +from cassandra.cluster import Cluster + +from testcontainers.scylla import ScyllaContainer + + +def basic_example(): + with ScyllaContainer() as scylla: + # Get connection parameters + host = scylla.get_container_host_ip() + port = scylla.get_exposed_port(scylla.port) + username = scylla.username + password = scylla.password + + # Create Scylla client + auth_provider = PlainTextAuthProvider(username=username, password=password) + cluster = Cluster([host], port=port, auth_provider=auth_provider) + session = cluster.connect() + 
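+ # (cassandra-driver note: connect() without arguments returns a session with no
+ # default keyspace, which is why set_keyspace("test_keyspace") is called below
+ # before any table DDL runs)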
print("Connected to Scylla") + + # Create keyspace + session.execute(""" + CREATE KEYSPACE IF NOT EXISTS test_keyspace + WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1} + """) + print("Created keyspace") + + # Use keyspace + session.set_keyspace("test_keyspace") + + # Create table + session.execute(""" + CREATE TABLE IF NOT EXISTS test_table ( + id UUID PRIMARY KEY, + name text, + value int, + category text, + created_at timestamp + ) + """) + print("Created table") + + # Insert test data + test_data = [ + { + "id": "550e8400-e29b-41d4-a716-446655440000", + "name": "test1", + "value": 100, + "category": "A", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440001", + "name": "test2", + "value": 200, + "category": "B", + "created_at": datetime.utcnow(), + }, + { + "id": "550e8400-e29b-41d4-a716-446655440002", + "name": "test3", + "value": 300, + "category": "A", + "created_at": datetime.utcnow(), + }, + ] + + insert_stmt = session.prepare(""" + INSERT INTO test_table (id, name, value, category, created_at) + VALUES (uuid(), ?, ?, ?, ?) + """) + + for data in test_data: + session.execute(insert_stmt, (data["name"], data["value"], data["category"], data["created_at"])) + print("Inserted test data") + + # Query data + print("\nQuery results:") + rows = session.execute("SELECT * FROM test_table WHERE category = 'A' ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create materialized view + session.execute(""" + CREATE MATERIALIZED VIEW IF NOT EXISTS test_view AS + SELECT category, name, value, created_at + FROM test_table + WHERE category IS NOT NULL AND name IS NOT NULL + PRIMARY KEY (category, name) + """) + print("\nCreated materialized view") + + # Query materialized view + print("\nMaterialized view results:") + rows = session.execute("SELECT * FROM test_view WHERE category = 'A'") + for row in rows: + print( + json.dumps( + { + "category": row.category, + "name": row.name, + "value": row.value, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Create secondary index + session.execute("CREATE INDEX IF NOT EXISTS ON test_table (value)") + print("\nCreated secondary index") + + # Query using secondary index + print("\nQuery using secondary index:") + rows = session.execute("SELECT * FROM test_table WHERE value > 150 ALLOW FILTERING") + for row in rows: + print( + json.dumps( + { + "id": str(row.id), + "name": row.name, + "value": row.value, + "category": row.category, + "created_at": row.created_at.isoformat(), + }, + indent=2, + ) + ) + + # Get table metadata + table_meta = session.cluster.metadata.keyspaces["test_keyspace"].tables["test_table"] + print("\nTable metadata:") + print(f"Columns: {[col.name for col in table_meta.columns.values()]}") + print(f"Partition key: {[col.name for col in table_meta.partition_key]}") + print(f"Clustering key: {[col.name for col in table_meta.clustering_key]}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/selenium/example_basic.py b/modules/selenium/example_basic.py new file mode 100644 index 000000000..f136126fb --- /dev/null +++ b/modules/selenium/example_basic.py @@ -0,0 +1,49 @@ +from selenium.webdriver.common.by import By +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.ui import WebDriverWait + +from 
testcontainers.selenium import SeleniumContainer + + +def basic_example(): + with SeleniumContainer() as selenium: + # Get the Selenium WebDriver + driver = selenium.get_driver() + + try: + # Navigate to a test page + driver.get("https://www.python.org") + print("Navigated to python.org") + + # Wait for the search box to be present + search_box = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "id-search-field"))) + + # Type in the search box + search_box.send_keys("selenium") + print("Entered search term") + + # Click the search button + search_button = driver.find_element(By.ID, "submit") + search_button.click() + print("Clicked search button") + + # Wait for search results + WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "list-recent-events"))) + + # Get search results + results = driver.find_elements(By.CSS_SELECTOR, ".list-recent-events li") + print("\nSearch results:") + for result in results[:3]: # Print first 3 results + print(result.text) + + # Take a screenshot + driver.save_screenshot("python_search_results.png") + print("\nScreenshot saved as 'python_search_results.png'") + + finally: + # Clean up + driver.quit() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/sftp/example_basic.py b/modules/sftp/example_basic.py new file mode 100644 index 000000000..f5d2058eb --- /dev/null +++ b/modules/sftp/example_basic.py @@ -0,0 +1,137 @@ +import json +import os +from datetime import datetime + +import paramiko + +from testcontainers.sftp import SftpContainer + + +def basic_example(): + with SftpContainer() as sftp: + # Get connection parameters + host = sftp.get_container_host_ip() + port = sftp.get_exposed_port(sftp.port) + username = sftp.username + password = sftp.password + + # Create SSH client + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(host, port, username, password) + print("Connected to SFTP server") + + # Create SFTP client + sftp_client = ssh.open_sftp() + + # Create test directory + test_dir = "/home/testuser/test_dir" + sftp_client.mkdir(test_dir) + print(f"Created directory: {test_dir}") + + # Create and upload test files + test_files = [ + {"name": "test1.txt", "content": "This is test file 1"}, + {"name": "test2.txt", "content": "This is test file 2"}, + {"name": "test3.txt", "content": "This is test file 3"}, + ] + + for file_info in test_files: + local_path = f"/tmp/{file_info['name']}" + remote_path = f"{test_dir}/{file_info['name']}" + + # Create local file + with open(local_path, "w") as f: + f.write(file_info["content"]) + + # Upload file + sftp_client.put(local_path, remote_path) + print(f"Uploaded file: {file_info['name']}") + + # Remove local file + os.remove(local_path) + + # List directory contents + print("\nDirectory contents:") + for entry in sftp_client.listdir_attr(test_dir): + print( + json.dumps( + { + "filename": entry.filename, + "size": entry.st_size, + "modified": datetime.fromtimestamp(entry.st_mtime).isoformat(), + }, + indent=2, + ) + ) + + # Download and read file + print("\nReading file contents:") + for file_info in test_files: + remote_path = f"{test_dir}/{file_info['name']}" + local_path = f"/tmp/{file_info['name']}" + + # Download file + sftp_client.get(remote_path, local_path) + + # Read and print contents + with open(local_path) as f: + content = f.read() + print(f"\n{file_info['name']}:") + print(content) + + # Remove local file + os.remove(local_path) + + # Create nested directory + nested_dir = 
f"{test_dir}/nested" + sftp_client.mkdir(nested_dir) + print(f"\nCreated nested directory: {nested_dir}") + + # Move file to nested directory + old_path = f"{test_dir}/test1.txt" + new_path = f"{nested_dir}/test1.txt" + sftp_client.rename(old_path, new_path) + print("Moved file to nested directory") + + # List nested directory + print("\nNested directory contents:") + for entry in sftp_client.listdir_attr(nested_dir): + print( + json.dumps( + { + "filename": entry.filename, + "size": entry.st_size, + "modified": datetime.fromtimestamp(entry.st_mtime).isoformat(), + }, + indent=2, + ) + ) + + # Get file attributes + print("\nFile attributes:") + for file_info in test_files: + remote_path = f"{test_dir}/{file_info['name']}" + try: + attrs = sftp_client.stat(remote_path) + print(f"\n{file_info['name']}:") + print( + json.dumps( + { + "size": attrs.st_size, + "permissions": oct(attrs.st_mode)[-3:], + "modified": datetime.fromtimestamp(attrs.st_mtime).isoformat(), + }, + indent=2, + ) + ) + except FileNotFoundError: + print(f"File not found: {file_info['name']}") + + # Clean up + sftp_client.close() + ssh.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/test_module_import/examples/01_basic_import.py b/modules/test_module_import/examples/01_basic_import.py new file mode 100644 index 000000000..9068c9944 --- /dev/null +++ b/modules/test_module_import/examples/01_basic_import.py @@ -0,0 +1,58 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_module_import(): + try: + import test_module + + print("\nSuccessfully imported test_module") + print(f"Module version: {test_module.__version__}") + print(f"Module description: {test_module.__description__}") + except ImportError as e: + print(f"\nFailed to import test_module: {e}") + + +def test_submodule_import(): + try: + from test_module import submodule + + print("\nSuccessfully imported test_module.submodule") + print(f"Submodule function result: {submodule.test_function()}") + except ImportError as e: + print(f"\nFailed to import test_module.submodule: {e}") + + +def test_package_import(): + try: + import test_package + + print("\nSuccessfully imported test_package") + print(f"Package version: {test_package.__version__}") + except ImportError as e: + print(f"\nFailed to import test_package: {e}") + + +def basic_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test various imports + test_module_import() + test_submodule_import() + test_package_import() + + # Clean up + if "test_module" in sys.modules: + del sys.modules["test_module"] + if "test_package" in sys.modules: + del sys.modules["test_package"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/test_module_import/examples/02_module_reloading.py b/modules/test_module_import/examples/02_module_reloading.py new file mode 100644 index 000000000..4e05ff8bd --- /dev/null +++ b/modules/test_module_import/examples/02_module_reloading.py @@ -0,0 +1,41 @@ +import importlib +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_module_reloading(): + try: + import test_module + + print("\nSuccessfully imported test_module") + print(f"Initial version: {test_module.__version__}") + + # Simulate module changes by reloading + 
importlib.reload(test_module) + print("\nSuccessfully reloaded test_module") + print(f"Updated version: {test_module.__version__}") + except ImportError as e: + print(f"\nFailed to import test_module: {e}") + except NameError: + print("\nCould not reload test_module (not imported)") + + +def reloading_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test module reloading + test_module_reloading() + + # Clean up + if "test_module" in sys.modules: + del sys.modules["test_module"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + reloading_example() diff --git a/modules/test_module_import/examples/03_version_specific.py b/modules/test_module_import/examples/03_version_specific.py new file mode 100644 index 000000000..b24a6b47e --- /dev/null +++ b/modules/test_module_import/examples/03_version_specific.py @@ -0,0 +1,34 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_version_import(): + try: + import test_module_v2 + + print("\nSuccessfully imported test_module_v2") + print(f"Module version: {test_module_v2.__version__}") + print(f"Module features: {test_module_v2.FEATURES}") + except ImportError as e: + print(f"\nFailed to import test_module_v2: {e}") + + +def version_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test version-specific imports + test_version_import() + + # Clean up + if "test_module_v2" in sys.modules: + del sys.modules["test_module_v2"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + version_example() diff --git a/modules/test_module_import/examples/04_dependencies_and_env.py b/modules/test_module_import/examples/04_dependencies_and_env.py new file mode 100644 index 000000000..de49fc55b --- /dev/null +++ b/modules/test_module_import/examples/04_dependencies_and_env.py @@ -0,0 +1,48 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_deps_import(): + try: + import test_module_with_deps + + print("\nSuccessfully imported test_module_with_deps") + print(f"Dependencies: {test_module_with_deps.DEPENDENCIES}") + print(f"Required versions: {test_module_with_deps.REQUIRED_VERSIONS}") + except ImportError as e: + print(f"\nFailed to import test_module_with_deps: {e}") + + +def test_env_import(): + try: + import test_module_with_env + + print("\nSuccessfully imported test_module_with_env") + print(f"Environment variables: {test_module_with_env.ENV_VARS}") + print(f"Environment values: {test_module_with_env.ENV_VALUES}") + except ImportError as e: + print(f"\nFailed to import test_module_with_env: {e}") + + +def deps_and_env_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test dependencies and environment imports + test_deps_import() + test_env_import() + + # Clean up + if "test_module_with_deps" in sys.modules: + del sys.modules["test_module_with_deps"] + if "test_module_with_env" in sys.modules: + del sys.modules["test_module_with_env"] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + deps_and_env_example() diff --git 
a/modules/test_module_import/examples/05_advanced_features.py b/modules/test_module_import/examples/05_advanced_features.py new file mode 100644 index 000000000..45c24faa8 --- /dev/null +++ b/modules/test_module_import/examples/05_advanced_features.py @@ -0,0 +1,59 @@ +import sys +from pathlib import Path + +from testcontainers.test_module_import import TestModuleImportContainer + + +def test_custom_loader_import(): + try: + import test_module_custom_loader + + print("\nSuccessfully imported test_module_custom_loader") + print(f"Loader type: {test_module_custom_loader.LOADER_TYPE}") + print(f"Loader configuration: {test_module_custom_loader.LOADER_CONFIG}") + except ImportError as e: + print(f"\nFailed to import test_module_custom_loader: {e}") + + +def test_namespace_import(): + try: + import test_namespace_package + + print("\nSuccessfully imported test_namespace_package") + print(f"Namespace: {test_namespace_package.__namespace__}") + print(f"Available subpackages: {test_namespace_package.SUBPACKAGES}") + except ImportError as e: + print(f"\nFailed to import test_namespace_package: {e}") + + +def test_entry_points_import(): + try: + import test_module_with_entry_points + + print("\nSuccessfully imported test_module_with_entry_points") + print(f"Entry points: {test_module_with_entry_points.ENTRY_POINTS}") + print(f"Entry point groups: {test_module_with_entry_points.ENTRY_POINT_GROUPS}") + except ImportError as e: + print(f"\nFailed to import test_module_with_entry_points: {e}") + + +def advanced_features_example(): + with TestModuleImportContainer(): + # Add test module to Python path + sys.path.append(str(Path(__file__).parent)) + print("Added test module to Python path") + + # Test advanced features + test_custom_loader_import() + test_namespace_import() + test_entry_points_import() + + # Clean up + for module in ["test_module_custom_loader", "test_namespace_package", "test_module_with_entry_points"]: + if module in sys.modules: + del sys.modules[module] + print("\nCleaned up imported modules") + + +if __name__ == "__main__": + advanced_features_example() diff --git a/modules/trino/example_basic.py b/modules/trino/example_basic.py new file mode 100644 index 000000000..f2b351243 --- /dev/null +++ b/modules/trino/example_basic.py @@ -0,0 +1,66 @@ +import trino +from trino.exceptions import TrinoQueryError + +from testcontainers.trino import TrinoContainer + + +def basic_example(): + with TrinoContainer() as trino_container: + # Get connection parameters + host = trino_container.get_container_host_ip() + port = trino_container.get_exposed_port(trino_container.port) + + # Create Trino client + conn = trino.dbapi.connect(host=host, port=port, user="test", catalog="memory", schema="default") + cur = conn.cursor() + + # Create a test table + try: + cur.execute(""" + CREATE TABLE memory.default.test_table ( + id BIGINT, + name VARCHAR, + value DOUBLE + ) + """) + print("Created test table") + except TrinoQueryError as e: + print(f"Table might already exist: {e}") + + # Insert test data + test_data = [(1, "test1", 100.0), (2, "test2", 200.0), (3, "test3", 300.0)] + + for row in test_data: + cur.execute("INSERT INTO memory.default.test_table VALUES (%s, %s, %s)", row) + print("Inserted test data") + + # Query data + cur.execute("SELECT * FROM memory.default.test_table ORDER BY id") + rows = cur.fetchall() + + print("\nQuery results:") + for row in rows: + print(f"ID: {row[0]}, Name: {row[1]}, Value: {row[2]}") + + # Execute a more complex query + cur.execute(""" + SELECT + name, + AVG(value) as 
avg_value, + COUNT(*) as count + FROM memory.default.test_table + GROUP BY name + ORDER BY avg_value DESC + """) + + print("\nAggregation results:") + for row in cur.fetchall(): + print(f"Name: {row[0]}, Average Value: {row[1]}, Count: {row[2]}") + + # Clean up + cur.close() + conn.close() + + +if __name__ == "__main__": + basic_example() diff --git a/modules/vault/example_basic.py b/modules/vault/example_basic.py new file mode 100644 index 000000000..2dd873f7a --- /dev/null +++ b/modules/vault/example_basic.py @@ -0,0 +1,75 @@ +import json + +import hvac + +from testcontainers.vault import VaultContainer + + +def basic_example(): + with VaultContainer() as vault: + # Get connection parameters + host = vault.get_container_host_ip() + port = vault.get_exposed_port(vault.port) + token = vault.token + + # Create Vault client + client = hvac.Client(url=f"http://{host}:{port}", token=token) + print("Connected to Vault") + + # Enable KV secrets engine + client.sys.enable_secrets_engine(backend_type="kv", path="secret", options={"version": "2"}) + print("Enabled KV secrets engine") + + # Write secrets + test_secrets = { + "database": {"username": "admin", "password": "secret123", "host": "localhost"}, + "api": {"key": "api-key-123", "endpoint": "https://api.example.com"}, + } + + for path, secret in test_secrets.items(): + client.secrets.kv.v2.create_or_update_secret(path=path, secret=secret) + print(f"Created secret at: {path}") + + # Read secrets + print("\nReading secrets:") + for path in test_secrets: + secret = client.secrets.kv.v2.read_secret_version(path=path) + print(f"\nSecret at {path}:") + print(json.dumps(secret["data"]["data"], indent=2)) + + # Enable and configure AWS secrets engine + client.sys.enable_secrets_engine(backend_type="aws", path="aws") + print("\nEnabled AWS secrets engine") + + # Configure AWS credentials + client.secrets.aws.configure_root( + access_key="test-access-key", secret_key="test-secret-key", region="us-east-1" + ) + print("Configured AWS credentials") + + # Create a role + client.secrets.aws.create_role( + name="test-role", + credential_type="iam_user", + policy_document=json.dumps( + { + "Version": "2012-10-17", + "Statement": [{"Effect": "Allow", "Action": "s3:ListAllMyBuckets", "Resource": "*"}], + } + ), + ) + print("Created AWS role") + + # Generate AWS credentials + aws_creds = client.secrets.aws.generate_credentials(name="test-role") + print("\nGenerated AWS credentials:") + print(json.dumps(aws_creds["data"], indent=2)) + + # List enabled secrets engines + print("\nEnabled secrets engines:") + for path, engine in client.sys.list_mounted_secrets_engines()["data"].items(): + print(f"Path: {path}, Type: {engine['type']}") + + +if __name__ == "__main__": + basic_example() diff --git a/modules/weaviate/example_basic.py b/modules/weaviate/example_basic.py new file mode 100644 index 000000000..0c7097723 --- /dev/null +++ b/modules/weaviate/example_basic.py @@ -0,0 +1,143 @@ +import json +from datetime import datetime + +import weaviate + +from testcontainers.weaviate import WeaviateContainer + + +def basic_example(): + with WeaviateContainer() as weaviate_container: + # Get connection parameters + host = weaviate_container.get_container_host_ip() + port = weaviate_container.get_exposed_port(weaviate_container.port) + + # Create Weaviate client + client = weaviate.Client( + url=f"http://{host}:{port}", auth_client_secret=weaviate.AuthApiKey(api_key=weaviate_container.api_key) + ) + print("Connected to Weaviate") + + # Create schema + schema = { + "classes": 
[ + { + "class": "Article", + "description": "A class for news articles", + "vectorizer": "text2vec-transformers", + "properties": [ + {"name": "title", "dataType": ["text"], "description": "The title of the article"}, + {"name": "content", "dataType": ["text"], "description": "The content of the article"}, + {"name": "category", "dataType": ["text"], "description": "The category of the article"}, + {"name": "publishedAt", "dataType": ["date"], "description": "When the article was published"}, + ], + } + ] + } + + client.schema.create(schema) + print("Created schema") + + # Add objects + articles = [ + { + "title": "AI Breakthrough in Natural Language Processing", + "content": "Researchers have made significant progress in understanding and generating human language.", + "category": "Technology", + "publishedAt": datetime.utcnow().isoformat(), + }, + { + "title": "New Study Shows Benefits of Exercise", + "content": "Regular physical activity has been linked to improved mental health and longevity.", + "category": "Health", + "publishedAt": datetime.utcnow().isoformat(), + }, + { + "title": "Global Climate Summit Reaches Agreement", + "content": "World leaders have agreed on new measures to combat climate change.", + "category": "Environment", + "publishedAt": datetime.utcnow().isoformat(), + }, + ] + + for article in articles: + client.data_object.create(data_object=article, class_name="Article") + print("Added test articles") + + # Query objects + result = client.query.get("Article", ["title", "category", "publishedAt"]).do() + print("\nAll articles:") + print(json.dumps(result, indent=2)) + + # Semantic search + semantic_result = ( + client.query.get("Article", ["title", "content", "category"]) + .with_near_text({"concepts": ["artificial intelligence"]}) + .with_limit(2) + .do() + ) + print("\nSemantic search results:") + print(json.dumps(semantic_result, indent=2)) + + # Filtered search + filtered_result = ( + client.query.get("Article", ["title", "category"]) + .with_where({"path": ["category"], "operator": "Equal", "valueText": "Technology"}) + .do() + ) + print("\nFiltered search results:") + print(json.dumps(filtered_result, indent=2)) + + # Create cross-reference + cross_ref_schema = { + "classes": [ + { + "class": "Author", + "description": "A class for article authors", + "vectorizer": "text2vec-transformers", + "properties": [ + {"name": "name", "dataType": ["text"], "description": "The name of the author"}, + {"name": "writes", "dataType": ["Article"], "description": "Articles written by the author"}, + ], + } + ] + } + + client.schema.create(cross_ref_schema) + print("\nCreated cross-reference schema") + + # Add author with cross-reference + author_uuid = client.data_object.create(data_object={"name": "John Doe"}, class_name="Author") + + article_uuid = result["data"]["Get"]["Article"][0]["_additional"]["id"] + client.data_object.reference.add( + from_uuid=author_uuid, + from_property_name="writes", + to_uuid=article_uuid, + from_class_name="Author", + to_class_name="Article", + ) + print("Added author with cross-reference") + + # Query with cross-reference + cross_ref_result = ( + client.query.get("Author", ["name"]) + .with_additional(["id"]) + .with_references({"writes": {"properties": ["title", "category"]}}) + .do() + ) + print("\nCross-reference query results:") + print(json.dumps(cross_ref_result, indent=2)) + + # Create aggregation + agg_result = client.query.aggregate("Article").with_fields("category").with_meta_count().do() + print("\nAggregation results:") + 
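+ # (shape assumption for the v3 weaviate-client: the aggregation response nests as
+ # data -> Aggregate -> Article, with meta -> count holding the matching-object count)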
print(json.dumps(agg_result, indent=2)) + + # Clean up + client.schema.delete_all() + print("\nCleaned up schema") + + +if __name__ == "__main__": + basic_example() diff --git a/poetry.lock b/poetry.lock index d80fa130a..5a892b169 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. [[package]] name = "alabaster" @@ -1907,7 +1907,7 @@ description = "Python DBI driver for DB2 (LUW, zOS, i5) and IDS" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"db2\"" +markers = "platform_machine != \"aarch64\" and platform_machine != \"arm64\" and extra == \"db2\"" files = [ {file = "ibm_db-3.2.3-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:3399466141c29704f4e8ba709a67ba27ab413239c0244c3c4510126e946ff603"}, {file = "ibm_db-3.2.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e12ff6426d4f718e1ff6615e64a2880bd570826f19a031c82dbf296714cafd7d"}, @@ -1954,7 +1954,7 @@ description = "SQLAlchemy support for IBM Data Servers" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"db2\"" +markers = "platform_machine != \"aarch64\" and platform_machine != \"arm64\" and extra == \"db2\"" files = [ {file = "ibm_db_sa-0.4.1-py3-none-any.whl", hash = "sha256:49926ba9799e6ebd9ddd847141537c83d179ecf32fe24b7e997ac4614d3f616a"}, {file = "ibm_db_sa-0.4.1.tar.gz", hash = "sha256:a46df130a3681646490925cf4e1bca12b46283f71eea39b70b4f9a56e95341ac"}, @@ -5951,4 +5951,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.1" python-versions = ">=3.9,<4.0" -content-hash = "bacae2cc8c7947dae5d1f6f05bc1a98d488470a5947f95479edabe75cf036f41" +content-hash = "24c7e4335574e67bef9fac3bbb2b17a3199449734010d7931a60efa679529235" diff --git a/pyproject.toml b/pyproject.toml index 1ec495d02..e5cec61af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,12 +1,12 @@ [tool.poetry] name = "testcontainers" -version = "4.10.0" # auto-incremented by release-please +version = "4.10.0" # auto-incremented by release-please description = "Python library for throwaway instances of anything that can run in a Docker container" authors = ["Sergey Pirogov "] maintainers = [ "Balint Bartha ", "David Ankin ", - "Vemund Santi " + "Vemund Santi ", ] readme = "README.md" keywords = ["testing", "logging", "docker", "test automation"] @@ -29,7 +29,7 @@ classifiers = [ packages = [ { include = "testcontainers", from = "core" }, { include = "testcontainers", from = "modules/arangodb" }, - { include = "testcontainers", from = "modules/aws"}, + { include = "testcontainers", from = "modules/aws" }, { include = "testcontainers", from = "modules/azurite" }, { include = "testcontainers", from = "modules/cassandra" }, { include = "testcontainers", from = "modules/chroma" }, @@ -39,7 +39,7 @@ packages = [ { include = "testcontainers", from = "modules/db2" }, { include = "testcontainers", from = "modules/elasticsearch" }, { include = "testcontainers", from = "modules/generic" }, - { include = "testcontainers", from = "modules/test_module_import"}, + { include = "testcontainers", from = "modules/test_module_import" }, { include = "testcontainers", from = "modules/google" }, { include = "testcontainers", from = "modules/influxdb" }, { include = "testcontainers", from = "modules/k3s" }, @@ -79,9 +79,9 @@ packages = [ [tool.poetry.dependencies] python = ">=3.9,<4.0" -docker = "*" # ">=4.0" 
-urllib3 = "*" # "<2.0" -wrapt = "*" # "^1.16.0" +docker = "*" # ">=4.0" +urllib3 = "*" # "<2.0" +wrapt = "*" # "^1.16.0" typing-extensions = "*" python-dotenv = "*" @@ -118,7 +118,7 @@ httpx = { version = "*", optional = true } azure-cosmos = { version = "*", optional = true } cryptography = { version = "*", optional = true } trino = { version = "*", optional = true } -ibm_db_sa = { version = "*", optional = true } +ibm_db_sa = { version = "*", optional = true, markers = "platform_machine != 'aarch64' and platform_machine != 'arm64'" } [tool.poetry.extras] arangodb = ["python-arango"] @@ -130,7 +130,10 @@ cosmosdb = ["azure-cosmos"] cockroachdb = [] db2 = ["sqlalchemy", "ibm_db_sa"] elasticsearch = [] -generic = ["httpx", "redis"] # The advance doctests for ServerContainer require redis +generic = [ + "httpx", + "redis", +] # The advance doctests for ServerContainer require redis test_module_import = ["httpx"] google = ["google-cloud-pubsub", "google-cloud-datastore"] influxdb = ["influxdb", "influxdb-client"] @@ -204,19 +207,17 @@ addopts = "--tb=short --strict-markers" log_cli = true log_cli_level = "INFO" markers = [ - "inside_docker_check: mark test to be used to validate DinD/DooD is working as expected" + "inside_docker_check: mark test to be used to validate DinD/DooD is working as expected", ] [tool.coverage.run] branch = true -omit = [ - "oracle.py" -] +omit = ["oracle.py"] [tool.coverage.report] exclude_lines = [ "pass", - "raise NotImplementedError" # TODO: used in core/generic.py, not sure we need DbContainer + "raise NotImplementedError", # TODO: used in core/generic.py, not sure we need DbContainer ] [tool.ruff] @@ -254,7 +255,8 @@ select = [ # mccabe "C90", # pycodestyle - "E", "W", + "E", + "W", # pyflakes "F", # pygrep-hooks @@ -272,9 +274,13 @@ ignore = [ # the must-have __init__.py (we are using package namespaces) "INP001", # we do have some imports shadowing builtins - "A004" + "A004", ] +[tool.ruff.lint.per-file-ignores] +"**/example_*.py" = ["T201"] +"**/examples/*.py" = ["T201"] + [tool.ruff.lint.pyupgrade] keep-runtime-typing = true @@ -293,47 +299,41 @@ strict = true modules = ["testcontainers.core"] mypy_path = [ "core", -# "modules/arangodb", -# "modules/azurite", -# "modules/cassandra", -# "modules/clickhouse", -# "modules/elasticsearch", -# "modules/google", -# "modules/k3s", -# "modules/kafka", -# "modules/keycloak", -# "modules/localstack", + # "modules/arangodb", + # "modules/azurite", + # "modules/cassandra", + # "modules/clickhouse", + # "modules/elasticsearch", + # "modules/google", + # "modules/k3s", + # "modules/kafka", + # "modules/keycloak", + # "modules/localstack", "modules/mailpit", -# "modules/minio", -# "modules/mongodb", -# "modules/mssql", -# "modules/mysql", -# "modules/neo4j", -# "modules/nginx", -# "modules/ollama", -# "modules/opensearch", -# "modules/oracle", -# "modules/postgres", -# "modules/rabbitmq", -# "modules/redis", -# "modules/selenium" + # "modules/minio", + # "modules/mongodb", + # "modules/mssql", + # "modules/mysql", + # "modules/neo4j", + # "modules/nginx", + # "modules/ollama", + # "modules/opensearch", + # "modules/oracle", + # "modules/postgres", + # "modules/rabbitmq", + # "modules/redis", + # "modules/selenium" "modules/sftp", -# "modules/vault" -# "modules/weaviate" -] -enable_error_code = [ - "ignore-without-code", - "redundant-expr", - "truthy-bool", + # "modules/vault" + # "modules/weaviate" ] +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] [[tool.mypy.overrides]] module = ['tests.*'] 
# in pytest we allow fixtures to be more relaxed, though we check the untyped functions check_untyped_defs = true -disable_error_code = [ - 'no-untyped-def' -] +disable_error_code = ['no-untyped-def'] [[tool.mypy.overrides]] module = ['docker.*']