diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9096a62c9ef..1656b659ab1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -150,6 +150,8 @@ ddtrace/contrib/internal/anthropic @DataDog/ml-observ ddtrace/contrib/_anthropic.py @DataDog/ml-observability ddtrace/contrib/internal/google_generativeai @DataDog/ml-observability ddtrace/contrib/_google_generativeai.py @DataDog/ml-observability +ddtrace/contrib/internal/google_genai @DataDog/ml-observability +ddtrace/contrib/_google_genai.py @DataDog/ml-observability ddtrace/contrib/internal/vertexai @DataDog/ml-observability ddtrace/contrib/_vertexai.py @DataDog/ml-observability ddtrace/contrib/internal/langgraph @DataDog/ml-observability diff --git a/.riot/requirements/1de4a65.txt b/.riot/requirements/1de4a65.txt new file mode 100644 index 00000000000..96c05a60b10 --- /dev/null +++ b/.riot/requirements/1de4a65.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/1de4a65.in +# +annotated-types==0.7.0 +anyio==4.9.0 +attrs==25.3.0 +cachetools==5.5.2 +certifi==2025.4.26 +charset-normalizer==3.4.2 +coverage[toml]==7.9.0 +google-auth==2.40.3 +google-genai==1.20.0 +h11==0.16.0 +httpcore==1.0.9 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +multidict==6.4.4 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pydantic==2.11.5 +pydantic-core==2.33.2 +pygments==2.19.1 +pytest==8.4.0 +pytest-asyncio==1.0.0 +pytest-cov==6.2.1 +pytest-mock==3.14.1 +pyyaml==6.0.2 +requests==2.32.4 +rsa==4.9.1 +sniffio==1.3.1 +sortedcontainers==2.4.0 +typing-extensions==4.14.0 +typing-inspection==0.4.1 +urllib3==2.4.0 +vcrpy==7.0.0 +websockets==15.0.1 +wrapt==1.17.2 +yarl==1.20.1 diff --git a/.riot/requirements/7d83e7d.txt b/.riot/requirements/7d83e7d.txt new file mode 100644 index 00000000000..641d16ccf74 --- /dev/null +++ b/.riot/requirements/7d83e7d.txt @@ -0,0 +1,50 @@ +# +# This file is autogenerated by pip-compile with Python 3.10 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/7d83e7d.in +# +annotated-types==0.7.0 +anyio==4.9.0 +attrs==25.3.0 +cachetools==5.5.2 +certifi==2025.4.26 +charset-normalizer==3.4.2 +coverage[toml]==7.9.0 +exceptiongroup==1.3.0 +google-auth==2.40.3 +google-genai==1.20.0 +h11==0.16.0 +httpcore==1.0.9 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +multidict==6.4.4 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pydantic==2.11.5 +pydantic-core==2.33.2 +pygments==2.19.1 +pytest==8.4.0 +pytest-asyncio==1.0.0 +pytest-cov==6.2.1 +pytest-mock==3.14.1 +pyyaml==6.0.2 +requests==2.32.4 +rsa==4.9.1 +sniffio==1.3.1 +sortedcontainers==2.4.0 +tomli==2.2.1 +typing-extensions==4.14.0 +typing-inspection==0.4.1 +urllib3==2.4.0 +vcrpy==7.0.0 +websockets==15.0.1 +wrapt==1.17.2 +yarl==1.20.1 diff --git a/.riot/requirements/97b1ae2.txt b/.riot/requirements/97b1ae2.txt new file mode 100644 index 00000000000..97aa5d8fe5c --- /dev/null +++ b/.riot/requirements/97b1ae2.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.13 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/97b1ae2.in +# +annotated-types==0.7.0 +anyio==4.9.0 +attrs==25.3.0 +cachetools==5.5.2 +certifi==2025.6.15 +charset-normalizer==3.4.2 +coverage[toml]==7.9.1 
+google-auth==2.40.3 +google-genai==1.20.0 +h11==0.16.0 +httpcore==1.0.9 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +multidict==6.5.0 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pydantic==2.11.7 +pydantic-core==2.33.2 +pygments==2.19.1 +pytest==8.4.0 +pytest-asyncio==1.0.0 +pytest-cov==6.2.1 +pytest-mock==3.14.1 +pyyaml==6.0.2 +requests==2.32.4 +rsa==4.9.1 +sniffio==1.3.1 +sortedcontainers==2.4.0 +typing-extensions==4.14.0 +typing-inspection==0.4.1 +urllib3==2.4.0 +vcrpy==7.0.0 +websockets==15.0.1 +wrapt==1.17.2 +yarl==1.20.1 diff --git a/.riot/requirements/ce785c0.txt b/.riot/requirements/ce785c0.txt new file mode 100644 index 00000000000..e5b6da388ae --- /dev/null +++ b/.riot/requirements/ce785c0.txt @@ -0,0 +1,48 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/ce785c0.in +# +annotated-types==0.7.0 +anyio==4.9.0 +attrs==25.3.0 +cachetools==5.5.2 +certifi==2025.4.26 +charset-normalizer==3.4.2 +coverage[toml]==7.9.0 +google-auth==2.40.3 +google-genai==1.20.0 +h11==0.16.0 +httpcore==1.0.9 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +multidict==6.4.4 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pydantic==2.11.5 +pydantic-core==2.33.2 +pygments==2.19.1 +pytest==8.4.0 +pytest-asyncio==1.0.0 +pytest-cov==6.2.1 +pytest-mock==3.14.1 +pyyaml==6.0.2 +requests==2.32.4 +rsa==4.9.1 +sniffio==1.3.1 +sortedcontainers==2.4.0 +typing-extensions==4.14.0 +typing-inspection==0.4.1 +urllib3==2.4.0 +vcrpy==7.0.0 +websockets==15.0.1 +wrapt==1.17.2 +yarl==1.20.1 diff --git a/.riot/requirements/f5e518d.txt b/.riot/requirements/f5e518d.txt new file mode 100644 index 00000000000..b5b84a9e0f1 --- /dev/null +++ b/.riot/requirements/f5e518d.txt @@ -0,0 +1,50 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --no-annotate .riot/requirements/f5e518d.in +# +annotated-types==0.7.0 +anyio==4.9.0 +attrs==25.3.0 +cachetools==5.5.2 +certifi==2025.4.26 +charset-normalizer==3.4.2 +coverage[toml]==7.9.0 +exceptiongroup==1.3.0 +google-auth==2.40.3 +google-genai==1.20.0 +h11==0.16.0 +httpcore==1.0.9 +httpx==0.28.1 +hypothesis==6.45.0 +idna==3.10 +iniconfig==2.1.0 +mock==5.2.0 +multidict==6.4.4 +opentracing==2.4.0 +packaging==25.0 +pluggy==1.6.0 +propcache==0.3.2 +pyasn1==0.6.1 +pyasn1-modules==0.4.2 +pydantic==2.11.5 +pydantic-core==2.33.2 +pygments==2.19.1 +pytest==8.4.0 +pytest-asyncio==1.0.0 +pytest-cov==6.2.1 +pytest-mock==3.14.1 +pyyaml==6.0.2 +requests==2.32.4 +rsa==4.9.1 +sniffio==1.3.1 +sortedcontainers==2.4.0 +tomli==2.2.1 +typing-extensions==4.14.0 +typing-inspection==0.4.1 +urllib3==1.26.20 +vcrpy==7.0.0 +websockets==15.0.1 +wrapt==1.17.2 +yarl==1.20.1 diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py index cd212f00905..c1125ea9467 100644 --- a/ddtrace/_monkey.py +++ b/ddtrace/_monkey.py @@ -50,6 +50,7 @@ "futures": True, "freezegun": True, "google_generativeai": True, + "google_genai": True, "gevent": True, "graphql": True, "grpc": True, @@ -160,6 +161,7 @@ "httplib": ("http.client",), "kafka": ("confluent_kafka",), "google_generativeai": ("google.generativeai",), + "google_genai": ("google.genai",), "langgraph": ( "langgraph", "langgraph.graph", diff --git a/ddtrace/contrib/_google_genai.py b/ddtrace/contrib/_google_genai.py new 
file mode 100644 index 00000000000..cbafa40e3ae --- /dev/null +++ b/ddtrace/contrib/_google_genai.py @@ -0,0 +1,48 @@ +""" +The Google GenAI integration instruments the Google GenAI Python SDK to trace LLM requests made to +Gemini and VertexAI models. + +All traces submitted from the Google GenAI integration are tagged by: + +- ``service``, ``env``, ``version``: see the `Unified Service Tagging docs <https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging>`_. +- the model used in the request. +- the provider used in the request. + + +Enabling +~~~~~~~~ + +The Google GenAI integration is enabled automatically when you use +:ref:`ddtrace-run<ddtracerun>` or :ref:`import ddtrace.auto<ddtraceauto>`. + +Alternatively, use :func:`patch()<ddtrace.patch>` to manually enable the Google GenAI integration:: + + from ddtrace import config, patch + + patch(google_genai=True) + +Global Configuration +~~~~~~~~~~~~~~~~~~~~ + +.. py:data:: ddtrace.config.google_genai["service"] + + The service name reported by default for Google GenAI requests. + + Alternatively, you can set this option with the ``DD_SERVICE`` or ``DD_GOOGLE_GENAI_SERVICE`` environment + variables. + + Default: ``DD_SERVICE`` + + +Instance Configuration +~~~~~~~~~~~~~~~~~~~~~~ + +To configure the Google GenAI integration on a per-instance basis use the +``Pin`` API:: + + from google import genai + from ddtrace import config + from ddtrace.trace import Pin + + Pin.override(genai, service="my-google-genai-service") +""" diff --git a/ddtrace/contrib/integration_registry/registry.yaml b/ddtrace/contrib/integration_registry/registry.yaml index 6480cbdb706..29f3fa968f3 100644 --- a/ddtrace/contrib/integration_registry/registry.yaml +++ b/ddtrace/contrib/integration_registry/registry.yaml @@ -383,6 +383,16 @@ integrations: min: 20.12.1 max: 24.11.1 +- integration_name: google_genai + is_external_package: true + is_tested: true + dependency_names: + - google-genai + tested_versions_by_dependency: + google-genai: + min: 1.19.0 + max: 1.20.0 + - integration_name: google_generativeai is_external_package: true is_tested: true diff --git a/ddtrace/contrib/internal/google_genai/_utils.py b/ddtrace/contrib/internal/google_genai/_utils.py new file mode 100644 index 00000000000..c53d734f0dc --- /dev/null +++ b/ddtrace/contrib/internal/google_genai/_utils.py @@ -0,0 +1,80 @@ +import sys + +import wrapt + + +# https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-partner-models +# GeminiAPI: only exports google provided models +# VertexAI: can map provided models to a provider based on prefix, a best-effort mapping +# as huggingface exports hundreds of custom provided models +MODEL_PREFIX_TO_PROVIDER = { + "gemini": "google", + "imagen": "google", + "veo": "google", + "jamba": "ai21labs", + "claude": "anthropic", + "llama": "meta", + "mistral": "mistral", + "codestral": "mistral", + "deepseek": "deepseek", + "olmo": "ai2", + "tulu": "ai2", + "molmo": "ai2", + "specter": "ai2", + "cosmoo": "ai2", + "qodo": "qodo", + "mars": "camb.ai", +} + + +def extract_provider_and_model_name(kwargs): + model_path = kwargs.get("model", "") + model_name = model_path.split("/")[-1] + for prefix in MODEL_PREFIX_TO_PROVIDER.keys(): + if model_name.lower().startswith(prefix): + provider_name = MODEL_PREFIX_TO_PROVIDER[prefix] + return provider_name, model_name + return "custom", model_name if len(model_name) > 0 else "custom" + + +class BaseTracedGoogleGenAIStreamResponse(wrapt.ObjectProxy): + def __init__(self, generation_response, span): + super().__init__(generation_response) + self._self_dd_span = span + self._self_chunks = [] + + +class 
TracedGoogleGenAIStreamResponse(BaseTracedGoogleGenAIStreamResponse): + def __iter__(self): + return self + + def __next__(self): + try: + chunk = self.__wrapped__.__next__() + self._self_chunks.append(chunk) + return chunk + except StopIteration: + self._self_dd_span.finish() + raise + except Exception: + self._self_dd_span.set_exc_info(*sys.exc_info()) + self._self_dd_span.finish() + raise + + +class TracedAsyncGoogleGenAIStreamResponse(BaseTracedGoogleGenAIStreamResponse): + def __aiter__(self): + return self + + async def __anext__(self): + try: + chunk = await self.__wrapped__.__anext__() + self._self_chunks.append(chunk) + return chunk + except StopAsyncIteration: + self._self_dd_span.finish() + raise + except Exception: + self._self_dd_span.set_exc_info(*sys.exc_info()) + self._self_dd_span.finish() + raise diff --git a/ddtrace/contrib/internal/google_genai/patch.py b/ddtrace/contrib/internal/google_genai/patch.py new file mode 100644 index 00000000000..c91341b9035 --- /dev/null +++ b/ddtrace/contrib/internal/google_genai/patch.py @@ -0,0 +1,127 @@ +import sys + +from google import genai + +from ddtrace import config +from ddtrace.contrib.internal.google_genai._utils import TracedAsyncGoogleGenAIStreamResponse +from ddtrace.contrib.internal.google_genai._utils import TracedGoogleGenAIStreamResponse +from ddtrace.contrib.internal.google_genai._utils import extract_provider_and_model_name +from ddtrace.contrib.internal.trace_utils import unwrap +from ddtrace.contrib.internal.trace_utils import with_traced_module +from ddtrace.contrib.internal.trace_utils import wrap +from ddtrace.llmobs._integrations import GoogleGenAIIntegration +from ddtrace.trace import Pin + + +config._add("google_genai", {}) + + +def _supported_versions(): + return {"google.genai": ">=1.19.0"} + + +def get_version() -> str: + return getattr(genai, "__version__", "") + + +@with_traced_module +def traced_generate(genai, pin, func, instance, args, kwargs): + integration = genai._datadog_integration + provider_name, model_name = extract_provider_and_model_name(kwargs) + with integration.trace( + pin, + "%s.%s" % (instance.__class__.__name__, func.__name__), + provider=provider_name, + model=model_name, + submit_to_llmobs=False, + ): + return func(*args, **kwargs) + + +@with_traced_module +async def traced_async_generate(genai, pin, func, instance, args, kwargs): + integration = genai._datadog_integration + provider_name, model_name = extract_provider_and_model_name(kwargs) + with integration.trace( + pin, + "%s.%s" % (instance.__class__.__name__, func.__name__), + provider=provider_name, + model=model_name, + submit_to_llmobs=False, + ): + return await func(*args, **kwargs) + + +@with_traced_module +def traced_generate_stream(genai, pin, func, instance, args, kwargs): + integration = genai._datadog_integration + resp = None + provider_name, model_name = extract_provider_and_model_name(kwargs) + span = integration.trace( + pin, + "%s.%s" % (instance.__class__.__name__, func.__name__), + provider=provider_name, + model=model_name, + submit_to_llmobs=False, + ) + try: + resp = func(*args, **kwargs) + return TracedGoogleGenAIStreamResponse(resp, span) + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + if span.error: + span.finish() + + +@with_traced_module +async def traced_async_generate_stream(genai, pin, func, instance, args, kwargs): + integration = genai._datadog_integration + resp = None + provider_name, model_name = extract_provider_and_model_name(kwargs) + span = integration.trace( + 
pin, + "%s.%s" % (instance.__class__.__name__, func.__name__), + provider=provider_name, + model=model_name, + submit_to_llmobs=False, + ) + try: + resp = await func(*args, **kwargs) + return TracedAsyncGoogleGenAIStreamResponse(resp, span) + except Exception: + span.set_exc_info(*sys.exc_info()) + raise + finally: + if span.error: + span.finish() + + +def patch(): + if getattr(genai, "_datadog_patch", False): + return + + genai._datadog_patch = True + Pin().onto(genai) + integration = GoogleGenAIIntegration(integration_config=config.google_genai) + genai._datadog_integration = integration + + wrap("google.genai", "models.Models.generate_content", traced_generate(genai)) + wrap("google.genai", "models.Models.generate_content_stream", traced_generate_stream(genai)) + wrap("google.genai", "models.AsyncModels.generate_content", traced_async_generate(genai)) + wrap("google.genai", "models.AsyncModels.generate_content_stream", traced_async_generate_stream(genai)) + + +def unpatch(): + if not getattr(genai, "_datadog_patch", False): + return + + genai._datadog_patch = False + + unwrap(genai.models.Models, "generate_content") + unwrap(genai.models.Models, "generate_content_stream") + unwrap(genai.models.AsyncModels, "generate_content") + unwrap(genai.models.AsyncModels, "generate_content_stream") + + delattr(genai, "_datadog_integration") diff --git a/ddtrace/llmobs/_integrations/__init__.py b/ddtrace/llmobs/_integrations/__init__.py index f09becfc8ed..af7e3d20746 100644 --- a/ddtrace/llmobs/_integrations/__init__.py +++ b/ddtrace/llmobs/_integrations/__init__.py @@ -3,6 +3,7 @@ from .bedrock import BedrockIntegration from .crewai import CrewAIIntegration from .gemini import GeminiIntegration +from .google_genai import GoogleGenAIIntegration from .langchain import LangChainIntegration from .litellm import LiteLLMIntegration from .openai import OpenAIIntegration @@ -15,6 +16,7 @@ "BedrockIntegration", "CrewAIIntegration", "GeminiIntegration", + "GoogleGenAIIntegration", "LangChainIntegration", "LiteLLMIntegration", "OpenAIIntegration", diff --git a/ddtrace/llmobs/_integrations/google_genai.py b/ddtrace/llmobs/_integrations/google_genai.py new file mode 100644 index 00000000000..27a75604cb3 --- /dev/null +++ b/ddtrace/llmobs/_integrations/google_genai.py @@ -0,0 +1,18 @@ +from typing import Any +from typing import Dict +from typing import Optional + +from ddtrace._trace.span import Span +from ddtrace.llmobs._integrations.base import BaseLLMIntegration + + +class GoogleGenAIIntegration(BaseLLMIntegration): + _integration_name = "google_genai" + + def _set_base_span_tags( + self, span: Span, provider: Optional[str] = None, model: Optional[str] = None, **kwargs: Dict[str, Any] + ) -> None: + if provider is not None: + span.set_tag_str("google_genai.request.provider", provider) + if model is not None: + span.set_tag_str("google_genai.request.model", model) diff --git a/ddtrace/settings/_config.py b/ddtrace/settings/_config.py index 344b4f5d2f2..428d43f8066 100644 --- a/ddtrace/settings/_config.py +++ b/ddtrace/settings/_config.py @@ -101,6 +101,7 @@ "dramatiq", "flask", "google_generativeai", + "google_genai", "urllib3", "subprocess", "kafka", diff --git a/docs/integrations.rst b/docs/integrations.rst index 0d4b054134f..fed2d311af6 100644 --- a/docs/integrations.rst +++ b/docs/integrations.rst @@ -237,6 +237,12 @@ gevent .. automodule:: ddtrace.contrib._gevent +.. _google_genai: + +google-genai +^^^^^^^^^^^^ +.. automodule:: ddtrace.contrib._google_genai + .. 
_google_generativeai: google-generativeai diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index ee462ba1dc4..aeefda6d874 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -115,6 +115,7 @@ flamegraph fnmatch formatter freezegun +genai generativeai gevent Gitlab diff --git a/releasenotes/notes/google_genai_apm_tracing-a88d4a4dada947d6.yaml b/releasenotes/notes/google_genai_apm_tracing-a88d4a4dada947d6.yaml new file mode 100644 index 00000000000..c252bc2704c --- /dev/null +++ b/releasenotes/notes/google_genai_apm_tracing-a88d4a4dada947d6.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + google_genai: Introduces tracing support for the ``generate_content`` and ``generate_content_stream`` methods of Google's Generative AI SDK for Python. + See `the docs `_ + for more information. \ No newline at end of file diff --git a/riotfile.py b/riotfile.py index ecb4c751b97..ba364c27593 100644 --- a/riotfile.py +++ b/riotfile.py @@ -2753,6 +2753,16 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT "google-ai-generativelanguage": [latest], }, ), + Venv( + name="google_genai", + command="pytest {cmdargs} tests/contrib/google_genai", + pys=select_pys(min_version="3.9", max_version="3.13"), + pkgs={ + "pytest-asyncio": latest, + "google-genai": [latest], + "vcrpy": latest, + }, + ), Venv( name="crewai", command="pytest {cmdargs} tests/contrib/crewai", diff --git a/supported_versions_output.json b/supported_versions_output.json index ddfd01e6a36..882e01eb6c5 100644 --- a/supported_versions_output.json +++ b/supported_versions_output.json @@ -308,6 +308,13 @@ "max_tracer_supported": "24.11.1", "auto-instrumented": true }, + { + "dependency": "google-genai", + "integration": "google_genai", + "minimum_tracer_supported": "1.20.0", + "max_tracer_supported": "1.20.0", + "auto-instrumented": true + }, { "dependency": "google-generativeai", "integration": "google_generativeai", diff --git a/supported_versions_table.csv b/supported_versions_table.csv index 651e2dbc0e4..5205edb99c5 100644 --- a/supported_versions_table.csv +++ b/supported_versions_table.csv @@ -42,6 +42,7 @@ flask-cache,flask_cache,0.13.1,0.13.1,False flask-caching,flask_cache,1.10.1,2.3.0,False freezegun,freezegun *,1.3.1,1.5.2,True gevent,gevent,20.12.1,24.11.1,True +google-genai,google_genai,1.20.0,1.20.0,True google-generativeai,google_generativeai,0.7.2,0.8.3,True graphql-core,graphql,3.1.7,3.2.6,True grpcio,grpc,1.34.1,1.68.1,True diff --git a/tests/contrib/google_genai/__init__.py b/tests/contrib/google_genai/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/contrib/google_genai/cassettes/v1/generate_content.yaml b/tests/contrib/google_genai/cassettes/v1/generate_content.yaml new file mode 100644 index 00000000000..93011820c89 --- /dev/null +++ b/tests/contrib/google_genai/cassettes/v1/generate_content.yaml @@ -0,0 +1,66 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "Why is the sky blue? 
Explain in 2-3 + sentences."}], "role": "user"}], "generationConfig": {"temperature": 0.0, "topP": + 0.95, "topK": 20.0, "candidateCount": 1, "maxOutputTokens": 100, "stopSequences": + ["STOP!"], "presencePenalty": 0.0, "frequencyPenalty": 0.0, "seed": 5}}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '306' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + user-agent: + - google-genai-sdk/1.20.0 gl-python/3.12.11 + x-goog-api-client: + - google-genai-sdk/1.20.0 gl-python/3.12.11 + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:generateContent + response: + body: + string: !!binary | + H4sIAAAAAAAC/61STY+bMBC98ytGPicRocmm6q3qh7TVrrIfUVWp7WECA1gxNmsPaaMo/70DhCxp + r0UyGL/nmTcz7xgBqBRtpjNkCuodfJcTgGP3bjFnmSwLMBzJYY2eX7n9cxzthcL0u72kNiVB2B0A + 65rQB9iahiCTxQ4Q6pKsq2RZSNEYyuAJD4Z0UUJIkZm8tsUM4Ll0Xn7gF+7JkC24DOByMELkiXx2 + 1AeWQmCvnSE5RU9DEIlbOfmlPKeUtcQ4wPYALNpYWxGnvRAMpY2hANp2CHLlggiUe1yiBeNscS1h + IqKbIAqhCW09gc4yOl2QuqrFcu8qkOIg077N7myY/bBq1K7TZf9z8tpkL4LaDlYuIzPQTwNB5drq + UD4RBmdb2vNm/aAuKO6LO1fU3m3bOU3j2TxJlstktYrfvlms4mSVREPmLqdqAhZ0T4ziA7xMW0mE + quaN25H94JrOB/NFn2Vkmyt8mZxxdozmCrq5mfwTNnyUpNqM7TRymhSPRvOhs9Knbxs1ahBfqxo6 + FI0a+bfG/5RsmVwni86D6Wf1lXzQ/VAKEgvoaTKLp7nBUE7jeN5FVZ5CLU6g26zl6cXnR7x7eb/+ + svUv4f5Brzf78HirolP0B0iCYreiAwAA + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Date: + - Mon, 16 Jun 2025 20:41:48 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=650 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/contrib/google_genai/cassettes/v1/generate_content_stream.yaml b/tests/contrib/google_genai/cassettes/v1/generate_content_stream.yaml new file mode 100644 index 00000000000..a800c203be9 --- /dev/null +++ b/tests/contrib/google_genai/cassettes/v1/generate_content_stream.yaml @@ -0,0 +1,74 @@ +interactions: +- request: + body: '{"contents": [{"parts": [{"text": "Why is the sky blue? Explain in 2-3 + sentences."}], "role": "user"}]}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '103' + content-type: + - application/json + host: + - generativelanguage.googleapis.com + method: POST + uri: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash-001:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \"The\"}],\"role\": + \"model\"}}],\"usageMetadata\": {\"promptTokenCount\": 15,\"totalTokenCount\": + 15,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 15}]},\"modelVersion\": + \"gemini-2.0-flash-001\",\"responseId\": \"vCdTaL7lLK-y2cAP-6zM0Ac\"}\r\n\r\ndata: + {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" sky appears blue\"}],\"role\": + \"model\"}}],\"usageMetadata\": {\"promptTokenCount\": 15,\"totalTokenCount\": + 15,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 15}]},\"modelVersion\": + \"gemini-2.0-flash-001\",\"responseId\": \"vCdTaL7lLK-y2cAP-6zM0Ac\"}\r\n\r\ndata: + {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" due to a phenomenon + called Rayleigh scattering. 
Shorter wavelengths of light, like blue\"}],\"role\": + \"model\"}}],\"usageMetadata\": {\"promptTokenCount\": 15,\"totalTokenCount\": + 15,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 15}]},\"modelVersion\": + \"gemini-2.0-flash-001\",\"responseId\": \"vCdTaL7lLK-y2cAP-6zM0Ac\"}\r\n\r\ndata: + {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" and violet, are + scattered more by the tiny molecules in the Earth's atmosphere,\"}],\"role\": + \"model\"}}],\"usageMetadata\": {\"promptTokenCount\": 15,\"totalTokenCount\": + 15,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 15}]},\"modelVersion\": + \"gemini-2.0-flash-001\",\"responseId\": \"vCdTaL7lLK-y2cAP-6zM0Ac\"}\r\n\r\ndata: + {\"candidates\": [{\"content\": {\"parts\": [{\"text\": \" making blue light + more visible to our eyes in all directions.\\n\"}],\"role\": \"model\"},\"finishReason\": + \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 14,\"candidatesTokenCount\": + 51,\"totalTokenCount\": 65,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": + 14}],\"candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": + 51}]},\"modelVersion\": \"gemini-2.0-flash-001\",\"responseId\": \"vCdTaL7lLK-y2cAP-6zM0Ac\"}\r\n\r\n" + headers: + Alt-Svc: + - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Date: + - Wed, 18 Jun 2025 20:55:24 GMT + Server: + - scaffolding on HTTPServer2 + Server-Timing: + - gfet4t7; dur=380 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/tests/contrib/google_genai/conftest.py b/tests/contrib/google_genai/conftest.py new file mode 100644 index 00000000000..ee53b255d11 --- /dev/null +++ b/tests/contrib/google_genai/conftest.py @@ -0,0 +1,79 @@ +import os +from typing import Any +from typing import Iterator + +import pytest + +from ddtrace.contrib.internal.google_genai.patch import patch +from ddtrace.contrib.internal.google_genai.patch import unpatch +from ddtrace.trace import Pin +from tests.utils import DummyTracer +from tests.utils import DummyWriter + + +@pytest.fixture +def mock_tracer(genai): + try: + pin = Pin.get_from(genai) + mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False)) + pin._override(genai, tracer=mock_tracer) + yield mock_tracer + except Exception: + yield + + +@pytest.fixture +def genai(): + patch() + from google import genai + + # these environment variables are checked by vertexai + # when running locally, these lines ensure that the client is not using the real API key. + os.environ["GOOGLE_CLOUD_LOCATION"] = "" + os.environ["GOOGLE_CLOUD_PROJECT"] = "" + + # When testing locally to generate new cassette files, + # comment the lines below to use the real Google API key + os.environ["GOOGLE_API_KEY"] = "" + + yield genai + unpatch() + + +@pytest.fixture +def mock_vertex_generate_content(monkeypatch): + """ + Vertex enabled genAI clients are difficult to test with VCRpy due to their use of google auth. 
+ Instead we patch the generate_content and generate_content_stream methods (sync and async) to return a mock response + """ + from google import genai + from google.genai import types + + candidate = types.Candidate( + content=types.Content( + role="user", parts=[types.Part.from_text(text="The sky is blue due to rayleigh scattering")] + ) + ) + _response = types.GenerateContentResponse(candidates=[candidate]) + + def _fake_stream(self, *, model: str, contents, config=None) -> Iterator[Any]: + yield _response + + def _fake_generate_content(self, *, model: str, contents, config=None): + return _response + + async def _fake_async_stream(self, *, model: str, contents, config=None): + async def _async_iterator(): + yield _response + + return _async_iterator() + + async def _fake_async_generate_content(self, *, model: str, contents, config=None): + return _response + + monkeypatch.setattr(genai.models.Models, "_generate_content_stream", _fake_stream) + monkeypatch.setattr(genai.models.Models, "_generate_content", _fake_generate_content) + monkeypatch.setattr(genai.models.AsyncModels, "_generate_content_stream", _fake_async_stream) + monkeypatch.setattr(genai.models.AsyncModels, "_generate_content", _fake_async_generate_content) + + yield diff --git a/tests/contrib/google_genai/test_google_genai.py b/tests/contrib/google_genai/test_google_genai.py new file mode 100644 index 00000000000..8e681069189 --- /dev/null +++ b/tests/contrib/google_genai/test_google_genai.py @@ -0,0 +1,341 @@ +import os + +import pytest + +from tests.contrib.google_genai.utils import FULL_GENERATE_CONTENT_CONFIG +from tests.contrib.google_genai.utils import get_google_genai_vcr +from tests.utils import override_global_config + + +@pytest.fixture(scope="session") +def google_genai_vcr(): + yield get_google_genai_vcr(subdirectory_name="v1") + + +def test_global_tags(google_genai_vcr, genai, mock_tracer): + """ + When the global config UST tags are set + The service name should be used for all data + The env should be used for all data + The version should be used for all data + """ + with override_global_config(dict(service="test-svc", env="staging", version="1234")): + with google_genai_vcr.use_cassette("generate_content.yaml"): + client = genai.Client() + client.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + + span = mock_tracer.pop_traces()[0][0] + assert span.resource == "Models.generate_content" + assert span.service == "test-svc" + assert span.get_tag("env") == "staging" + assert span.get_tag("version") == "1234" + assert span.get_tag("google_genai.request.model") == "gemini-2.0-flash-001" + assert span.get_tag("google_genai.request.provider") == "google" + + +@pytest.mark.snapshot(token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content") +def test_google_genai_generate_content(google_genai_vcr, genai): + with google_genai_vcr.use_cassette("generate_content.yaml"): + client = genai.Client() + client.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? 
Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_error", + ignores=["meta.error.stack", "meta.error.message"], +) +def test_google_genai_generate_content_error(genai): + with pytest.raises(TypeError): + client = genai.Client() + client.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream", +) +def test_google_genai_generate_content_stream(google_genai_vcr, genai): + with google_genai_vcr.use_cassette("generate_content_stream.yaml"): + client = genai.Client() + response = client.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream_error", + ignores=["meta.error.stack", "meta.error.message"], +) +def test_google_genai_generate_content_stream_error(genai): + with pytest.raises(TypeError): + client = genai.Client() + response = client.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content", + ignores=["resource"], +) +async def test_google_genai_generate_content_async(google_genai_vcr, genai): + with google_genai_vcr.use_cassette("generate_content.yaml"): + client = genai.Client() + await client.aio.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_error", + ignores=["resource", "meta.error.message", "meta.error.stack"], +) +async def test_google_genai_generate_content_async_error(genai): + with pytest.raises(TypeError): + client = genai.Client() + await client.aio.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream", + ignores=["resource"], +) +async def test_google_genai_generate_content_async_stream(google_genai_vcr, genai): + with google_genai_vcr.use_cassette("generate_content_stream.yaml"): + client = genai.Client() + response = await client.aio.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? 
Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + async for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream_error", + ignores=["resource", "meta.error.message", "meta.error.stack"], +) +async def test_google_genai_generate_content_async_stream_error(genai): + with pytest.raises(TypeError): + client = genai.Client() + response = await client.aio.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + async for _ in response: + pass + + +@pytest.mark.snapshot(token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content") +def test_google_genai_generate_content_vertex(mock_vertex_generate_content, genai): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + client.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_error", + ignores=["meta.error.stack", "meta.error.message"], +) +def test_google_genai_generate_content_vertex_error(mock_vertex_generate_content, genai): + with pytest.raises(TypeError): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + client.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream", +) +def test_google_genai_generate_content_stream_vertex(mock_vertex_generate_content, genai): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + response = client.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream_error", + ignores=["meta.error.stack", "meta.error.message"], +) +def test_google_genai_generate_content_stream_vertex_error(mock_vertex_generate_content, genai): + with pytest.raises(TypeError): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + response = client.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? 
Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content", + ignores=["resource"], +) +async def test_google_genai_generate_content_async_vertex(mock_vertex_generate_content, genai): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + await client.aio.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_error", + ignores=["resource", "meta.error.message", "meta.error.stack"], +) +async def test_google_genai_generate_content_async_vertex_error(mock_vertex_generate_content, genai): + with pytest.raises(TypeError): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + await client.aio.models.generate_content( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream", + ignores=["resource"], +) +async def test_google_genai_generate_content_async_stream_vertex(mock_vertex_generate_content, genai): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + response = await client.aio.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + ) + async for _ in response: + pass + + +@pytest.mark.snapshot( + token="tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream_error", + ignores=["resource", "meta.error.message", "meta.error.stack"], +) +async def test_google_genai_generate_content_async_stream_vertex_error(mock_vertex_generate_content, genai): + with pytest.raises(TypeError): + client = genai.Client( + vertexai=True, + project=os.environ.get("GOOGLE_CLOUD_PROJECT", "dummy-project"), + location=os.environ.get("GOOGLE_CLOUD_LOCATION", "us-central1"), + ) + response = await client.aio.models.generate_content_stream( + model="gemini-2.0-flash-001", + contents="Why is the sky blue? 
Explain in 2-3 sentences.", + config=FULL_GENERATE_CONTENT_CONFIG, + not_an_argument="why am i here?", + ) + async for _ in response: + pass + + +@pytest.mark.parametrize( + "model_name,expected_provider,expected_model", + [ + ( + "projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash", + "google", + "gemini-2.0-flash", + ), + ("imagen-1.0", "google", "imagen-1.0"), + ("models/veo-1.0", "google", "veo-1.0"), + ("jamba-1.0", "ai21labs", "jamba-1.0"), + ("claude-3-opus", "anthropic", "claude-3-opus"), + ("publishers/meta/models/llama-3.1-405b-instruct-maas", "meta", "llama-3.1-405b-instruct-maas"), + ("mistral-7b", "mistral", "mistral-7b"), + ("codestral-22b", "mistral", "codestral-22b"), + ("deepseek-coder", "deepseek", "deepseek-coder"), + ("olmo-7b", "ai2", "olmo-7b"), + ("qodo-7b", "qodo", "qodo-7b"), + ("mars-7b", "camb.ai", "mars-7b"), + # edge cases + ("weird_directory/unknown-model", "custom", "unknown-model"), + ("", "custom", "custom"), + ("just-a-slash/", "custom", "custom"), + ("multiple/slashes/in/path/model-name", "custom", "model-name"), + ], +) +def test_extract_provider_and_model_name(model_name, expected_provider, expected_model): + from ddtrace.contrib.internal.google_genai._utils import extract_provider_and_model_name + + kwargs = {"model": model_name} + provider, model = extract_provider_and_model_name(kwargs) + + assert provider == expected_provider + assert model == expected_model diff --git a/tests/contrib/google_genai/test_google_genai_patch.py b/tests/contrib/google_genai/test_google_genai_patch.py new file mode 100644 index 00000000000..f645af82141 --- /dev/null +++ b/tests/contrib/google_genai/test_google_genai_patch.py @@ -0,0 +1,30 @@ +from ddtrace.contrib.internal.google_genai.patch import get_version +from ddtrace.contrib.internal.google_genai.patch import patch +from ddtrace.contrib.internal.google_genai.patch import unpatch +from tests.contrib.patch import PatchTestCase + + +class TestGoogleGenAIPatch(PatchTestCase.Base): + __integration_name__ = "google_genai" + __module_name__ = "google.genai" + __patch_func__ = patch + __unpatch_func__ = unpatch + __get_version__ = get_version + + def assert_module_patched(self, google_genai): + self.assert_wrapped(google_genai.models.Models.generate_content) + self.assert_wrapped(google_genai.models.Models.generate_content_stream) + self.assert_wrapped(google_genai.models.AsyncModels.generate_content) + self.assert_wrapped(google_genai.models.AsyncModels.generate_content_stream) + + def assert_not_module_patched(self, google_genai): + self.assert_not_wrapped(google_genai.models.Models.generate_content) + self.assert_not_wrapped(google_genai.models.Models.generate_content_stream) + self.assert_not_wrapped(google_genai.models.AsyncModels.generate_content) + self.assert_not_wrapped(google_genai.models.AsyncModels.generate_content_stream) + + def assert_not_module_double_patched(self, google_genai): + self.assert_not_double_wrapped(google_genai.models.Models.generate_content) + self.assert_not_double_wrapped(google_genai.models.Models.generate_content_stream) + self.assert_not_double_wrapped(google_genai.models.AsyncModels.generate_content) + self.assert_not_double_wrapped(google_genai.models.AsyncModels.generate_content_stream) diff --git a/tests/contrib/google_genai/utils.py b/tests/contrib/google_genai/utils.py new file mode 100644 index 00000000000..c75b7f404ac --- /dev/null +++ b/tests/contrib/google_genai/utils.py @@ -0,0 +1,38 @@ +import os + +from google.genai import types +import vcr 
+ + +# sample config for generate_content +FULL_GENERATE_CONTENT_CONFIG = types.GenerateContentConfig( + temperature=0, + top_p=0.95, + top_k=20, + candidate_count=1, + seed=5, + max_output_tokens=100, + stop_sequences=["STOP!"], + presence_penalty=0.0, + frequency_penalty=0.0, + system_instruction="You are a helpful assistant.", +) + + +# VCR is used to capture and store network requests. +# This is done to avoid making real calls to the API which could introduce +# flakiness and cost. +# To (re)-generate the cassettes: set environment variables for GOOGLE_API_KEY +# and delete the old cassettes, then rerun the tests +# NOTE: be sure to check that the generated cassettes don't contain your +# API key. Keys should be redacted by the filter_headers option below. +def get_google_genai_vcr(subdirectory_name=""): + vcr_instance = vcr.VCR( + cassette_library_dir=os.path.join(os.path.dirname(__file__), "cassettes/%s" % subdirectory_name), + record_mode="once", + match_on=["path"], + filter_headers=["x-goog-api-key", "authorization", "x-goog-api-client", "user-agent"], + # Ignore requests to the agent + ignore_localhost=True, + ) + return vcr_instance diff --git a/tests/llmobs/suitespec.yml b/tests/llmobs/suitespec.yml index 85d05f0bf21..b17558ff9d0 100644 --- a/tests/llmobs/suitespec.yml +++ b/tests/llmobs/suitespec.yml @@ -6,6 +6,9 @@ components: google_generativeai: - ddtrace/contrib/_google_generativeai.py - ddtrace/contrib/internal/google_generativeai/* + google_genai: + - ddtrace/contrib/_google_genai.py + - ddtrace/contrib/internal/google_genai/* vertexai: - ddtrace/contrib/_vertexai.py - ddtrace/contrib/internal/vertexai/* @@ -57,6 +60,19 @@ suites: - tests/snapshots/tests.contrib.google_generativeai.* runner: riot snapshot: true + google_genai: + parallelism: 1 + paths: + - '@bootstrap' + - '@core' + - '@tracing' + - '@contrib' + - '@google_genai' + - '@llmobs' + - tests/contrib/google_genai/* + - tests/snapshots/tests.contrib.google_genai.* + runner: riot + snapshot: true vertexai: parallelism: 2 paths: diff --git a/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content.json b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content.json new file mode 100644 index 00000000000..2a5bf2fdd7a --- /dev/null +++ b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content.json @@ -0,0 +1,28 @@ +[[ + { + "name": "google_genai.request", + "service": "tests.contrib.google_genai", + "resource": "Models.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6850374b00000000", + "google_genai.request.model": "gemini-2.0-flash-001", + "google_genai.request.provider": "google", + "language": "python", + "runtime-id": "21de5d02783a4bda9916cba8736920d0" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 23200 + }, + "duration": 1091000, + "start": 1750087499807542000 + }]] diff --git a/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_error.json b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_error.json new file mode 100644 index 00000000000..0f1552f6f43 --- /dev/null +++ b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_error.json @@ -0,0 +1,31 @@ +[[ + { + "name": 
"google_genai.request", + "service": "tests.contrib.google_genai", + "resource": "Models.generate_content", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 1, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6850374b00000000", + "error.message": "Models.generate_content() got an unexpected keyword argument 'not_an_argument'", + "error.stack": "Traceback (most recent call last):\n File \"/Users/max.zhang/dd-trace-py/ddtrace/contrib/internal/google_genai/patch.py\", line 41, in traced_generate\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\nTypeError: Models.generate_content() got an unexpected keyword argument 'not_an_argument'\n", + "error.type": "builtins.TypeError", + "google_genai.request.model": "gemini-2.0-flash-001", + "google_genai.request.provider": "google", + "language": "python", + "runtime-id": "21de5d02783a4bda9916cba8736920d0" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 23200 + }, + "duration": 133000, + "start": 1750087499827076000 + }]] diff --git a/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream.json b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream.json new file mode 100644 index 00000000000..24fb9211fa3 --- /dev/null +++ b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream.json @@ -0,0 +1,28 @@ +[[ + { + "name": "google_genai.request", + "service": "tests.contrib.google_genai", + "resource": "Models.generate_content_stream", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 0, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6850374b00000000", + "google_genai.request.model": "gemini-2.0-flash-001", + "google_genai.request.provider": "google", + "language": "python", + "runtime-id": "21de5d02783a4bda9916cba8736920d0" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 23200 + }, + "duration": 20000, + "start": 1750087499840478000 + }]] diff --git a/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream_error.json b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream_error.json new file mode 100644 index 00000000000..205e179af04 --- /dev/null +++ b/tests/snapshots/tests.contrib.google_genai.test_google_genai.test_google_genai_generate_content_stream_error.json @@ -0,0 +1,31 @@ +[[ + { + "name": "google_genai.request", + "service": "tests.contrib.google_genai", + "resource": "Models.generate_content_stream", + "trace_id": 0, + "span_id": 1, + "parent_id": 0, + "type": "", + "error": 1, + "meta": { + "_dd.p.dm": "-0", + "_dd.p.tid": "6850378c00000000", + "error.message": "Models.generate_content_stream() got an unexpected keyword argument 'not_an_argument'", + "error.stack": "Traceback (most recent call last):\n File \"/Users/max.zhang/dd-trace-py/ddtrace/contrib/internal/google_genai/patch.py\", line 57, in traced_generate_stream\n generation_response = func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\nTypeError: Models.generate_content_stream() got an unexpected keyword argument 'not_an_argument'\n", + "error.type": "builtins.TypeError", + "google_genai.request.model": "gemini-2.0-flash-001", + "google_genai.request.provider": "google", + "language": "python", + "runtime-id": 
"ea00956580e94060979ee6b81c8d2926" + }, + "metrics": { + "_dd.measured": 1, + "_dd.top_level": 1, + "_dd.tracer_kr": 1.0, + "_sampling_priority_v1": 1, + "process_id": 23815 + }, + "duration": 129000, + "start": 1750087564075716000 + }]]