From ddefed1bb545588ed5ce77fa53ef26589b1dbc2e Mon Sep 17 00:00:00 2001 From: Brandon Ruffridge Date: Mon, 19 May 2025 16:50:57 -0400 Subject: [PATCH 1/5] Add reasoning_content if available --- .../instrumentation/smolagents/_wrappers.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py b/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py index 72b502fa4..b486bc845 100644 --- a/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py +++ b/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py @@ -198,6 +198,21 @@ def _llm_output_messages(output_message: Any) -> Iterator[Tuple[str, Any]]: f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_CONTENT}", content, ) + + # Add the reasoning_content if available in raw.choices[0].message structure + try: + if hasattr(output_message, "raw") and output_message.raw: + if hasattr(output_message.raw, "choices") and output_message.raw.choices: + if hasattr(output_message.raw.choices[0], "message"): + reasoning = getattr(output_message.raw.choices[0].message, "reasoning_content", None) + if reasoning is not None: + yield ( + f"{LLM_OUTPUT_MESSAGES}.0.message.reasoning_content", + reasoning, + ) + except (AttributeError, IndexError): + pass + if isinstance(tool_calls := getattr(output_message, "tool_calls", None), list): for tool_call_index, tool_call in enumerate(tool_calls): if (tool_call_id := getattr(tool_call, "id", None)) is not None: From 0b3b14a9e70eccf59cafd23d11321de33beb721a Mon Sep 17 00:00:00 2001 From: Brandon Ruffridge Date: Mon, 19 May 2025 17:39:38 -0400 Subject: [PATCH 2/5] Create reasoning_content.py --- .../examples/reasoning_content.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) 
create mode 100644 python/instrumentation/openinference-instrumentation-smolagents/examples/reasoning_content.py diff --git a/python/instrumentation/openinference-instrumentation-smolagents/examples/reasoning_content.py b/python/instrumentation/openinference-instrumentation-smolagents/examples/reasoning_content.py new file mode 100644 index 000000000..32f321ab7 --- /dev/null +++ b/python/instrumentation/openinference-instrumentation-smolagents/examples/reasoning_content.py @@ -0,0 +1,15 @@ +from smolagents import ( + LiteLLMModel +) +from smolagents.agents import CodeAgent + +model_params = {"thinking": { + "type": "enabled", + "budget_tokens": 4000 +}} + +model = LiteLLMModel(model_id="anthropic/claude-3-7-sonnet-20250219", **model_params) + +agent = CodeAgent(model=model, add_base_tools=False) + +print(agent.run("What's the weather like in Paris?")) From 62ad80b185925a427e6452fdb55c4de1a4baeeae Mon Sep 17 00:00:00 2001 From: Brandon Ruffridge Date: Tue, 20 May 2025 09:32:00 -0400 Subject: [PATCH 3/5] Made code syntax match the conventions of the surrounding code. 
--- .../instrumentation/smolagents/_wrappers.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py b/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py index b486bc845..c1d64f8d6 100644 --- a/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py +++ b/python/instrumentation/openinference-instrumentation-smolagents/src/openinference/instrumentation/smolagents/_wrappers.py @@ -200,18 +200,15 @@ def _llm_output_messages(output_message: Any) -> Iterator[Tuple[str, Any]]: ) # Add the reasoning_content if available in raw.choices[0].message structure - try: - if hasattr(output_message, "raw") and output_message.raw: - if hasattr(output_message.raw, "choices") and output_message.raw.choices: - if hasattr(output_message.raw.choices[0], "message"): - reasoning = getattr(output_message.raw.choices[0].message, "reasoning_content", None) - if reasoning is not None: + if (raw := getattr(output_message, "raw", None)) is not None: + if (choices := getattr(raw, "choices", None)) is not None: + if isinstance(choices, list) and len(choices) > 0: + if (message := getattr(choices[0], "message", None)) is not None: + if (reasoning := getattr(message, "reasoning_content", None)) is not None: yield ( f"{LLM_OUTPUT_MESSAGES}.0.message.reasoning_content", reasoning, ) - except (AttributeError, IndexError): - pass if isinstance(tool_calls := getattr(output_message, "tool_calls", None), list): for tool_call_index, tool_call in enumerate(tool_calls): From 1f23d49d5ef1a9159394c446c685cf7f2e9b5295 Mon Sep 17 00:00:00 2001 From: Brandon Ruffridge Date: Tue, 20 May 2025 10:34:23 -0400 Subject: [PATCH 4/5] Update test_instrumentor.py added reasoning_content test.
https://docs.litellm.ai/docs/reasoning_content --- .../smolagents/test_instrumentor.py | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/python/instrumentation/openinference-instrumentation-smolagents/tests/openinference/instrumentation/smolagents/test_instrumentor.py b/python/instrumentation/openinference-instrumentation-smolagents/tests/openinference/instrumentation/smolagents/test_instrumentor.py index fc6267371..dcf55350a 100644 --- a/python/instrumentation/openinference-instrumentation-smolagents/tests/openinference/instrumentation/smolagents/test_instrumentor.py +++ b/python/instrumentation/openinference-instrumentation-smolagents/tests/openinference/instrumentation/smolagents/test_instrumentor.py @@ -275,6 +275,70 @@ def forward(self, location: str) -> str: assert json.loads(tool_call_arguments_json) == {"location": "Paris"} assert not attributes + @pytest.mark.vcr( + decode_compressed_response=True, + before_record_request=remove_all_vcr_request_headers, + before_record_response=remove_all_vcr_response_headers, + ) + def test_litellm_reasoning_model_has_expected_attributes( + self, + anthropic_api_key: str, + in_memory_span_exporter: InMemorySpanExporter, + ) -> None: + + model_params = {"thinking": { + "type": "enabled", + "budget_tokens": 4000 + }} + + model = LiteLLMModel( + model_id="anthropic/claude-3-7-sonnet-20250219", + api_key=os.environ["ANTHROPIC_API_KEY"], + api_base="https://api.anthropic.com/v1", + **model_params + ) + + input_message_content = ( + "Who won the World Cup in 2018? Answer in one word with no punctuation." 
+ ) + output_message = model( + messages=[ + { + "role": "user", + "content": [{"type": "text", "text": input_message_content}], + } + ] + ) + output_message_content = output_message.content + spans = in_memory_span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + assert span.name == "LiteLLMModel.__call__" + assert span.status.is_ok + attributes = dict(span.attributes or {}) + assert attributes.pop(OPENINFERENCE_SPAN_KIND) == LLM + assert attributes.pop(INPUT_MIME_TYPE) == JSON + assert isinstance(input_value := attributes.pop(INPUT_VALUE), str) + input_data = json.loads(input_value) + assert "messages" in input_data + assert attributes.pop(OUTPUT_MIME_TYPE) == JSON + assert isinstance(output_value := attributes.pop(OUTPUT_VALUE), str) + assert isinstance(json.loads(output_value), dict) + assert attributes.pop(LLM_MODEL_NAME) == "anthropic/claude-3-7-sonnet-20250219" + assert isinstance(inv_params := attributes.pop(LLM_INVOCATION_PARAMETERS), str) + assert json.loads(inv_params) == model_params + assert attributes.pop(f"{LLM_INPUT_MESSAGES}.0.{MESSAGE_ROLE}") == "user" + assert attributes.pop(f"{LLM_INPUT_MESSAGES}.0.{MESSAGE_CONTENT}") == input_message_content + assert isinstance(attributes.pop(LLM_TOKEN_COUNT_PROMPT), int) + assert isinstance(attributes.pop(LLM_TOKEN_COUNT_COMPLETION), int) + assert isinstance(attributes.pop(LLM_TOKEN_COUNT_TOTAL), int) + assert attributes.pop(f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_ROLE}") == "assistant" + assert ( + attributes.pop(f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_CONTENT}") == output_message_content + ) + assert isinstance(attributes.pop(f"{LLM_OUTPUT_MESSAGES}.0.message.reasoning_content"), str) + assert not attributes + class TestRun: @pytest.mark.xfail From 08a1bcd2031538b30bd3df389a87fab75b312545 Mon Sep 17 00:00:00 2001 From: Brandon Ruffridge Date: Tue, 20 May 2025 10:38:52 -0400 Subject: [PATCH 5/5] Update env.example added anthropic api key which is needed for the reasoning_content example. 
--- .../examples/env.example | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/instrumentation/openinference-instrumentation-smolagents/examples/env.example b/python/instrumentation/openinference-instrumentation-smolagents/examples/env.example index 360fa9564..bdb94b30e 100644 --- a/python/instrumentation/openinference-instrumentation-smolagents/examples/env.example +++ b/python/instrumentation/openinference-instrumentation-smolagents/examples/env.example @@ -2,6 +2,8 @@ OPENAI_API_KEY=sk-YOUR_API_KEY # API Key from https://e2b.dev/docs/legacy/getting-started/api-key E2B_API_KEY=e2b_YOUR_API_KEY +# API Key from https://console.anthropic.com/ +ANTHROPIC_API_KEY=YOUR_API_KEY # Phoenix listens on the default gRPC port 4317, so you don't need to change # exporter settings. If you prefer to export via HTTP, uncomment this: