Skip to content

Commit 44bbc6c

Browse files
evan-onyx and ZhipengHe
authored and committed
anthropic fix (onyx-dot-app#4733)
* anthropic fix
* naming
1 parent 86eab10 commit 44bbc6c

File tree

2 files changed

+103
-1
lines changed

2 files changed

+103
-1
lines changed

backend/onyx/tools/utils.py

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
11
import json
22

3+
import litellm
34
from sqlalchemy.orm import Session
45

56
from onyx.configs.app_configs import AZURE_DALLE_API_KEY
67
from onyx.db.connector import check_connectors_exist
78
from onyx.db.document import check_docs_exist
89
from onyx.db.models import LLMProvider
10+
from onyx.llm.llm_provider_options import ANTHROPIC_PROVIDER_NAME
911
from onyx.llm.utils import find_model_obj
1012
from onyx.llm.utils import get_model_map
1113
from onyx.natural_language_processing.utils import BaseTokenizer
@@ -20,7 +22,20 @@ def explicit_tool_calling_supported(model_provider: str, model_name: str) -> boo
2022
model_name=model_name,
2123
)
2224

23-
return model_obj.get("supports_function_calling", False) if model_obj else False
25+
model_supports = (
26+
model_obj.get("supports_function_calling", False) if model_obj else False
27+
)
28+
# Anthropic models support tool calling, but
29+
# a) will raise an error if you provide any tool messages and don't provide a list of tools.
30+
# b) will send text before and after generating tool calls.
31+
# We don't want to provide that list of tools because our UI doesn't support sequential
32+
# tool calling yet for (a) and just looks bad for (b), so for now we just treat anthropic
33+
# models as non-tool-calling.
34+
return (
35+
model_supports
36+
and model_provider != ANTHROPIC_PROVIDER_NAME
37+
and model_name not in litellm.anthropic_models
38+
)
2439

2540

2641
def compute_tool_tokens(tool: Tool, llm_tokenizer: BaseTokenizer) -> int:
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
from unittest.mock import MagicMock
2+
from unittest.mock import patch
3+
4+
import pytest
5+
6+
from onyx.llm.llm_provider_options import ANTHROPIC_PROVIDER_NAME
7+
from onyx.tools.utils import explicit_tool_calling_supported
8+
9+
10+
@pytest.mark.parametrize(
    "model_provider, model_name, mock_model_supports_fc, mock_litellm_anthropic_models, expected_result",
    [
        # === Anthropic Scenarios (expected False due to override) ===
        # Provider is Anthropic, base model claims FC support
        (ANTHROPIC_PROVIDER_NAME, "claude-3-opus-20240229", True, [], False),
        # Model name in litellm.anthropic_models, base model claims FC support
        (
            "another-provider",
            "claude-3-haiku-20240307",
            True,
            ["claude-3-haiku-20240307"],
            False,
        ),
        # Both provider is Anthropic AND model name in litellm.anthropic_models, base model claims FC support
        (
            ANTHROPIC_PROVIDER_NAME,
            "claude-3-sonnet-20240229",
            True,
            ["claude-3-sonnet-20240229"],
            False,
        ),
        # === Anthropic Scenarios (expected False due to base support being False) ===
        # Provider is Anthropic, base model does NOT claim FC support
        (ANTHROPIC_PROVIDER_NAME, "claude-2.1", False, [], False),
        # === Non-Anthropic Scenarios ===
        # Non-Anthropic provider, base model claims FC support -> True
        ("openai", "gpt-4o", True, [], True),
        # Non-Anthropic provider, base model does NOT claim FC support -> False
        ("openai", "gpt-3.5-turbo-instruct", False, [], False),
        # Non-Anthropic provider, but model name is in litellm.anthropic_models -> False:
        # the name-based Anthropic check overrides regardless of the provider string
        (
            "yet-another-provider",
            "model-also-in-anthropic-list",
            True,
            ["model-also-in-anthropic-list"],
            False,
        ),
        # Control for the above: Non-Anthropic provider, model NOT in litellm list, supports FC -> True
        (
            "yet-another-provider",
            "some-other-model",
            True,
            ["model-NOT-this-one"],
            True,
        ),
    ],
)
# NOTE: decorators apply bottom-up, so the mock arguments arrive in reverse
# order: mock_litellm (innermost patch) first, then mock_find_model_obj.
@patch("onyx.tools.utils.find_model_obj")
@patch("onyx.tools.utils.litellm")
def test_explicit_tool_calling_supported(
    mock_litellm: MagicMock,
    mock_find_model_obj: MagicMock,
    model_provider: str,
    model_name: str,
    mock_model_supports_fc: bool,
    mock_litellm_anthropic_models: list[str],
    expected_result: bool,
) -> None:
    """Verify that explicit_tool_calling_supported force-disables tool calling
    for Anthropic models (by provider name or by litellm model-name lookup).

    Anthropic models support tool calling, but
    a) will raise an error if you provide any tool messages and don't provide a list of tools.
    b) will send text before and after generating tool calls.
    We don't want to provide that list of tools because our UI doesn't support sequential
    tool calling yet for (a) and just looks bad for (b), so for now we just treat anthropic
    models as non-tool-calling.
    """
    # Stub out the model-capability lookup so each case controls whether the
    # base model claims function-calling support.
    mock_find_model_obj.return_value = {
        "supports_function_calling": mock_model_supports_fc
    }
    # Control the litellm Anthropic model-name list consulted by the function.
    mock_litellm.anthropic_models = mock_litellm_anthropic_models

    # get_model_map is called inside explicit_tool_calling_supported before find_model_obj,
    # but its return value doesn't affect the mocked find_model_obj.
    # So, no need to mock get_model_map separately if find_model_obj is fully mocked.

    actual_result = explicit_tool_calling_supported(model_provider, model_name)
    assert actual_result == expected_result

0 commit comments

Comments
 (0)