17 changes: 16 additions & 1 deletion backend/onyx/tools/utils.py
@@ -1,11 +1,13 @@
import json

import litellm
from sqlalchemy.orm import Session

from onyx.configs.app_configs import AZURE_DALLE_API_KEY
from onyx.db.connector import check_connectors_exist
from onyx.db.document import check_docs_exist
from onyx.db.models import LLMProvider
from onyx.llm.llm_provider_options import ANTHROPIC_PROVIDER_NAME
from onyx.llm.utils import find_model_obj
from onyx.llm.utils import get_model_map
from onyx.natural_language_processing.utils import BaseTokenizer
@@ -20,7 +22,20 @@ def explicit_tool_calling_supported(model_provider: str, model_name: str) -> bool:
model_name=model_name,
)

return model_obj.get("supports_function_calling", False) if model_obj else False
model_supports = (
model_obj.get("supports_function_calling", False) if model_obj else False
)
# Anthropic models support tool calling, but:
# a) they raise an error if you pass any tool messages without also passing a list of tools.
# b) they emit text before and after the tool calls they generate.
# We don't want to pass that list of tools because our UI doesn't yet support sequential
# tool calling (a), and the surrounding text just looks bad (b), so for now we treat
# Anthropic models as non-tool-calling.
return (
model_supports
and model_provider != ANTHROPIC_PROVIDER_NAME
and model_name not in litellm.anthropic_models
)


def compute_tool_tokens(tool: Tool, llm_tokenizer: BaseTokenizer) -> int:
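For reference, a minimal usage sketch of the gate added above (not part of the PR; it assumes the Onyx backend and litellm are importable, and that litellm's model map and anthropic_models list behave the way the tests below mock them, e.g. gpt-4o is marked as supporting function calling):

# Illustrative only -- mirrors the parametrized cases in the test file below.
from onyx.llm.llm_provider_options import ANTHROPIC_PROVIDER_NAME
from onyx.tools.utils import explicit_tool_calling_supported

# Non-Anthropic provider whose model supports function calling -> True
print(explicit_tool_calling_supported("openai", "gpt-4o"))

# The Anthropic provider is excluded outright, even though Claude models do support tool calling -> False
print(explicit_tool_calling_supported(ANTHROPIC_PROVIDER_NAME, "claude-3-opus-20240229"))

# A model name listed in litellm.anthropic_models is excluded regardless of provider -> False
print(explicit_tool_calling_supported("another-provider", "claude-3-haiku-20240307"))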
87 changes: 87 additions & 0 deletions backend/tests/unit/onyx/tools/test_utils.py
@@ -0,0 +1,87 @@
from unittest.mock import MagicMock
from unittest.mock import patch

import pytest

from onyx.llm.llm_provider_options import ANTHROPIC_PROVIDER_NAME
from onyx.tools.utils import explicit_tool_calling_supported


@pytest.mark.parametrize(
"model_provider, model_name, mock_model_supports_fc, mock_litellm_anthropic_models, expected_result",
[
# === Anthropic Scenarios (expected False due to override) ===
# Provider is Anthropic, base model claims FC support
(ANTHROPIC_PROVIDER_NAME, "claude-3-opus-20240229", True, [], False),
# Model name in litellm.anthropic_models, base model claims FC support
(
"another-provider",
"claude-3-haiku-20240307",
True,
["claude-3-haiku-20240307"],
False,
),
# Both provider is Anthropic AND model name in litellm.anthropic_models, base model claims FC support
(
ANTHROPIC_PROVIDER_NAME,
"claude-3-sonnet-20240229",
True,
["claude-3-sonnet-20240229"],
False,
),
# === Anthropic Scenarios (expected False due to base support being False) ===
# Provider is Anthropic, base model does NOT claim FC support
(ANTHROPIC_PROVIDER_NAME, "claude-2.1", False, [], False),
# === Non-Anthropic Scenarios ===
# Non-Anthropic provider, base model claims FC support -> True
("openai", "gpt-4o", True, [], True),
# Non-Anthropic provider, base model does NOT claim FC support -> False
("openai", "gpt-3.5-turbo-instruct", False, [], False),
# Non-Anthropic provider, but model name is in litellm.anthropic_models -> False
# (the model-name check applies regardless of provider)
(
"yet-another-provider",
"model-also-in-anthropic-list",
True,
["model-also-in-anthropic-list"],
False,
),
# Control for the above: Non-Anthropic provider, model NOT in litellm list, supports FC -> True
(
"yet-another-provider",
"some-other-model",
True,
["model-NOT-this-one"],
True,
),
],
)
@patch("onyx.tools.utils.find_model_obj")
@patch("onyx.tools.utils.litellm")
def test_explicit_tool_calling_supported(
mock_litellm: MagicMock,
mock_find_model_obj: MagicMock,
model_provider: str,
model_name: str,
mock_model_supports_fc: bool,
mock_litellm_anthropic_models: list[str],
expected_result: bool,
) -> None:
"""
Anthropic models support tool calling, but:
a) they raise an error if you pass any tool messages without also passing a list of tools.
b) they emit text before and after the tool calls they generate.
We don't want to pass that list of tools because our UI doesn't yet support sequential
tool calling (a), and the surrounding text just looks bad (b), so for now we treat
Anthropic models as non-tool-calling.
"""
mock_find_model_obj.return_value = {
"supports_function_calling": mock_model_supports_fc
}
mock_litellm.anthropic_models = mock_litellm_anthropic_models

# get_model_map is called inside explicit_tool_calling_supported before find_model_obj,
# but its return value doesn't affect the mocked find_model_obj.
# So, no need to mock get_model_map separately if find_model_obj is fully mocked.

actual_result = explicit_tool_calling_supported(model_provider, model_name)
assert actual_result == expected_result
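A note on the patching strategy: patch("onyx.tools.utils.litellm") and patch("onyx.tools.utils.find_model_obj") replace those names inside the onyx.tools.utils module, which is where explicit_tool_calling_supported looks them up. A trimmed-down, hypothetical sketch of the same idea (assuming the Onyx backend is importable):

from unittest.mock import patch

from onyx.tools.utils import explicit_tool_calling_supported

with patch("onyx.tools.utils.find_model_obj") as mock_find_model_obj, patch(
    "onyx.tools.utils.litellm"
) as mock_litellm:
    mock_find_model_obj.return_value = {"supports_function_calling": True}
    mock_litellm.anthropic_models = ["claude-3-haiku-20240307"]

    # The model-name check applies even when the provider is not Anthropic.
    assert explicit_tool_calling_supported("another-provider", "claude-3-haiku-20240307") is False
    # Non-Anthropic provider, model not in the Anthropic list -> True
    assert explicit_tool_calling_supported("openai", "gpt-4o") is True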