
Commit 0b41733

1 parent ea45dc5 commit 0b41733

3 files changed (+11, -17 lines)

backend/onyx/llm/chat_llm.py

Lines changed: 3 additions & 7 deletions
@@ -175,17 +175,15 @@ def _convert_delta_to_message_chunk(
     stop_reason: str | None = None,
 ) -> BaseMessageChunk:
     configure_litellm()
-    import litellm
+    from litellm.utils import ChatCompletionDeltaToolCall

     """Adapted from langchain_community.chat_models.litellm._convert_delta_to_message_chunk"""
     role = _dict.get("role") or (_base_msg_to_role(curr_msg) if curr_msg else "unknown")
     content = _dict.get("content") or ""
     additional_kwargs = {}
     if _dict.get("function_call"):
         additional_kwargs.update({"function_call": dict(_dict["function_call"])})
-    tool_calls = cast(
-        list[litellm.utils.ChatCompletionDeltaToolCall] | None, _dict.get("tool_calls")
-    )
+    tool_calls = cast(list[ChatCompletionDeltaToolCall] | None, _dict.get("tool_calls"))

     if role == "user":
         return HumanMessageChunk(content=content)
@@ -323,9 +321,7 @@ def __init__(
         self._max_token_param = LEGACY_MAX_TOKENS_KWARG
         try:
             configure_litellm()
-            import litellm
-
-            get_supported_openai_params = litellm.utils.get_supported_openai_params
+            from litellm.utils import get_supported_openai_params

             params = get_supported_openai_params(model_name, model_provider)
             if STANDARD_MAX_TOKENS_KWARG in (params or []):
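The change above swaps a module-level import litellm for function-local from litellm.utils import ... statements, so importing chat_llm no longer pays litellm's import cost. A minimal sketch of the pattern (assuming litellm is installed; convert_tool_calls is an illustrative helper, not code from this repo):

from typing import Any, cast


def convert_tool_calls(delta: dict[str, Any]) -> list[Any] | None:
    # Deferred import: litellm is only loaded the first time this
    # function runs, keeping the enclosing module cheap to import.
    from litellm.utils import ChatCompletionDeltaToolCall

    return cast(list[ChatCompletionDeltaToolCall] | None, delta.get("tool_calls"))

Python caches modules in sys.modules, so after the first call the function-local import is just a dictionary lookup; only the initial call pays the load cost.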

backend/scripts/check_lazy_imports.py

Lines changed: 6 additions & 8 deletions
@@ -19,9 +19,9 @@

 @dataclass
 class LazyImportSettings:
-    """Settings for which directories to ignore when checking for lazy imports."""
+    """Settings for which files to ignore when checking for lazy imports."""

-    ignore_directories: Set[str] | None = None
+    ignore_files: Set[str] | None = None


 # Map of modules to lazy import -> settings for what to ignore
@@ -31,9 +31,7 @@ class LazyImportSettings:
     "markitdown": LazyImportSettings(),
     "tiktoken": LazyImportSettings(),
     "unstructured": LazyImportSettings(),
-    "litellm": LazyImportSettings(
-        ignore_directories={"onyx/llm/llm_provider_options.py"}
-    ),
+    "litellm": LazyImportSettings(ignore_files={"onyx/llm/llm_provider_options.py"}),
 }


@@ -150,12 +148,12 @@ def should_check_file_for_module(
     Args:
         file_path: Path to the file to check
         backend_dir: Path to the backend directory
-        settings: Settings containing directories to ignore for this module
+        settings: Settings containing files to ignore for this module

     Returns:
         True if the file should be checked, False if it should be ignored
     """
-    if not settings.ignore_directories:
+    if not settings.ignore_files:
         # Empty set means check everywhere
         return True

@@ -164,7 +162,7 @@ def should_check_file_for_module(
     rel_path_str = str(rel_path)

     # Check if this specific file path is in the ignore list
-    return rel_path_str not in settings.ignore_directories
+    return rel_path_str not in settings.ignore_files


 def main(modules_to_lazy_import: Dict[str, LazyImportSettings]) -> None:
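The rename from ignore_directories to ignore_files matches the actual behavior: entries are compared against a file's path relative to the backend directory, so they are individual file paths, not directory prefixes. A minimal self-contained sketch of that gating logic, based on the diff (the should_check helper and the example paths are illustrative assumptions):

from dataclasses import dataclass
from pathlib import Path
from typing import Set


@dataclass
class LazyImportSettings:
    """Settings for which files to ignore when checking for lazy imports."""

    ignore_files: Set[str] | None = None


def should_check(file_path: Path, backend_dir: Path, settings: LazyImportSettings) -> bool:
    if not settings.ignore_files:
        # None or an empty set means check everywhere
        return True
    rel_path_str = str(file_path.relative_to(backend_dir))
    # Skip only files explicitly listed for this module
    return rel_path_str not in settings.ignore_files


# Example: litellm may still be imported eagerly in llm_provider_options.py
settings = LazyImportSettings(ignore_files={"onyx/llm/llm_provider_options.py"})
print(should_check(Path("/backend/onyx/llm/chat_llm.py"), Path("/backend"), settings))  # True
print(should_check(Path("/backend/onyx/llm/llm_provider_options.py"), Path("/backend"), settings))  # False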

backend/tests/unit/onyx/llm/test_chat_llm.py

Lines changed: 2 additions & 2 deletions
@@ -45,7 +45,7 @@ def default_multi_llm() -> DefaultMultiLLM:

 def test_multiple_tool_calls(default_multi_llm: DefaultMultiLLM) -> None:
     # Mock the litellm.completion function
-    with patch("onyx.llm.chat_llm.litellm.completion") as mock_completion:
+    with patch("litellm.completion") as mock_completion:
         # Create a mock response with multiple tool calls using litellm objects
         mock_response = litellm.ModelResponse(
             id="chatcmpl-123",
@@ -158,7 +158,7 @@ def test_multiple_tool_calls(default_multi_llm: DefaultMultiLLM) -> None:

 def test_multiple_tool_calls_streaming(default_multi_llm: DefaultMultiLLM) -> None:
     # Mock the litellm.completion function
-    with patch("onyx.llm.chat_llm.litellm.completion") as mock_completion:
+    with patch("litellm.completion") as mock_completion:
         # Create a mock response with multiple tool calls using litellm objects
         mock_response = [
             litellm.ModelResponse(
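The patch target changes because chat_llm no longer holds a module-level litellm attribute, so patch("onyx.llm.chat_llm.litellm.completion") would no longer resolve. Since the production code now looks up litellm.completion at call time, patching the attribute on the litellm module itself is enough. A minimal sketch of why this works (assuming litellm is installed; call_completion is an illustrative stand-in for the production code path):

from unittest.mock import MagicMock, patch


def call_completion() -> object:
    # Lazy lookup: the litellm module attribute is resolved when this
    # runs, so a patch on "litellm.completion" is visible here.
    import litellm

    return litellm.completion(model="gpt-4", messages=[])


with patch("litellm.completion") as mock_completion:
    mock_completion.return_value = MagicMock()
    call_completion()
    mock_completion.assert_called_once()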
