Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -121,15 +121,25 @@ def custom_tool_act(
f"Result: {tool_result_str}"
)

tool_summary_prompt = CUSTOM_TOOL_USE_PROMPT.build(
query=branch_query, base_question=base_question, tool_response=tool_str
)
answer_string = str(
graph_config.tooling.primary_llm.invoke(
tool_summary_prompt, timeout_override=TF_DR_TIMEOUT_SHORT
).content
).strip()

# Use LLM summary if configured, otherwise use raw tool result
if graph_config.tooling.dr_custom_tool_use_llm_summary:
tool_summary_prompt = CUSTOM_TOOL_USE_PROMPT.build(
query=branch_query, base_question=base_question, tool_response=tool_str
)
answer_string = str(
graph_config.tooling.primary_llm.invoke(
tool_summary_prompt, timeout_override=TF_DR_TIMEOUT_SHORT
).content
).strip()
else:
answer_string = tool_result_str
# Format JSON response for better readability when not using LLM summary
if response_summary.response_type == "json":
try:
parsed_json = json.loads(tool_result_str)
answer_string = json.dumps(parsed_json, indent=2, ensure_ascii=False)
except json.JSONDecodeError:
answer_string = tool_result_str
# get file_ids:
file_ids = None
if response_summary.response_type in {"image", "csv"} and hasattr(
Expand Down
2 changes: 2 additions & 0 deletions backend/onyx/agents/agent_search/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,8 @@ class GraphTooling(BaseModel):
# force tool args IF the tool is used
force_use_tool: ForceUseTool
using_tool_calling_llm: bool = False
# Whether to use LLM to summarize custom tool responses in DR agent
dr_custom_tool_use_llm_summary: bool = True

class Config:
arbitrary_types_allowed = True
Expand Down
2 changes: 2 additions & 0 deletions backend/onyx/chat/answer.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
from onyx.configs.agent_configs import AGENT_ALLOW_REFINEMENT
from onyx.configs.agent_configs import INITIAL_SEARCH_DECOMPOSITION_ENABLED
from onyx.configs.agent_configs import TF_DR_DEFAULT_FAST
from onyx.configs.tool_configs import DR_CUSTOM_TOOL_USE_LLM_SUMMARY
from onyx.context.search.models import RerankingDetails
from onyx.db.kg_config import get_kg_config_settings
from onyx.db.models import Persona
Expand Down Expand Up @@ -105,6 +106,7 @@ def __init__(
tools=tools or [],
force_use_tool=force_use_tool,
using_tool_calling_llm=using_tool_calling_llm,
dr_custom_tool_use_llm_summary=DR_CUSTOM_TOOL_USE_LLM_SUMMARY,
)
self.graph_persistence = GraphPersistence(
db_session=db_session,
Expand Down
5 changes: 5 additions & 0 deletions backend/onyx/configs/tool_configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,3 +24,8 @@
logger.error(
"Failed to parse CUSTOM_TOOL_PASS_THROUGH_HEADERS, must be a valid JSON object"
)

# Controls whether the DR agent runs custom tool responses through an LLM
# summarization step. Enabled unless the env var is set to something other
# than "true" (case-insensitive).
_dr_summary_raw = os.environ.get("DR_CUSTOM_TOOL_USE_LLM_SUMMARY", "true")
DR_CUSTOM_TOOL_USE_LLM_SUMMARY = _dr_summary_raw.lower() == "true"
Loading