Skip to content

Commit ada4cd7

Browse files
feat: add configurable LLM summarization for DR custom tools
Add a DR_CUSTOM_TOOL_USE_LLM_SUMMARY config to control whether custom tool responses are summarized by the LLM in the DR agent. Defaults to true for backward compatibility. Set to false to return raw tool results directly.
1 parent f4d135d commit ada4cd7

File tree

4 files changed

+28
-9
lines changed

4 files changed

+28
-9
lines changed

backend/onyx/agents/agent_search/dr/sub_agents/custom_tool/dr_custom_tool_2_act.py

Lines changed: 19 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -121,15 +121,25 @@ def custom_tool_act(
121121
f"Result: {tool_result_str}"
122122
)
123123

124-
tool_summary_prompt = CUSTOM_TOOL_USE_PROMPT.build(
125-
query=branch_query, base_question=base_question, tool_response=tool_str
126-
)
127-
answer_string = str(
128-
graph_config.tooling.primary_llm.invoke(
129-
tool_summary_prompt, timeout_override=TF_DR_TIMEOUT_SHORT
130-
).content
131-
).strip()
132-
124+
# Use LLM summary if configured, otherwise use raw tool result
125+
if graph_config.tooling.dr_custom_tool_use_llm_summary:
126+
tool_summary_prompt = CUSTOM_TOOL_USE_PROMPT.build(
127+
query=branch_query, base_question=base_question, tool_response=tool_str
128+
)
129+
answer_string = str(
130+
graph_config.tooling.primary_llm.invoke(
131+
tool_summary_prompt, timeout_override=TF_DR_TIMEOUT_SHORT
132+
).content
133+
).strip()
134+
else:
135+
answer_string = tool_result_str
136+
# Format JSON response for better readability when not using LLM summary
137+
if response_summary.response_type == "json":
138+
try:
139+
parsed_json = json.loads(tool_result_str)
140+
answer_string = json.dumps(parsed_json, indent=2, ensure_ascii=False)
141+
except json.JSONDecodeError:
142+
answer_string = tool_result_str
133143
# get file_ids:
134144
file_ids = None
135145
if response_summary.response_type in {"image", "csv"} and hasattr(

backend/onyx/agents/agent_search/models.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,8 @@ class GraphTooling(BaseModel):
4040
# force tool args IF the tool is used
4141
force_use_tool: ForceUseTool
4242
using_tool_calling_llm: bool = False
43+
# Whether to use LLM to summarize custom tool responses in DR agent
44+
dr_custom_tool_use_llm_summary: bool = True
4345

4446
class Config:
4547
arbitrary_types_allowed = True

backend/onyx/chat/answer.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
from onyx.configs.agent_configs import AGENT_ALLOW_REFINEMENT
2121
from onyx.configs.agent_configs import INITIAL_SEARCH_DECOMPOSITION_ENABLED
2222
from onyx.configs.agent_configs import TF_DR_DEFAULT_FAST
23+
from onyx.configs.tool_configs import DR_CUSTOM_TOOL_USE_LLM_SUMMARY
2324
from onyx.context.search.models import RerankingDetails
2425
from onyx.db.kg_config import get_kg_config_settings
2526
from onyx.db.models import Persona
@@ -105,6 +106,7 @@ def __init__(
105106
tools=tools or [],
106107
force_use_tool=force_use_tool,
107108
using_tool_calling_llm=using_tool_calling_llm,
109+
dr_custom_tool_use_llm_summary=DR_CUSTOM_TOOL_USE_LLM_SUMMARY,
108110
)
109111
self.graph_persistence = GraphPersistence(
110112
db_session=db_session,

backend/onyx/configs/tool_configs.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,3 +24,8 @@
2424
logger.error(
2525
"Failed to parse CUSTOM_TOOL_PASS_THROUGH_HEADERS, must be a valid JSON object"
2626
)
27+
28+
# Whether to use LLM to summarize custom tool responses in DR agent
29+
DR_CUSTOM_TOOL_USE_LLM_SUMMARY = (
30+
os.environ.get("DR_CUSTOM_TOOL_USE_LLM_SUMMARY", "true").lower() == "true"
31+
)

0 commit comments

Comments (0)