Skip to content

Commit 449cd3f

Browse files
proper order of messages
1 parent 03eab72 commit 449cd3f

File tree

5 files changed

+90
-81
lines changed

5 files changed

+90
-81
lines changed

backend/onyx/agents/agent_search/dr/nodes/dr_a0_clarification.py

Lines changed: 34 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,10 @@
8484

8585
logger = setup_logger()
8686

87+
_ANSWER_COMMENT_PROMPT = "I will now answer your question directly."
88+
89+
_CONSIDER_TOOLS_PROMPT = "I will now consider the tools and sub-agents that are available to answer your question."
90+
8791

8892
def _format_tool_name(tool_name: str) -> str:
8993
"""Convert tool name to LLM-friendly format."""
@@ -432,25 +436,19 @@ def clarifier(
432436
assistant_system_prompt = PromptTemplate(DEFAULT_DR_SYSTEM_PROMPT).build()
433437
assistant_task_prompt = ""
434438

435-
chat_history_string = (
436-
get_chat_history_string(
437-
graph_config.inputs.prompt_builder.message_history,
438-
MAX_CHAT_HISTORY_MESSAGES,
439-
)
440-
or "(No chat history yet available)"
441-
)
439+
# chat_history_string = (
440+
# get_chat_history_string(
441+
# graph_config.inputs.prompt_builder.message_history,
442+
# MAX_CHAT_HISTORY_MESSAGES,
443+
# )
444+
# or "(No chat history yet available)"
445+
# )
442446

443447
chat_history_messages = get_chat_history_messages(
444-
graph_config.inputs.prompt_builder.message_history, MAX_CHAT_HISTORY_MESSAGES
448+
graph_config.inputs.prompt_builder.raw_message_history,
449+
MAX_CHAT_HISTORY_MESSAGES,
445450
)
446451

447-
if len(chat_history_messages) > 0:
448-
chat_history_messages = [
449-
SystemMessage(content="Here are the previous messages in the chat history:")
450-
] + chat_history_messages
451-
else:
452-
chat_history_messages = []
453-
454452
uploaded_text_context = (
455453
_construct_uploaded_text_context(graph_config.inputs.files)
456454
if graph_config.inputs.files
@@ -495,7 +493,21 @@ def clarifier(
495493
message_history_for_continuation.append(SystemMessage(content=base_system_message))
496494
message_history_for_continuation.extend(chat_history_messages)
497495
message_history_for_continuation.extend(uploaded_file_messages)
498-
message_history_for_continuation.append(HumanMessage(content=original_question))
496+
497+
# Create message content that includes text and any available images
498+
message_content: list[dict[str, Any]] = [
499+
{"type": "text", "text": original_question}
500+
]
501+
if uploaded_image_context:
502+
message_content.extend(uploaded_image_context)
503+
504+
# If we only have text, use string content for backwards compatibility
505+
if len(message_content) == 1:
506+
message_history_for_continuation.append(HumanMessage(content=original_question))
507+
else:
508+
message_history_for_continuation.append(
509+
HumanMessage(content=cast(list[str | dict[Any, Any]], message_content))
510+
)
499511
message_history_for_continuation.append(AIMessage(content=QUESTION_CONFIRMATION))
500512

501513
if not (force_use_tool and force_use_tool.force_use):
@@ -508,7 +520,7 @@ def clarifier(
508520
if not use_tool_calling_llm or len(available_tools) == 1:
509521
if len(available_tools) > 1:
510522
message_history_for_continuation.append(
511-
SystemMessage(content=DECISION_PROMPT_WO_TOOL_CALLING)
523+
HumanMessage(content=DECISION_PROMPT_WO_TOOL_CALLING)
512524
)
513525

514526
llm_decision = invoke_llm_json(
@@ -535,6 +547,10 @@ def clarifier(
535547
reminder=reminder,
536548
)
537549

550+
message_history_for_continuation.append(
551+
AIMessage(content=_ANSWER_COMMENT_PROMPT)
552+
)
553+
538554
message_history_for_continuation.append(
539555
HumanMessage(content=answer_prompt)
540556
)
@@ -795,14 +811,7 @@ def clarifier(
795811
else:
796812
next_tool = DRPath.ORCHESTRATOR.value
797813

798-
if research_type == ResearchType.DEEP and clarification:
799-
message_history_for_continuation.append(
800-
AIMessage(content=clarification.clarification_question)
801-
)
802-
if clarification.clarification_response:
803-
message_history_for_continuation.append(
804-
HumanMessage(content=clarification.clarification_response)
805-
)
814+
message_history_for_continuation.append(AIMessage(content=_CONSIDER_TOOLS_PROMPT))
806815

807816
return OrchestrationSetup(
808817
original_question=original_question,

backend/onyx/agents/agent_search/dr/nodes/dr_a1_orchestrator.py

Lines changed: 33 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@
3636
from onyx.configs.agent_configs import TF_DR_TIMEOUT_LONG
3737
from onyx.configs.agent_configs import TF_DR_TIMEOUT_SHORT
3838
from onyx.prompts.dr_prompts import DEFAULLT_DECISION_PROMPT
39-
from onyx.prompts.dr_prompts import NEXT_TOOL_PURPOSE_PROMPT
4039
from onyx.prompts.dr_prompts import REPEAT_PROMPT
4140
from onyx.prompts.dr_prompts import SUFFICIENT_INFORMATION_STRING
4241
from onyx.prompts.dr_prompts import TOOL_CHOICE_WRAPPER_PROMPT
@@ -50,6 +49,10 @@
5049
_DECISION_SYSTEM_PROMPT_PREFIX = "Here are general instructions by the user, which \
5150
may or may not influence the decision what to do next:\n\n"
5251

52+
_PLAN_OF_RECORD_PROMPT = "Can you create a plan of record?"
53+
54+
_NEXT_ACTION_PROMPT = "What should be the next action?"
55+
5356

5457
def _get_implied_next_tool_based_on_tool_call_history(
5558
tools_used: list[str],
@@ -86,7 +89,7 @@ def orchestrator(
8689
clarification = state.clarification
8790
assistant_system_prompt = state.assistant_system_prompt
8891

89-
message_history_for_continuation = state.orchestration_llm_messages
92+
message_history_for_continuation = list(state.orchestration_llm_messages)
9093
new_messages: list[SystemMessage | HumanMessage | AIMessage] = []
9194

9295
if assistant_system_prompt:
@@ -126,24 +129,19 @@ def orchestrator(
126129
or "(No answer history yet available)"
127130
)
128131

129-
if (
130-
research_type == ResearchType.DEEP
131-
and most_recent_answer_history_wo_docs_string
132-
!= "(No answer history yet available)"
133-
):
134-
message_history_for_continuation.append(
135-
AIMessage(content=most_recent_answer_history_wo_docs_string)
136-
)
137-
new_messages.append(
138-
AIMessage(content=most_recent_answer_history_wo_docs_string)
139-
)
140-
elif (
141-
most_recent_answer_history_w_docs_string != "(No answer history yet available)"
142-
):
143-
message_history_for_continuation.append(
144-
AIMessage(content=most_recent_answer_history_w_docs_string)
145-
)
146-
new_messages.append(AIMessage(content=most_recent_answer_history_w_docs_string))
132+
human_text = ai_text = ""
133+
if most_recent_answer_history_wo_docs_string != "(No answer history yet available)":
134+
human_text = f"Results from Iteration {iteration_nr - 1}?"
135+
if research_type == ResearchType.DEEP:
136+
ai_text = most_recent_answer_history_wo_docs_string
137+
else:
138+
ai_text = most_recent_answer_history_w_docs_string
139+
140+
message_history_for_continuation.append(HumanMessage(content=human_text))
141+
new_messages.append(HumanMessage(content=human_text))
142+
143+
message_history_for_continuation.append(AIMessage(content=ai_text))
144+
new_messages.append(AIMessage(content=ai_text))
147145

148146
next_tool_name = None
149147

@@ -367,8 +365,6 @@ def orchestrator(
367365
uploaded_context=uploaded_context,
368366
)
369367

370-
message_history_for_continuation.append(HumanMessage(content=decision_prompt))
371-
372368
if remaining_time_budget > 0:
373369
try:
374370
orchestrator_action = invoke_llm_json(
@@ -440,6 +436,18 @@ def orchestrator(
440436
writer,
441437
)
442438

439+
message_history_for_continuation.append(
440+
HumanMessage(content=_PLAN_OF_RECORD_PROMPT)
441+
)
442+
new_messages.append(HumanMessage(content=_PLAN_OF_RECORD_PROMPT))
443+
444+
message_history_for_continuation.append(
445+
AIMessage(content=f"{HIGH_LEVEL_PLAN_PREFIX}\n\n {plan_of_record.plan}")
446+
)
447+
new_messages.append(
448+
AIMessage(content=f"{HIGH_LEVEL_PLAN_PREFIX}\n\n {plan_of_record.plan}")
449+
)
450+
443451
start_time = datetime.now()
444452

445453
repeat_plan_prompt = REPEAT_PROMPT.build(
@@ -573,16 +581,14 @@ def orchestrator(
573581
questions="\n - " + "\n - ".join(query_list or []),
574582
)
575583

584+
message_history_for_continuation.append(HumanMessage(content=_NEXT_ACTION_PROMPT))
585+
new_messages.append(HumanMessage(content=_NEXT_ACTION_PROMPT))
586+
576587
message_history_for_continuation.append(
577588
AIMessage(content=tool_choice_wrapper_prompt)
578589
)
579590
new_messages.append(AIMessage(content=tool_choice_wrapper_prompt))
580591

581-
message_history_for_continuation.append(
582-
HumanMessage(content=NEXT_TOOL_PURPOSE_PROMPT)
583-
)
584-
new_messages.append(HumanMessage(content=NEXT_TOOL_PURPOSE_PROMPT))
585-
586592
purpose_tokens: list[str] = [""]
587593
purpose = ""
588594

backend/onyx/agents/agent_search/dr/nodes/dr_a2_closer.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,11 @@
5555
from onyx.utils.logger import setup_logger
5656
from onyx.utils.threadpool_concurrency import run_with_timeout
5757

58+
5859
logger = setup_logger()
5960

61+
_SOURCE_MATERIAL_PROMPT = "Can you please put together all of the supporting material?"
62+
6063

6164
def extract_citation_numbers(text: str) -> list[int]:
6265
"""
@@ -334,6 +337,10 @@ def closer(
334337
final_user_instructions=assistant_task_prompt
335338
or "(No final user instructions)",
336339
)
340+
341+
message_history_for_final_answer.append(
342+
HumanMessage(content=_SOURCE_MATERIAL_PROMPT)
343+
)
337344
message_history_for_final_answer.append(
338345
AIMessage(
339346
content=FINAL_ANSWER_DEEP_CITATION_PROMPT.build(

backend/onyx/agents/agent_search/dr/utils.py

Lines changed: 15 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
from langchain.schema.messages import AIMessage
55
from langchain.schema.messages import BaseMessage
66
from langchain.schema.messages import HumanMessage
7-
from langchain.schema.messages import SystemMessage
87

98
from onyx.agents.agent_search.dr.models import AggregatedDRContext
109
from onyx.agents.agent_search.dr.models import IterationAnswer
@@ -13,9 +12,11 @@
1312
from onyx.agents.agent_search.shared_graph_utils.operators import (
1413
dedup_inference_section_list,
1514
)
15+
from onyx.configs.constants import MessageType
1616
from onyx.context.search.models import InferenceSection
1717
from onyx.context.search.models import SavedSearchDoc
1818
from onyx.context.search.utils import chunks_or_sections_to_search_docs
19+
from onyx.llm.models import PreviousMessage
1920
from onyx.tools.tool_implementations.web_search.web_search_tool import (
2021
WebSearchTool,
2122
)
@@ -242,38 +243,24 @@ def get_chat_history_string(chat_history: list[BaseMessage], max_messages: int)
242243

243244

244245
def get_chat_history_messages(
245-
chat_history: list[BaseMessage], max_messages: int
246-
) -> list[SystemMessage | HumanMessage | AIMessage]:
246+
chat_history: list[PreviousMessage], max_messages: int
247+
) -> list[HumanMessage | AIMessage]:
247248
"""
248249
Get the chat history (up to max_messages) as a list of messages.
249250
"""
250-
past_messages = chat_history[-max_messages * 2 :]
251-
filtered_past_messages = copy.deepcopy(past_messages) # type: ignore
252-
for past_message_number, past_message in enumerate(past_messages):
253-
254-
if isinstance(past_message.content, list):
255-
removal_indices = []
256-
for content_piece_number, content_piece in enumerate(past_message.content):
257-
if (
258-
isinstance(content_piece, dict)
259-
and content_piece.get("type") != "text"
260-
):
261-
removal_indices.append(content_piece_number)
262-
263-
# Only rebuild the content list if there are items to remove
264-
if removal_indices:
265-
filtered_past_messages[past_message_number].content = [
266-
content_piece
267-
for content_piece_number, content_piece in enumerate(
268-
past_message.content
269-
)
270-
if content_piece_number not in removal_indices
271-
]
272-
251+
past_raw_messages = chat_history[-max_messages * 2 :]
252+
filtered_past_raw_messages = []
253+
for past_raw_message_number, past_raw_message in enumerate(past_raw_messages):
254+
if past_raw_message.message_type == MessageType.USER:
255+
filtered_past_raw_messages.append(
256+
HumanMessage(content=past_raw_message.message)
257+
)
273258
else:
274-
continue
259+
filtered_past_raw_messages.append(
260+
AIMessage(content=past_raw_message.message)
261+
)
275262

276-
return filtered_past_messages # type: ignore
263+
return filtered_past_raw_messages # type: ignore
277264

278265

279266
def get_prompt_question(

backend/onyx/prompts/dr_prompts.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1857,7 +1857,7 @@
18571857

18581858
TOOL_CHOICE_WRAPPER_PROMPT = PromptTemplate(
18591859
f"""
1860-
Here are the tools/sub-agents and tool calls that were determined to be needed next to answer the user's question:
1860+
Here are the tools/sub-agent calls that were determined to be needed next to answer the user's question:
18611861
18621862
#TOOL CALLS
18631863
{SEPARATOR_LINE}

0 commit comments

Comments
 (0)