13 changes: 7 additions & 6 deletions backend/onyx/chat/process_message.py
@@ -3,7 +3,7 @@
 import traceback
 from collections.abc import Callable
 from collections.abc import Iterator
-from typing import cast
+from typing import cast, List
 from typing import Protocol

 from sqlalchemy.orm import Session
@@ -253,6 +253,7 @@ def stream_chat_message_objects(
     # a string which represents the history of a conversation. Used in cases like
     # Slack threads where the conversation cannot be represented by a chain of User/Assistant
     # messages.
+    message_history: List[PreviousMessage] | None = None,
cubic-dev-ai (bot) commented on Sep 23, 2025:

Comment above this line is now misleading (describes a string). Update/move it to document single_message_history and add a brief comment for message_history.
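
One possible rearrangement along those lines (comment wording is illustrative, not taken from the PR):

    # Structured prior turns of the conversation; when None, it is rebuilt
    # from the persisted chat messages further down in this function.
    message_history: List[PreviousMessage] | None = None,
    # A string which represents the history of a conversation. Used in cases
    # like Slack threads where the conversation cannot be represented by a
    # chain of User/Assistant messages.
    # NOTE: is not stored in the database at all.
    single_message_history: str | None = None,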

     # NOTE: is not stored in the database at all.
     single_message_history: str | None = None,
 ) -> AnswerStream:
@@ -613,11 +614,11 @@ def stream_chat_message_objects(
     force_use_tool = _get_force_search_settings(
         new_msg_req, tools, search_tool_override_kwargs_for_user_files
     )

-    # TODO: unify message history with single message history
-    message_history = [
-        PreviousMessage.from_chat_message(msg, files) for msg in history_msgs
-    ]
+    if message_history is None:
+        # TODO: unify message history with single message history
+        message_history = [
+            PreviousMessage.from_chat_message(msg, files) for msg in history_msgs
+        ]
     if not search_tool_override_kwargs_for_user_files and in_memory_user_files:
         yield UserKnowledgeFilePacket(
             user_files=[
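
The None default in the hunk above lets the Slack handler inject a prebuilt history while every existing caller keeps the derived behavior. A minimal, self-contained illustration of the idiom (function and values are hypothetical):

    def stream_answer(message_history: list[str] | None = None) -> list[str]:
        if message_history is None:
            # Derive the default lazily: computing it here (rather than in the
            # signature) lets callers override it and avoids evaluating the
            # fallback when it is not needed.
            message_history = ["rebuilt", "from", "chat", "db"]
        return message_history

    assert stream_answer() == ["rebuilt", "from", "chat", "db"]
    assert stream_answer(["slack", "thread"]) == ["slack", "thread"]
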
32 changes: 26 additions & 6 deletions backend/onyx/onyxbot/slack/handlers/handle_regular_answer.py
@@ -28,6 +28,10 @@
 from onyx.db.persona import get_persona_by_id
 from onyx.db.persona import persona_has_search_tool
 from onyx.db.users import get_user_by_email
+from onyx.llm.factory import get_llms_for_persona
+from onyx.llm.models import PreviousMessage
+from onyx.llm.utils import check_number_of_tokens
+from onyx.natural_language_processing.utils import get_tokenizer
 from onyx.onyxbot.slack.blocks import build_slack_response_blocks
 from onyx.onyxbot.slack.handlers.utils import send_team_member_message
 from onyx.onyxbot.slack.handlers.utils import slackify_message_thread
@@ -130,13 +134,13 @@ def handle_regular_answer(
     with get_session_with_current_tenant() as db_session:
         expecting_search_result = persona_has_search_tool(persona.id, db_session)

-    # TODO: Add in support for Slack to truncate messages based on max LLM context
-    # llm, _ = get_llms_for_persona(persona)
+    # Get LLM configuration to create tokenizer for token counting
+    llm, _ = get_llms_for_persona(persona)

-    # llm_tokenizer = get_tokenizer(
-    #     model_name=llm.config.model_name,
-    #     provider_type=llm.config.model_provider,
-    # )
+    llm_tokenizer = get_tokenizer(
+        model_name=llm.config.model_name,
+        provider_type=llm.config.model_provider,
+    )

     # # In cases of threads, split the available tokens between docs and thread context
     # input_tokens = get_max_input_tokens(
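
check_number_of_tokens is handed the tokenizer's encode callable (see the loop in the next hunk), so it most plausibly measures the encoded length. A standalone sketch, with tiktoken standing in for Onyx's provider-specific get_tokenizer (an assumption, not the project's actual wiring):

    from collections.abc import Callable

    import tiktoken

    def count_tokens(text: str, encode: Callable[[str], list[int]]) -> int:
        # Mirrors the presumed behavior of check_number_of_tokens(text, encode).
        return len(encode(text))

    tokenizer = tiktoken.get_encoding("cl100k_base")  # stand-in tokenizer
    print(count_tokens("hello from the Slack thread", tokenizer.encode))
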
@@ -153,6 +157,21 @@
     # who previously posted in the thread.
     user_message = messages[-1]
     history_messages = messages[:-1]
+    message_history: list[PreviousMessage] = []
+    for message in history_messages:
+        # Calculate actual token count for the message
+        token_count = check_number_of_tokens(message.message, llm_tokenizer.encode)
+        message_history.append(
+            PreviousMessage(
+                message=message.message,
+                message_type=message.role,
+                token_count=token_count,
+                files=[],  # Empty list as default
+                tool_call=None,  # No tool calls in Slack message history
+                refined_answer_improvement=None,  # Not applicable for Slack messages
+                research_answer_purpose=None,  # Not applicable for Slack messages
+            )
+        )
     single_message_history = slackify_message_thread(history_messages) or None

     # Always check for ACL permissions, also for document sets that were explicitly added
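
With real per-message token counts in place, truncating Slack history to an LLM context budget (the TODO removed in the earlier hunk) becomes a simple walk from the newest message backwards. A hedged sketch, not part of this PR; the budget and the stand-in type are illustrative:

    from dataclasses import dataclass

    @dataclass
    class HistoryMessage:  # reduced stand-in for PreviousMessage
        message: str
        token_count: int

    def trim_history(
        history: list[HistoryMessage], max_tokens: int
    ) -> list[HistoryMessage]:
        # Keep the most recent messages whose combined token_count fits the budget.
        kept: list[HistoryMessage] = []
        used = 0
        for msg in reversed(history):
            if used + msg.token_count > max_tokens:
                break
            kept.append(msg)
            used += msg.token_count
        kept.reverse()
        return kept

    msgs = [HistoryMessage("a", 10), HistoryMessage("b", 5), HistoryMessage("c", 6)]
    assert trim_history(msgs, 12) == [HistoryMessage("b", 5), HistoryMessage("c", 6)]
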
@@ -184,6 +203,7 @@ def _get_slack_answer(
         db_session=db_session,
         bypass_acl=bypass_acl,
         single_message_history=single_message_history,
+        message_history=message_history,
     )
     answer = gather_stream(packets)