
Commit 9e473e0

google-genai-bot authored and copybara-github committed
fix: Include current turn context when include_contents='none'
The intended behavior for include_contents='none' is to:
- Exclude conversation history from previous turns
- Still include current turn context (user input, tool calls/responses within current turn)

https://google.github.io/adk-docs/agents/llm-agents/#managing-context-include_contents

This resolves #1124

PiperOrigin-RevId: 775400036
1 parent acbdca0 · commit 9e473e0

File tree: 3 files changed, +296 -7 lines changed

src/google/adk/agents/llm_agent.py

Lines changed: 5 additions & 3 deletions
@@ -161,10 +161,12 @@ class LlmAgent(BaseAgent):
   # LLM-based agent transfer configs - End
 
   include_contents: Literal['default', 'none'] = 'default'
-  """Whether to include contents in the model request.
+  """Controls content inclusion in model requests.
 
-  When set to 'none', the model request will not include any contents, such as
-  user messages, tool results, etc.
+  Options:
+    default: Model receives relevant conversation history
+    none: Model receives no prior history, operates solely on current
+      instruction and input
   """
 
   # Controlled input/output configurations - Start

src/google/adk/flows/llm_flows/contents.py

Lines changed: 49 additions & 4 deletions
@@ -43,12 +43,20 @@ async def run_async(
     if not isinstance(agent, LlmAgent):
       return
 
-    if agent.include_contents != 'none':
+    if agent.include_contents == 'default':
+      # Include full conversation history
       llm_request.contents = _get_contents(
           invocation_context.branch,
           invocation_context.session.events,
           agent.name,
       )
+    else:
+      # Include current turn context only (no conversation history)
+      llm_request.contents = _get_current_turn_contents(
+          invocation_context.branch,
+          invocation_context.session.events,
+          agent.name,
+      )
 
     # Maintain async generator behavior
     if False:  # Ensures it behaves as a generator
@@ -190,13 +198,15 @@ def _get_contents(
 ) -> list[types.Content]:
   """Get the contents for the LLM request.
 
+  Applies filtering, rearrangement, and content processing to events.
+
   Args:
     current_branch: The current branch of the agent.
-    events: A list of events.
+    events: Events to process.
     agent_name: The name of the agent.
 
   Returns:
-    A list of contents.
+    A list of processed contents.
   """
   filtered_events = []
   # Parse the events, leaving the contents and the function calls and
@@ -211,25 +221,29 @@ def _get_contents(
       # Skip events without content, or generated neither by user nor by model
       # or has empty text.
       # E.g. events purely for mutating session states.
+
       continue
     if not _is_event_belongs_to_branch(current_branch, event):
       # Skip events not belong to current branch.
       continue
     if _is_auth_event(event):
-      # skip auth event
+      # Skip auth events.
       continue
     filtered_events.append(
         _convert_foreign_event(event)
         if _is_other_agent_reply(agent_name, event)
         else event
     )
 
+  # Rearrange events for proper function call/response pairing
   result_events = _rearrange_events_for_latest_function_response(
       filtered_events
   )
   result_events = _rearrange_events_for_async_function_responses_in_history(
       result_events
   )
+
+  # Convert events to contents
   contents = []
   for event in result_events:
     content = copy.deepcopy(event.content)
@@ -238,6 +252,37 @@ def _get_contents(
   return contents
 
 
+def _get_current_turn_contents(
+    current_branch: Optional[str], events: list[Event], agent_name: str = ''
+) -> list[types.Content]:
+  """Get contents for the current turn only (no conversation history).
+
+  When include_contents='none', we want to include:
+  - The current user input
+  - Tool calls and responses from the current turn
+  But exclude conversation history from previous turns.
+
+  In multi-agent scenarios, the "current turn" for an agent starts from an
+  actual user or from another agent.
+
+  Args:
+    current_branch: The current branch of the agent.
+    events: A list of all session events.
+    agent_name: The name of the agent.
+
+  Returns:
+    A list of contents for the current turn only, preserving context needed
+    for proper tool execution while excluding conversation history.
+  """
+  # Find the latest event that starts the current turn and process from there
+  for i in range(len(events) - 1, -1, -1):
+    event = events[i]
+    if event.author == 'user' or _is_other_agent_reply(agent_name, event):
+      return _get_contents(current_branch, events[i:], agent_name)
+
+  return []
+
+
 def _is_other_agent_reply(current_agent_name: str, event: Event) -> bool:
   """Whether the event is a reply from another agent."""
   return bool(
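To make the turn-boundary scan above easier to follow in isolation, here is a simplified, self-contained sketch. FakeEvent, _is_turn_start, and current_turn_slice are illustrative stand-ins invented for this example; the real _get_current_turn_contents uses the ADK Event type, checks _is_other_agent_reply, and delegates to _get_contents for branch filtering and content conversion:

    from dataclasses import dataclass


    @dataclass
    class FakeEvent:
      # Hypothetical stand-in for an ADK session event; only 'author' matters here.
      author: str
      text: str


    def _is_turn_start(author: str, agent_name: str) -> bool:
      # A turn starts at a real user input or at a hand-off from a different agent.
      return author == 'user' or author != agent_name


    def current_turn_slice(events: list, agent_name: str) -> list:
      # Walk backwards to the most recent turn-start event and keep everything
      # from there on, mirroring the scan in _get_current_turn_contents.
      for i in range(len(events) - 1, -1, -1):
        if _is_turn_start(events[i].author, agent_name):
          return events[i:]
      return []


    events = [
        FakeEvent('user', 'First message'),
        FakeEvent('my_agent', 'First response'),
        FakeEvent('user', 'Second message'),
        FakeEvent('my_agent', 'function call for the second message'),
    ]
    print([e.text for e in current_turn_slice(events, 'my_agent')])
    # -> ['Second message', 'function call for the second message']

Only the latest user (or other-agent) event and everything after it survives, which is what keeps current-turn tool calls and responses in the request while dropping earlier turns.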
Lines changed: 242 additions & 0 deletions
@@ -0,0 +1,242 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests for LlmAgent include_contents field behavior."""
+
+from google.adk.agents.llm_agent import LlmAgent
+from google.adk.agents.sequential_agent import SequentialAgent
+from google.genai import types
+import pytest
+
+from .. import testing_utils
+
+
+@pytest.mark.asyncio
+async def test_include_contents_default_behavior():
+  """Test that include_contents='default' preserves conversation history including tool interactions."""
+
+  def simple_tool(message: str) -> dict:
+    return {"result": f"Tool processed: {message}"}
+
+  mock_model = testing_utils.MockModel.create(
+      responses=[
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "first"}
+          ),
+          "First response",
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "second"}
+          ),
+          "Second response",
+      ]
+  )
+
+  agent = LlmAgent(
+      name="test_agent",
+      model=mock_model,
+      include_contents="default",
+      instruction="You are a helpful assistant",
+      tools=[simple_tool],
+  )
+
+  runner = testing_utils.InMemoryRunner(agent)
+  runner.run("First message")
+  runner.run("Second message")
+
+  # First turn requests
+  assert testing_utils.simplify_contents(mock_model.requests[0].contents) == [
+      ("user", "First message")
+  ]
+
+  assert testing_utils.simplify_contents(mock_model.requests[1].contents) == [
+      ("user", "First message"),
+      (
+          "model",
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "first"}
+          ),
+      ),
+      (
+          "user",
+          types.Part.from_function_response(
+              name="simple_tool", response={"result": "Tool processed: first"}
+          ),
+      ),
+  ]
+
+  # Second turn should include full conversation history
+  assert testing_utils.simplify_contents(mock_model.requests[2].contents) == [
+      ("user", "First message"),
+      (
+          "model",
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "first"}
+          ),
+      ),
+      (
+          "user",
+          types.Part.from_function_response(
+              name="simple_tool", response={"result": "Tool processed: first"}
+          ),
+      ),
+      ("model", "First response"),
+      ("user", "Second message"),
+  ]
+
+  # Second turn with tool should include full history + current tool interaction
+  assert testing_utils.simplify_contents(mock_model.requests[3].contents) == [
+      ("user", "First message"),
+      (
+          "model",
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "first"}
+          ),
+      ),
+      (
+          "user",
+          types.Part.from_function_response(
+              name="simple_tool", response={"result": "Tool processed: first"}
+          ),
+      ),
+      ("model", "First response"),
+      ("user", "Second message"),
+      (
+          "model",
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "second"}
+          ),
+      ),
+      (
+          "user",
+          types.Part.from_function_response(
+              name="simple_tool", response={"result": "Tool processed: second"}
+          ),
+      ),
+  ]
+
+
+@pytest.mark.asyncio
+async def test_include_contents_none_behavior():
+  """Test that include_contents='none' excludes conversation history but includes current input."""
+
+  def simple_tool(message: str) -> dict:
+    return {"result": f"Tool processed: {message}"}
+
+  mock_model = testing_utils.MockModel.create(
+      responses=[
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "first"}
+          ),
+          "First response",
+          "Second response",
+      ]
+  )
+
+  agent = LlmAgent(
+      name="test_agent",
+      model=mock_model,
+      include_contents="none",
+      instruction="You are a helpful assistant",
+      tools=[simple_tool],
+  )
+
+  runner = testing_utils.InMemoryRunner(agent)
+  runner.run("First message")
+  runner.run("Second message")
+
+  # First turn behavior
+  assert testing_utils.simplify_contents(mock_model.requests[0].contents) == [
+      ("user", "First message")
+  ]
+
+  assert testing_utils.simplify_contents(mock_model.requests[1].contents) == [
+      ("user", "First message"),
+      (
+          "model",
+          types.Part.from_function_call(
+              name="simple_tool", args={"message": "first"}
+          ),
+      ),
+      (
+          "user",
+          types.Part.from_function_response(
+              name="simple_tool", response={"result": "Tool processed: first"}
+          ),
+      ),
+  ]
+
+  # Second turn should only have current input, no history
+  assert testing_utils.simplify_contents(mock_model.requests[2].contents) == [
+      ("user", "Second message")
+  ]
+
+  # System instruction and tools should be preserved
+  assert (
+      "You are a helpful assistant"
+      in mock_model.requests[0].config.system_instruction
+  )
+  assert len(mock_model.requests[0].config.tools) > 0
+
+
+@pytest.mark.asyncio
+async def test_include_contents_none_sequential_agents():
+  """Test include_contents='none' with sequential agents."""
+
+  agent1_model = testing_utils.MockModel.create(
+      responses=["Agent1 response: XYZ"]
+  )
+  agent1 = LlmAgent(
+      name="agent1",
+      model=agent1_model,
+      instruction="You are Agent1",
+  )
+
+  agent2_model = testing_utils.MockModel.create(
+      responses=["Agent2 final response"]
+  )
+  agent2 = LlmAgent(
+      name="agent2",
+      model=agent2_model,
+      include_contents="none",
+      instruction="You are Agent2",
+  )
+
+  sequential_agent = SequentialAgent(
+      name="sequential_test_agent", sub_agents=[agent1, agent2]
+  )
+
+  runner = testing_utils.InMemoryRunner(sequential_agent)
+  events = runner.run("Original user request")
+
+  assert len(events) == 2
+  assert events[0].author == "agent1"
+  assert events[1].author == "agent2"
+
+  # Agent1 sees original user request
+  agent1_contents = testing_utils.simplify_contents(
+      agent1_model.requests[0].contents
+  )
+  assert ("user", "Original user request") in agent1_contents
+
+  # Agent2 with include_contents='none' should not see original request
+  agent2_contents = testing_utils.simplify_contents(
+      agent2_model.requests[0].contents
+  )
+
+  assert not any(
+      "Original user request" in str(content) for _, content in agent2_contents
+  )
+  assert any(
+      "Agent1 response" in str(content) for _, content in agent2_contents
+  )
