Commit 2ba6782

google-genai-bot authored and copybara-github committed
fix: Treat empty-but-defined text responses as a text response
Handles an edge case in structured type generation where the JSON content was previously completed but a final, empty response is sent with a STOP finish reason.

PiperOrigin-RevId: 774917500
1 parent 120cbab commit 2ba6782

File tree

2 files changed (+104, -3 lines)

src/google/adk/models/google_llm.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -124,7 +124,7 @@ async def generate_content_async(
         if (
             llm_response.content
             and llm_response.content.parts
-            and llm_response.content.parts[0].text
+            and llm_response.content.parts[0].text is not None
         ):
           part0 = llm_response.content.parts[0]
           if part0.thought:
```
tests/unittests/models/test_google_llm.py

Lines changed: 103 additions & 2 deletions
```diff
@@ -318,6 +318,108 @@ async def mock_coro():
     mock_client.aio.models.generate_content_stream.assert_called_once()
 
 
+@pytest.mark.asyncio
+async def test_generate_content_async_stream_handles_empty_text(
+    gemini_llm, llm_request
+):
+  with mock.patch.object(gemini_llm, "api_client") as mock_client:
+
+    class MockAsyncIterator:
+
+      def __init__(self, seq):
+        self._iter = iter(seq)
+
+      def __aiter__(self):
+        return self
+
+      async def __anext__(self):
+        try:
+          return next(self._iter)
+        except StopIteration:
+          raise StopAsyncIteration
+
+    response1 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part(text="Think1", thought=True)],
+                ),
+                finish_reason=None,
+            )
+        ]
+    )
+    response2 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part(text="", thought=True)],
+                ),
+                finish_reason=None,
+            )
+        ]
+    )
+    response3 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part(text="Think2", thought=True)],
+                ),
+                finish_reason=None,
+            )
+        ]
+    )
+    response4 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part.from_text(text="Answer.")],
+                ),
+                finish_reason=None,
+            )
+        ]
+    )
+    response5 = types.GenerateContentResponse(
+        candidates=[
+            types.Candidate(
+                content=Content(
+                    role="model",
+                    parts=[Part.from_text(text="")],
+                ),
+                finish_reason=types.FinishReason.STOP,
+            )
+        ]
+    )
+
+    async def mock_coro():
+      return MockAsyncIterator(
+          [response1, response2, response3, response4, response5]
+      )
+
+    mock_client.aio.models.generate_content_stream.return_value = mock_coro()
+
+    responses = [
+        resp
+        async for resp in gemini_llm.generate_content_async(
+            llm_request, stream=True
+        )
+    ]
+
+    assert len(responses) == 6
+    assert responses[0].partial is True
+    assert responses[1].partial is True
+    assert responses[2].partial is True
+    assert responses[3].partial is True
+    assert responses[4].partial is True
+    assert responses[5].content.parts[0].text == "Think1Think2"
+    assert responses[5].content.parts[0].thought is True
+    assert responses[5].content.parts[1].text == "Answer."
+    mock_client.aio.models.generate_content_stream.assert_called_once()
+
+
 @pytest.mark.asyncio
 async def test_connect(gemini_llm, llm_request):
   # Create a mock connection
@@ -619,8 +721,7 @@ def test_preprocess_request_handles_backend_specific_fields(
     expected_inline_display_name: Optional[str],
     expected_labels: Optional[str],
 ):
-  """
-  Tests that _preprocess_request correctly sanitizes fields based on the API backend.
+  """Tests that _preprocess_request correctly sanitizes fields based on the API backend.
 
   - For GEMINI_API, it should remove 'display_name' from file/inline data
     and remove 'labels' from the config.
```