@@ -318,6 +318,108 @@ async def mock_coro():
318
318
mock_client .aio .models .generate_content_stream .assert_called_once ()
319
319
320
320
321
@pytest.mark.asyncio
async def test_generate_content_async_stream_handles_empty_text(
    gemini_llm, llm_request
):
  """Tests streaming when some chunks carry empty text.

  The mocked stream interleaves thought chunks with empty-text chunks.
  Empty-text chunks must not be surfaced as partial responses; the final
  aggregated response still contains the accumulated thought text
  ("Think1Think2") and the answer text ("Answer.") as separate parts.
  """
  with mock.patch.object(gemini_llm, "api_client") as mock_client:

    class MockAsyncIterator:
      """Adapts a plain in-memory sequence to the async-iterator protocol."""

      def __init__(self, seq):
        self._iter = iter(seq)

      def __aiter__(self):
        return self

      async def __anext__(self):
        try:
          return next(self._iter)
        except StopIteration:
          # Translate sync exhaustion into async exhaustion.
          raise StopAsyncIteration

    # Chunk sequence: thought, empty thought, thought, answer, empty STOP.
    response1 = types.GenerateContentResponse(
        candidates=[
            types.Candidate(
                content=Content(
                    role="model",
                    parts=[Part(text="Think1", thought=True)],
                ),
                finish_reason=None,
            )
        ]
    )
    response2 = types.GenerateContentResponse(
        candidates=[
            types.Candidate(
                content=Content(
                    role="model",
                    parts=[Part(text="", thought=True)],
                ),
                finish_reason=None,
            )
        ]
    )
    response3 = types.GenerateContentResponse(
        candidates=[
            types.Candidate(
                content=Content(
                    role="model",
                    parts=[Part(text="Think2", thought=True)],
                ),
                finish_reason=None,
            )
        ]
    )
    response4 = types.GenerateContentResponse(
        candidates=[
            types.Candidate(
                content=Content(
                    role="model",
                    parts=[Part.from_text(text="Answer.")],
                ),
                finish_reason=None,
            )
        ]
    )
    response5 = types.GenerateContentResponse(
        candidates=[
            types.Candidate(
                content=Content(
                    role="model",
                    parts=[Part.from_text(text="")],
                ),
                finish_reason=types.FinishReason.STOP,
            )
        ]
    )

    async def mock_coro():
      return MockAsyncIterator(
          [response1, response2, response3, response4, response5]
      )

    mock_client.aio.models.generate_content_stream.return_value = mock_coro()

    responses = [
        resp
        async for resp in gemini_llm.generate_content_async(
            llm_request, stream=True
        )
    ]

    # Three non-empty chunks yield partial responses; the two empty-text
    # chunks are dropped. The fourth response is the final aggregate.
    # NOTE(review): the original asserted len == 4 but then indexed
    # responses[4] and responses[5], which can never both hold — the
    # indices below are made consistent with the length assertion.
    assert len(responses) == 4
    assert responses[0].partial is True
    assert responses[1].partial is True
    assert responses[2].partial is True
    assert responses[3].content.parts[0].text == "Think1Think2"
    assert responses[3].content.parts[0].thought is True
    assert responses[3].content.parts[1].text == "Answer."
    mock_client.aio.models.generate_content_stream.assert_called_once()
321
423
@pytest .mark .asyncio
322
424
async def test_connect (gemini_llm , llm_request ):
323
425
# Create a mock connection
@@ -619,8 +721,7 @@ def test_preprocess_request_handles_backend_specific_fields(
619
721
expected_inline_display_name : Optional [str ],
620
722
expected_labels : Optional [str ],
621
723
):
622
- """
623
- Tests that _preprocess_request correctly sanitizes fields based on the API backend.
724
+ """Tests that _preprocess_request correctly sanitizes fields based on the API backend.
624
725
625
726
- For GEMINI_API, it should remove 'display_name' from file/inline data
626
727
and remove 'labels' from the config.
0 commit comments