@@ -139,9 +139,9 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> MessageV1:
     elif role == "assistant":
         # Fix for azure
         # Also OpenAI returns None for tool invocations
-        content: list[types.ContentBlock] = [
-            {"type": "text", "text": _dict.get("content", "") or ""}
-        ]
+        content: list[types.ContentBlock] = []
+        if (oai_content := _dict.get("content")) and isinstance(oai_content, str):
+            content.append({"type": "text", "text": oai_content})
         tool_calls = []
         invalid_tool_calls = []
         if raw_tool_calls := _dict.get("tool_calls"):
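The practical effect of this first hunk: assistant messages whose `content` comes back as `None` (as Azure deployments and tool-call-only OpenAI responses return) no longer yield an empty `{"type": "text", "text": ""}` block. A minimal standalone sketch of the new branch — the helper name here is hypothetical, since the real logic is inline in `_convert_dict_to_message`:

    from typing import Any

    def _assistant_content_blocks(oai_message: dict[str, Any]) -> list[dict[str, Any]]:
        # Mirrors the new logic: emit a text block only for real string content.
        content: list[dict[str, Any]] = []
        if (oai_content := oai_message.get("content")) and isinstance(oai_content, str):
            content.append({"type": "text", "text": oai_content})
        return content

    # Old code produced [{"type": "text", "text": ""}] for a None payload.
    assert _assistant_content_blocks({"content": None}) == []
    assert _assistant_content_blocks({"content": "hi"}) == [{"type": "text", "text": "hi"}]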
@@ -314,7 +314,9 @@ def _convert_delta_to_message_chunk(_dict: Mapping[str, Any]) -> AIMessageChunkV
     except KeyError:
         pass

-    return AIMessageChunkV1(content=content, id=id_, tool_call_chunks=tool_call_chunks)
+    return AIMessageChunkV1(
+        content=content or [], id=id_, tool_call_chunks=tool_call_chunks
+    )


 def _update_token_usage(
@@ -838,7 +840,7 @@ def _stream_responses(
             if generation_chunk:
                 if run_manager:
                     run_manager.on_llm_new_token(
-                        generation_chunk.text or "", chunk=generation_chunk
+                        generation_chunk.text, chunk=generation_chunk
                     )
                 is_first_chunk = False
                 yield generation_chunk
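This hunk and the ones that follow drop the `or ""` fallback when forwarding chunk text to callback handlers, presumably because the v1 `text` attribute is a plain `str` property (empty string when there is no text content) rather than an optional value, making the guard dead code. The same simplification repeats in the async variant and in `_stream`/`_astream` below.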
@@ -888,7 +890,7 @@ async def _astream_responses(
             if generation_chunk:
                 if run_manager:
                     await run_manager.on_llm_new_token(
-                        generation_chunk.text or "", chunk=generation_chunk
+                        generation_chunk.text, chunk=generation_chunk
                     )
                 is_first_chunk = False
                 yield generation_chunk
@@ -959,9 +961,7 @@ def _stream(
                 logprobs = message_chunk.response_metadata.get("logprobs")
                 if run_manager:
                     run_manager.on_llm_new_token(
-                        message_chunk.text or "",
-                        chunk=message_chunk,
-                        logprobs=logprobs,
+                        message_chunk.text, chunk=message_chunk, logprobs=logprobs
                     )
                 is_first_chunk = False
                 yield message_chunk
@@ -971,9 +971,7 @@ def _stream(
             final_completion = response.get_final_completion()
             message_chunk = self._get_message_chunk_from_completion(final_completion)
             if run_manager:
-                run_manager.on_llm_new_token(
-                    message_chunk.text or "", chunk=message_chunk
-                )
+                run_manager.on_llm_new_token(message_chunk.text, chunk=message_chunk)
             yield message_chunk

     def _invoke(
@@ -1187,9 +1185,7 @@ async def _astream(
                 logprobs = message_chunk.response_metadata.get("logprobs")
                 if run_manager:
                     await run_manager.on_llm_new_token(
-                        message_chunk.text or "",
-                        chunk=message_chunk,
-                        logprobs=logprobs,
+                        message_chunk.text, chunk=message_chunk, logprobs=logprobs
                     )
                 is_first_chunk = False
                 yield message_chunk
@@ -1200,7 +1196,7 @@ async def _astream(
             message_chunk = self._get_message_chunk_from_completion(final_completion)
             if run_manager:
                 await run_manager.on_llm_new_token(
-                    message_chunk.text or "", chunk=message_chunk
+                    message_chunk.text, chunk=message_chunk
                 )
             yield message_chunk

@@ -1940,7 +1936,7 @@ class ChatOpenAI(BaseChatOpenAI): # type: ignore[override]
     .. code-block:: python

         for chunk in llm.stream(messages):
-            print(chunk.text(), end="")
+            print(chunk.text, end="")

     .. code-block:: python

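The docstring examples are updated to match the v1 message API, where `text` is a property rather than a method, so `chunk.text()` becomes the attribute access `chunk.text`. The remaining docstring hunks below make the same mechanical substitution.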
@@ -2165,7 +2161,7 @@ class GetPopulation(BaseModel):

         llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
         response = llm.invoke("Hi, I'm Bob.")
-        response.text()
+        response.text

     .. code-block:: python

@@ -2177,7 +2173,7 @@ class GetPopulation(BaseModel):
             "What is my name?",
             previous_response_id=response.response_metadata["id"],
         )
-        second_response.text()
+        second_response.text

     .. code-block:: python

@@ -2226,7 +2222,7 @@ class GetPopulation(BaseModel):
         response = llm.invoke("What is 3^3?")

         # Response text
-        print(f"Output: {response.text()}")
+        print(f"Output: {response.text}")

         # Reasoning summaries
         for block in response.content:
@@ -3799,7 +3795,7 @@ def _advance(output_idx: int, sub_idx: Optional[int] = None) -> None:
                 and (content_block.get("index") or -1) > current_index  # type: ignore[operator]
             ):
                 # blocks were added for v1
-                current_index = content_block["index"]
+                current_index = cast(int, content_block["index"])

             message = AIMessageChunkV1(
                 content=content_v1,
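The added `cast` is a static-typing accommodation: the content-block type does not appear to narrow `content_block["index"]` to `int` (note the `# type: ignore[operator]` already required on the comparison above), while the runtime value is untouched. As a reminder, `typing.cast` asserts a type to the checker without any runtime effect:

    from typing import cast

    value: object = 3
    index = cast(int, value)  # no runtime check or conversion; informs the type checker only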