Commit 2a268f1

fix(openai): use empty list in v1 messages instead of empty string for chat completions tool calls (#32392)

1 parent ff3153c

1 file changed, 17 additions and 21 deletions:
libs/partners/openai/langchain_openai/chat_models/base_v1.py
@@ -139,9 +139,9 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> MessageV1:
     elif role == "assistant":
         # Fix for azure
         # Also OpenAI returns None for tool invocations
-        content: list[types.ContentBlock] = [
-            {"type": "text", "text": _dict.get("content", "") or ""}
-        ]
+        content: list[types.ContentBlock] = []
+        if (oai_content := _dict.get("content")) and isinstance(oai_content, str):
+            content.append({"type": "text", "text": oai_content})
         tool_calls = []
         invalid_tool_calls = []
         if raw_tool_calls := _dict.get("tool_calls"):
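The commit's core change is in this hunk: an assistant turn whose content is None (as OpenAI returns for pure tool invocations) now yields an empty block list instead of a single empty text block. A minimal standalone sketch of the new logic (the helper name and dict shapes here are illustrative, not part of the library):

from typing import Any


def extract_content_blocks(raw: dict[str, Any]) -> list[dict[str, Any]]:
    # Start from an empty list; append a text block only when the API
    # returned a non-empty string (mirrors the patched branch above).
    content: list[dict[str, Any]] = []
    if (oai_content := raw.get("content")) and isinstance(oai_content, str):
        content.append({"type": "text", "text": oai_content})
    return content


# Tool-call-only assistant message: OpenAI sets "content" to None.
assert extract_content_blocks({"role": "assistant", "content": None}) == []
# A plain text reply still produces a text block.
assert extract_content_blocks({"content": "42"}) == [{"type": "text", "text": "42"}]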
@@ -314,7 +314,9 @@ def _convert_delta_to_message_chunk(_dict: Mapping[str, Any]) -> AIMessageChunkV1:
     except KeyError:
         pass
 
-    return AIMessageChunkV1(content=content, id=id_, tool_call_chunks=tool_call_chunks)
+    return AIMessageChunkV1(
+        content=content or [], id=id_, tool_call_chunks=tool_call_chunks
+    )
 
 
 def _update_token_usage(
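Here `content or []` normalizes any falsy value left over from delta parsing (None or "") before the chunk is constructed, while a non-empty block list passes through untouched. A trivial illustration of the idiom:

for raw in (None, "", [{"type": "text", "text": "hi"}]):
    normalized = raw or []
    # Falsy inputs collapse to [], non-empty lists pass through unchanged.
    assert normalized == [] or normalized is raw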
@@ -838,7 +840,7 @@ def _stream_responses(
         if generation_chunk:
             if run_manager:
                 run_manager.on_llm_new_token(
-                    generation_chunk.text or "", chunk=generation_chunk
+                    generation_chunk.text, chunk=generation_chunk
                 )
             is_first_chunk = False
             yield generation_chunk
@@ -888,7 +890,7 @@ async def _astream_responses(
         if generation_chunk:
             if run_manager:
                 await run_manager.on_llm_new_token(
-                    generation_chunk.text or "", chunk=generation_chunk
+                    generation_chunk.text, chunk=generation_chunk
                 )
             is_first_chunk = False
             yield generation_chunk
@@ -959,9 +961,7 @@ def _stream(
             logprobs = message_chunk.response_metadata.get("logprobs")
             if run_manager:
                 run_manager.on_llm_new_token(
-                    message_chunk.text or "",
-                    chunk=message_chunk,
-                    logprobs=logprobs,
+                    message_chunk.text, chunk=message_chunk, logprobs=logprobs
                 )
             is_first_chunk = False
             yield message_chunk
@@ -971,9 +971,7 @@ def _stream(
             final_completion = response.get_final_completion()
             message_chunk = self._get_message_chunk_from_completion(final_completion)
             if run_manager:
-                run_manager.on_llm_new_token(
-                    message_chunk.text or "", chunk=message_chunk
-                )
+                run_manager.on_llm_new_token(message_chunk.text, chunk=message_chunk)
             yield message_chunk
 
     def _invoke(
@@ -1187,9 +1185,7 @@ async def _astream(
             logprobs = message_chunk.response_metadata.get("logprobs")
             if run_manager:
                 await run_manager.on_llm_new_token(
-                    message_chunk.text or "",
-                    chunk=message_chunk,
-                    logprobs=logprobs,
+                    message_chunk.text, chunk=message_chunk, logprobs=logprobs
                 )
             is_first_chunk = False
             yield message_chunk
@@ -1200,7 +1196,7 @@ async def _astream(
             message_chunk = self._get_message_chunk_from_completion(final_completion)
             if run_manager:
                 await run_manager.on_llm_new_token(
-                    message_chunk.text or "", chunk=message_chunk
+                    message_chunk.text, chunk=message_chunk
                 )
             yield message_chunk
 
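The six streaming hunks above all drop the same defensive `or ""`. In the v1 chunk classes, text is a property that always returns a str (the concatenated text blocks, or "" when there are none), so the fallback was a no-op. A toy stand-in illustrating the property's contract (this class is illustrative, not the library's AIMessageChunkV1):

from dataclasses import dataclass, field


@dataclass
class ToyChunk:
    content: list[dict] = field(default_factory=list)

    @property
    def text(self) -> str:
        # Concatenate text blocks; an empty content list yields "".
        return "".join(
            b.get("text", "") for b in self.content if b.get("type") == "text"
        )


assert ToyChunk().text == ""  # never None, so `text or ""` added nothing
assert ToyChunk([{"type": "text", "text": "hi"}]).text == "hi"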
@@ -1940,7 +1936,7 @@ class ChatOpenAI(BaseChatOpenAI):  # type: ignore[override]
     .. code-block:: python
 
         for chunk in llm.stream(messages):
-            print(chunk.text(), end="")
+            print(chunk.text, end="")
 
     .. code-block:: python
 
@@ -2165,7 +2161,7 @@ class GetPopulation(BaseModel):
 
         llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
         response = llm.invoke("Hi, I'm Bob.")
-        response.text()
+        response.text
 
     .. code-block:: python
 
@@ -2177,7 +2173,7 @@ class GetPopulation(BaseModel):
             "What is my name?",
             previous_response_id=response.response_metadata["id"],
         )
-        second_response.text()
+        second_response.text
 
     .. code-block:: python
 
@@ -2226,7 +2222,7 @@ class GetPopulation(BaseModel):
        response = llm.invoke("What is 3^3?")
 
        # Response text
-       print(f"Output: {response.text()}")
+       print(f"Output: {response.text}")
 
        # Reasoning summaries
        for block in response.content:
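These four docstring hunks track the same API shift: on v1 messages, text is a property rather than a method, so every example drops the call parentheses. Usage after the change (a sketch mirroring the docstring; assumes the v1 ChatOpenAI class from this module and a configured API key):

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4.1-mini", use_responses_api=True)
response = llm.invoke("What is 3^3?")
print(response.text)  # property access; response.text() would now raise TypeError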
@@ -3799,7 +3795,7 @@ def _advance(output_idx: int, sub_idx: Optional[int] = None) -> None:
             and (content_block.get("index") or -1) > current_index  # type: ignore[operator]
         ):
             # blocks were added for v1
-            current_index = content_block["index"]
+            current_index = cast(int, content_block["index"])
 
         message = AIMessageChunkV1(
             content=content_v1,
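typing.cast has no runtime effect; it only tells the type checker that the block's "index" value is an int rather than a broader union, so the assignment type-checks without an ignore comment. A minimal demonstration:

from typing import Any, cast

block: dict[str, Any] = {"index": 3}
idx = cast(int, block["index"])  # purely a type-checker hint; no runtime conversion
assert idx == 3 and isinstance(idx, int)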
