
Commit 1a09da7

Merge branch 'wip-v1.0' into vb/add-on-tool-error

2 parents: 39da478 + ced9fc2

File tree: 2 files changed, +30 −26 lines

libs/core/langchain_core/callbacks/base.py

Lines changed: 17 additions & 11 deletions
@@ -71,7 +71,9 @@ def on_llm_new_token(
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run on new LLM token. Only available when streaming is enabled.
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).
 
         Args:
             token (str): The new token.
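
For context (not part of this commit): a minimal sketch of a handler consuming these token events. The class name `TokenPrinter` is hypothetical; only `BaseCallbackHandler` from `langchain_core.callbacks` is assumed.

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class TokenPrinter(BaseCallbackHandler):
    """Print each streamed token as it arrives."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Per the updated docstring, this fires for both chat models and
        # legacy LLMs, and only when streaming is enabled.
        print(token, end="", flush=True)
```

Such a handler is typically passed to a model via `callbacks=[TokenPrinter()]` in the invocation config.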
@@ -243,7 +245,7 @@ def on_llm_start(
     ) -> Any:
         """Run when LLM starts running.
 
-        .. ATTENTION::
+        .. warning::
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.
@@ -271,8 +273,9 @@ def on_chat_model_start(
     ) -> Any:
         """Run when a chat model starts running.
 
-        **ATTENTION**: This method is called for chat models. If you're implementing
-        a handler for a non-chat model, you should use ``on_llm_start`` instead.
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.
 
         Args:
             serialized (dict[str, Any]): The serialized chat model.
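
A sketch illustrating the split these warnings describe (hypothetical `StartLogger` class, not from this commit): legacy LLMs report prompt strings to `on_llm_start`, while chat models report batches of message lists to `on_chat_model_start`.

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage


class StartLogger(BaseCallbackHandler):
    """Log which start hook fired."""

    def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> None:
        # Non-chat (legacy) LLMs: inputs arrive as plain prompt strings.
        print(f"LLM run started with {len(prompts)} prompt(s)")

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        **kwargs: Any,
    ) -> None:
        # Chat models: inputs arrive as batches of message lists.
        print(f"Chat model run started with {len(messages)} batch(es)")
```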
@@ -489,9 +492,9 @@ async def on_llm_start(
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM starts running.
+        """Run when the model starts running.
 
-        .. ATTENTION::
+        .. warning::
            This method is called for non-chat models (regular LLMs). If you're
            implementing a handler for a chat model, you should use
            ``on_chat_model_start`` instead.
@@ -519,8 +522,9 @@ async def on_chat_model_start(
     ) -> Any:
         """Run when a chat model starts running.
 
-        **ATTENTION**: This method is called for chat models. If you're implementing
-        a handler for a non-chat model, you should use ``on_llm_start`` instead.
+        .. warning::
+            This method is called for chat models. If you're implementing a handler for
+            a non-chat model, you should use ``on_llm_start`` instead.
 
         Args:
             serialized (dict[str, Any]): The serialized chat model.
@@ -546,7 +550,9 @@ async def on_llm_new_token(
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run on new LLM token. Only available when streaming is enabled.
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).
 
         Args:
             token (str): The new token.
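
The async variants live on `AsyncCallbackHandler`. A minimal sketch (the `AsyncTokenCollector` name is hypothetical) of collecting tokens through the async hook:

```python
from typing import Any

from langchain_core.callbacks import AsyncCallbackHandler


class AsyncTokenCollector(AsyncCallbackHandler):
    """Collect streamed tokens via the async hook."""

    def __init__(self) -> None:
        self.tokens: list[str] = []

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Async counterpart of the same hook; likewise fires for chat and
        # non-chat models alike when streaming is enabled.
        self.tokens.append(token)
```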
@@ -567,7 +573,7 @@ async def on_llm_end(
         tags: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM ends running.
+        """Run when the model ends running.
 
         Args:
             response (LLMResult): The response which was generated.
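
Similarly, `on_llm_end` delivers the final `LLMResult` for chat and non-chat runs alike. A sketch assuming only `AsyncCallbackHandler` and `LLMResult` from `langchain_core.outputs` (the `ResultCapture` name is hypothetical):

```python
from typing import Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.outputs import LLMResult


class ResultCapture(AsyncCallbackHandler):
    """Store the final result of each model run, chat or non-chat."""

    def __init__(self) -> None:
        self.results: list[LLMResult] = []

    async def on_llm_end(
        self, response: LLMResult, *, run_id: UUID, **kwargs: Any
    ) -> None:
        self.results.append(response)
```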
@@ -867,7 +873,7 @@ async def on_custom_event(
         metadata: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Override to define a handler for a custom event.
+        """Override to define a handler for custom events.
 
         Args:
             name: The name of the custom event.
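
For reference, overriding this hook might look like the following sketch (hypothetical `CustomEventLogger`); such handlers receive events dispatched from inside runnables:

```python
from typing import Any
from uuid import UUID

from langchain_core.callbacks import AsyncCallbackHandler


class CustomEventLogger(AsyncCallbackHandler):
    """Log custom events by name and payload."""

    async def on_custom_event(
        self, name: str, data: Any, *, run_id: UUID, **kwargs: Any
    ) -> None:
        # tags/metadata from the base signature are absorbed by **kwargs here.
        print(f"custom event {name!r}: {data!r}")
```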

libs/core/langchain_core/tracers/event_stream.py

Lines changed: 13 additions & 15 deletions
@@ -224,7 +224,7 @@ async def tap_output_aiter(
             yield chunk
 
     def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
-        """Tap the output aiter.
+        """Tap the output iter.
 
         Args:
             run_id: The ID of the run.
@@ -315,7 +315,7 @@ async def on_chat_model_start(
         name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        """Start a trace for an LLM run."""
+        """Start a trace for a chat model run."""
         name_ = _assign_name(name, serialized)
         run_type = "chat_model"
 
@@ -357,7 +357,7 @@ async def on_llm_start(
         name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        """Start a trace for an LLM run."""
+        """Start a trace for a (non-chat model) LLM run."""
         name_ = _assign_name(name, serialized)
         run_type = "llm"
 
@@ -421,6 +421,10 @@ async def on_llm_new_token(
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> None:
+        """Run on new output token. Only available when streaming is enabled.
+
+        For both chat models and non-chat models (legacy LLMs).
+        """
         run_info = self.run_map.get(run_id)
         chunk_: Union[GenerationChunk, BaseMessageChunk]
 
@@ -466,17 +470,15 @@ async def on_llm_new_token(
     async def on_llm_end(
         self, response: LLMResult, *, run_id: UUID, **kwargs: Any
     ) -> None:
-        """End a trace for an LLM run.
+        """End a trace for a model run.
 
-        Args:
-            response (LLMResult): The response which was generated.
-            run_id (UUID): The run ID. This is the ID of the current run.
+        For both chat models and non-chat models (legacy LLMs).
 
         Raises:
             ValueError: If the run type is not ``'llm'`` or ``'chat_model'``.
         """
         run_info = self.run_map.pop(run_id)
-        inputs_ = run_info["inputs"]
+        inputs_ = run_info.get("inputs")
 
         generations: Union[list[list[GenerationChunk]], list[list[ChatGenerationChunk]]]
         output: Union[dict, BaseMessage] = {}
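
The motivation for switching from indexing to `.get()`: a run record may have no `"inputs"` key when the inputs were never captured (the comment in the v1 implementation below notes this happens for components that operate on streams). A tiny illustrative snippet with a hypothetical record:

```python
# Hypothetical run record without captured inputs, e.g. a streaming
# component whose final input was never materialized.
run_info: dict = {"name": "my_llm", "run_type": "llm"}

# run_info["inputs"] would raise KeyError; .get() degrades to None instead.
inputs_ = run_info.get("inputs")
assert inputs_ is None
```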
@@ -705,10 +707,6 @@ async def on_tool_error(
     async def on_tool_end(self, output: Any, *, run_id: UUID, **kwargs: Any) -> None:
         """End a trace for a tool run.
 
-        Args:
-            output: The output of the tool.
-            run_id: The run ID. This is the ID of the current run.
-
         Raises:
             AssertionError: If the run ID is a tool call and does not have inputs
         """
@@ -786,7 +784,7 @@ async def on_retriever_end(
                 "event": "on_retriever_end",
                 "data": {
                     "output": documents,
-                    "input": run_info["inputs"],
+                    "input": run_info.get("inputs"),
                 },
                 "run_id": str(run_id),
                 "name": run_info["name"],
@@ -898,12 +896,12 @@ async def _astream_events_implementation_v1(
             # Usually they will NOT be available for components that operate
             # on streams, since those components stream the input and
             # don't know its final value until the end of the stream.
-            inputs = log_entry["inputs"]
+            inputs = log_entry.get("inputs")
             if inputs is not None:
                 data["input"] = inputs
 
         if event_type == "end":
-            inputs = log_entry["inputs"]
+            inputs = log_entry.get("inputs")
             if inputs is not None:
                 data["input"] = inputs
 
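How these tracer changes surface to users: a sketch of consuming the v1 event stream via `Runnable.astream_events` (the helper name `show_events` and the input `"hello"` are illustrative, not from this commit):

```python
from langchain_core.runnables import Runnable


async def show_events(model: Runnable) -> None:
    # Each event is a dict with "event", "name", "run_id", and "data" keys.
    async for event in model.astream_events("hello", version="v1"):
        print(event["event"], event.get("name"))
```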