We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent d8a7eda commit 921573e — Copy full SHA for 921573e
libs/core/langchain_core/language_models/chat_models.py
@@ -412,8 +412,8 @@ def _should_stream(
412
**kwargs: Any,
413
) -> bool:
414
"""Determine if a given model call should hit the streaming API."""
415
- sync_not_implemented = type(self)._stream == BaseChatModel._stream
416
- async_not_implemented = type(self)._astream == BaseChatModel._astream
+ sync_not_implemented = type(self)._stream == BaseChatModel._stream # noqa: SLF001
+ async_not_implemented = type(self)._astream == BaseChatModel._astream # noqa: SLF001
417
418
# Check if streaming is implemented.
419
if (not async_api) and sync_not_implemented:
libs/core/langchain_core/language_models/llms.py
@@ -522,7 +522,7 @@ def stream(
522
stop: Optional[list[str]] = None,
523
524
) -> Iterator[str]:
525
- if type(self)._stream == BaseLLM._stream:
+ if type(self)._stream == BaseLLM._stream: # noqa: SLF001
526
# model doesn't implement streaming, so use default implementation
527
yield self.invoke(input, config=config, stop=stop, **kwargs)
528
else:
@@ -590,8 +590,8 @@ async def astream(
590
591
) -> AsyncIterator[str]:
592
if (
593
- type(self)._astream is BaseLLM._astream
594
- and type(self)._stream is BaseLLM._stream
+ type(self)._astream is BaseLLM._astream # noqa: SLF001
+ and type(self)._stream is BaseLLM._stream # noqa: SLF001
595
):
596
yield await self.ainvoke(input, config=config, stop=stop, **kwargs)
597
return
libs/core/langchain_core/runnables/configurable.py
@@ -131,7 +131,7 @@ def prepare(
131
"""
132
runnable: Runnable[Input, Output] = self
133
while isinstance(runnable, DynamicRunnable):
134
- runnable, config = runnable._prepare(merge_configs(runnable.config, config))
+ runnable, config = runnable._prepare(merge_configs(runnable.config, config)) # noqa: SLF001
135
return runnable, cast("RunnableConfig", config)
136
137
@abstractmethod
libs/core/langchain_core/tools/base.py
@@ -845,7 +845,7 @@ async def arun(
845
child_config = patch_config(config, callbacks=run_manager.get_child())
846
with set_config_context(child_config) as context:
847
func_to_check = (
848
- self._run if self.__class__._arun is BaseTool._arun else self._arun
+ self._run if self.__class__._arun is BaseTool._arun else self._arun # noqa: SLF001
849
)
850
if signature(func_to_check).parameters.get("run_manager"):
851
tool_kwargs["run_manager"] = run_manager
libs/core/langchain_core/tracers/langchain.py
@@ -50,8 +50,8 @@ def log_error_once(method: str, exception: Exception) -> None:
50
51
def wait_for_all_tracers() -> None:
52
"""Wait for all tracers to finish."""
53
- if rt._CLIENT is not None:
54
- rt._CLIENT.flush()
+ if rt._CLIENT is not None: # noqa: SLF001
+ rt._CLIENT.flush() # noqa: SLF001
55
56
57
def get_client() -> Client:
@@ -123,8 +123,8 @@ def _start_trace(self, run: Run) -> None:
123
run.tags = self.tags.copy()
124
125
super()._start_trace(run)
126
- if run._client is None:
127
- run._client = self.client # type: ignore[misc]
+ if run.ls_client is None:
+ run.ls_client = self.client
128
129
def on_chat_model_start(
130
self,
libs/core/langchain_core/utils/mustache.py
@@ -379,7 +379,7 @@ def _get_key(
379
try:
380
# This allows for custom falsy data types
381
# https://github.com/noahmorrison/chevron/issues/35
382
- if resolved_scope._CHEVRON_return_scope_when_falsy: # type: ignore[union-attr]
+ if resolved_scope._CHEVRON_return_scope_when_falsy: # type: ignore[union-attr] # noqa: SLF001
383
return resolved_scope
384
except AttributeError:
385
if resolved_scope in (0, False):
libs/core/pyproject.toml
@@ -7,7 +7,7 @@ authors = []
7
license = {text = "MIT"}
8
requires-python = ">=3.9"
9
dependencies = [
10
- "langsmith<0.4,>=0.1.125",
+ "langsmith<0.4,>=0.1.126",
11
"tenacity!=8.4.0,<10.0.0,>=8.1.0",
12
"jsonpatch<2.0,>=1.33",
13
"PyYAML>=5.3",
@@ -105,7 +105,6 @@ ignore = [
105
"ERA",
106
"PLR2004",
107
"RUF",
108
- "SLF",
109
]
110
flake8-type-checking.runtime-evaluated-base-classes = ["pydantic.BaseModel","langchain_core.load.serializable.Serializable","langchain_core.runnables.base.RunnableSerializable"]
111
flake8-annotations.allow-star-arg-any = true
@@ -132,5 +131,5 @@ classmethod-decorators = [ "classmethod", "langchain_core.utils.pydantic.pre_ini
"tests/unit_tests/runnables/test_runnable.py" = [ "E501",]
"tests/unit_tests/runnables/test_graph.py" = [ "E501",]
"tests/unit_tests/test_tools.py" = [ "ARG",]
-"tests/**" = [ "D", "S",]
+"tests/**" = [ "D", "S", "SLF",]
"scripts/**" = [ "INP", "S",]
0 commit comments