Checked other resources
- This is a bug, not a usage question. For questions, please use the LangChain Forum (https://forum.langchain.com/).
- I added a clear and detailed title that summarizes the issue.
- I read what a minimal reproducible example is (https://stackoverflow.com/help/minimal-reproducible-example).
- I included a self-contained, minimal example that demonstrates the issue INCLUDING all the relevant imports. The code runs AS IS to reproduce the issue.
Example Code
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import AnyMessage, add_messages
from langgraph.prebuilt import create_react_agent
from langgraph.prebuilt.chat_agent_executor import (
    AgentState,
    AgentStateWithStructuredResponse,
    AgentStateWithStructuredResponsePydantic,
)
from pydantic import BaseModel
from typing_extensions import TypedDict


# class State(TypedDict):
class State(BaseModel):
    a: str


# class PrivateState(AgentStateWithStructuredResponse):
class PrivateState(AgentStateWithStructuredResponsePydantic):
    private_data: int


def pre_agent(state: State) -> PrivateState:
    output = {"private_data": 100}
    print(f"Entered node `pre_agent`:\n\tInput: {state}.\n\tReturned: {output}")
    # s = PrivateState(private_data=100, structured_response={}, messages=[])
    # return s
    return output


agent = create_react_agent(
    name="agent",
    model="openai:gpt-5",
    tools=[],
    state_schema=PrivateState,
    prompt="Say hello. and put your response in the structured response object",
    response_format=State,
)


def post_agent(state: PrivateState) -> State:
    # print(state)
    # output = {"a": "set by post_agent"}
    output = state["structured_response"]
    print(f"Entered node `post_agent`:\n\tInput: {state}.\n\tReturned: {output}")
    return output


builder = StateGraph(State).add_sequence([pre_agent, agent, post_agent])
builder.add_edge(START, "pre_agent")
graph = builder.compile()

response = graph.invoke(
    {
        "a": "set at start",
    }
)

print()
print(f"Output of graph invocation: {response}")
Error Message and Stack Trace (if applicable)
# Stack Trace
# Invoke the graph with the initial state
> response = graph.invoke(
{
"a": "set at start",
}
)
test/test_llm.py:229:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/main.py:3026: in invoke
for chunk in self.stream(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/main.py:2647: in stream
for _ in runner.tick(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_runner.py:162: in tick
run_with_retry(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_retry.py:42: in run_with_retry
return task.proc.invoke(task.input, config)
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/_internal/_runnable.py:657: in invoke
input = context.run(step.invoke, input, config, **kwargs)
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/main.py:3026: in invoke
for chunk in self.stream(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/main.py:2647: in stream
for _ in runner.tick(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_runner.py:162: in tick
run_with_retry(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_retry.py:42: in run_with_retry
return task.proc.invoke(task.input, config)
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/_internal/_runnable.py:657: in invoke
input = context.run(step.invoke, input, config, **kwargs)
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/main.py:3026: in invoke
for chunk in self.stream(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/main.py:2644: in stream
while loop.tick():
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_loop.py:455: in tick
self.tasks = prepare_next_tasks(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_algo.py:470: in prepare_next_tasks
if task := prepare_single_task(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_algo.py:806: in prepare_single_task
val = _proc_input(
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/pregel/_algo.py:1054: in _proc_input
val = proc.mapper(val)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
schema = <class 'test.test_llm.test_private_state_agent.<locals>.PrivateState'>, input = {'messages': [], 'remaining_steps': 24}
def _coerce_state(schema: type[Any], input: dict[str, Any]) -> dict[str, Any]:
> return schema(**input)
E pydantic_core._pydantic_core.ValidationError: 2 validation errors for PrivateState
E structured_response
E Field required [type=missing, input_value={'messages': [], 'remaining_steps': 24}, input_type=dict]
E For further information visit https://errors.pydantic.dev/2.10/v/missing
E private_data
E Field required [type=missing, input_value={'messages': [], 'remaining_steps': 24}, input_type=dict]
E For further information visit https://errors.pydantic.dev/2.10/v/missing
../../software/miniconda/envs/car310/lib/python3.10/site-packages/langgraph/graph/state.py:1230: ValidationError
======================================================================= short test summary info ========================================================================
FAILED test/test_llm.py::test_private_state_agent - pydantic_core._pydantic_core.ValidationError: 2 validation errors for PrivateState
Description
When calling create_react_agent with a Pydantic state schema, I encountered a Pydantic validation error. The traceback shows it is raised while the input state is coerced into AgentStateWithStructuredResponsePydantic.
Expected behavior: The agent should return a valid structured response that matches the schema.
Actual behavior: Validation fails inside AgentStateWithStructuredResponsePydantic.
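For what it's worth, my reading of the traceback is that the failure distills to the snippet below: _coerce_state in langgraph/pregel/_algo.py calls schema(**input) with only the partial channel values it has at that point, and the required Pydantic fields are missing. This is only an illustration of the error shown above, not new behavior.

from pydantic import ValidationError
from langgraph.prebuilt.chat_agent_executor import (
    AgentStateWithStructuredResponsePydantic,
)


class PrivateState(AgentStateWithStructuredResponsePydantic):
    private_data: int


# Roughly what _coerce_state does with the partial input shown in the
# traceback ({'messages': [], 'remaining_steps': 24}):
try:
    PrivateState(**{"messages": [], "remaining_steps": 24})
except ValidationError as e:
    print(e)  # structured_response and private_data: "Field required"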
PS: The code works correctly if State inherits from TypedDict and PrivateState inherits from AgentStateWithStructuredResponse.
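A possible mitigation (a sketch only, not a fix) is to give the extra Pydantic fields defaults so that a partial state dict can still be coerced. The defaults below are my own placeholders, not part of the original code, and whether the agent still populates structured_response correctly afterwards is untested.

from typing import Any

from langgraph.prebuilt.chat_agent_executor import (
    AgentStateWithStructuredResponsePydantic,
)


class PrivateState(AgentStateWithStructuredResponsePydantic):
    # Assumption: placeholder defaults so _coerce_state can build the model
    # from a partial dict such as {"messages": [], "remaining_steps": 24}.
    structured_response: Any = None
    private_data: int = 0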
System Info
System Information
OS: Linux
OS Version: #61~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Apr 15 17:03:15 UTC 2
Python Version: 3.10.16 (main, Dec 11 2024, 16:24:50) [GCC 11.2.0]
Package Information
langchain_core: 0.3.75
langchain: 0.3.27
langchain_community: 0.3.25
langsmith: 0.3.45
langchain_modelscope: Installed. No version info available.
langchain_ollama: 0.3.3
langchain_openai: 0.2.14
langchain_text_splitters: 0.3.11
langgraph_sdk: 0.2.4
Update 1: create_react_agent does not retain private properties passed along from earlier nodes in the graph
Example Code:
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import SystemMessagePromptTemplate
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import AnyMessage, add_messages
from langgraph.prebuilt import create_react_agent
from langgraph.prebuilt.chat_agent_executor import (
    AgentState,
    AgentStateWithStructuredResponse,
    AgentStateWithStructuredResponsePydantic,
)
from pydantic import BaseModel
from typing_extensions import TypedDict


class State(TypedDict):
# class State(BaseModel):
    name: str


class PrivateState(AgentStateWithStructuredResponse):
# class PrivateState(AgentStateWithStructuredResponsePydantic):
    id: int
    name: str


class Response(TypedDict):
    greeting: str


def pre_agent(state: State) -> PrivateState:
    id = 65535
    output = PrivateState(name=state["name"], id=id, messages=[])
    print(f"[pre_agent] id set to {id}")
    return output


def get_prompt(state):
    if "id" not in state:
        print("[agent/get prompt] id not loaded")
    return [
        SystemMessage("Say hello, and add his id in your greeting."),
        HumanMessage(f"current state: {state}"),
    ]


def pre_model_hook(state):
    if "id" in state:
        print(f"[agent/pre_model_hook] id = {state['id']}")
    else:
        print("[agent/pre_model_hook] id not loaded")


def post_model_hook(state):
    if "id" in state:
        print(f"[agent/post_model_hook] id = {state['id']}")
    else:
        print("[agent/post_model_hook] id not loaded")


agent = create_react_agent(
    name="agent",
    model="openai:gpt-5",
    tools=[],
    state_schema=PrivateState,
    prompt=get_prompt,
    pre_model_hook=pre_model_hook,
    post_model_hook=post_model_hook,
    response_format=Response,
)


def post_agent(state: PrivateState) -> State:
    print(f"[post_agent] {type(state)}")
    print(f"[post_agent] state keys = {state.keys()}")
    if "id" in state:
        print(f"[post_agent] id = {state['id']}")
    output = state["structured_response"]
    print(f"[post_agent] output = {output}")
    return output


builder = StateGraph(State).add_sequence([pre_agent, agent, post_agent])
builder.add_edge(START, "pre_agent")
graph = builder.compile()

response = graph.invoke(
    {
        "name": "Jack",
    }
)

print()
print(f"Output of graph invocation: {response}")
Outputs:
[pre_agent] id set to 65535
[agent/pre_model_hook] id not loaded
[agent/get prompt] id not loaded
[agent/post_model_hook] id not loaded
[post_agent] <class 'dict'>
[post_agent] state keys = dict_keys(['messages', 'structured_response', 'id', 'name', 'remaining_steps'])
[post_agent] id = 65535
[post_agent] output = {'greeting': 'Hello, Jack! How can I assist you today?'}
Output of graph invocation: {'name': 'Jack'}
Description
Expected behavior: the agent node should have access to the private property id.
Current behavior: the private state is correctly constructed in the pre_agent node, and the private property id can be accessed in the post_agent node. However, it remains hidden inside the agent node: the prompt function and the pre/post model hooks all report that id is not loaded.
LangGraph v1-alpha seems to have the same issue.
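A possible workaround I have not verified for Update 1: wrap the prebuilt agent in an explicit node and invoke it with the full parent state, so private keys such as id are passed to the subgraph rather than relying on the automatic channel mapping. agent_node is a name introduced here for illustration; it reuses the agent, State, PrivateState, pre_agent and post_agent definitions from the Update 1 example.

def agent_node(state: PrivateState) -> PrivateState:
    # Pass the whole parent state (including `id`) into the prebuilt agent.
    # dict(state) assumes the TypedDict-based PrivateState from Update 1.
    return agent.invoke(dict(state))


builder = StateGraph(State).add_sequence([pre_agent, agent_node, post_agent])
builder.add_edge(START, "pre_agent")
graph = builder.compile()

If the hooks still report that id is not loaded with this wrapper, that would point at the state being dropped inside create_react_agent itself.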