Skip to content

Commit f3b942d

Browse files
committed
Remove invoke methods and model validation from ReasoningAgent
Deleted the streaming and non-streaming invoke methods, as well as the model validation logic, from ReasoningAgentTemplate. Also updated the create_reasoning_agent function to use the correct endpoint parameter. These changes simplify the agent interface and remove redundant or unused code.
1 parent 810ddc6 commit f3b942d

File tree

1 file changed

+2
-91
lines changed

1 file changed

+2
-91
lines changed

src/backend/v4/magentic_agents/reasoning_agent.py

Lines changed: 2 additions & 91 deletions
Original file line number | Diff line number | Diff line change
@@ -78,14 +78,6 @@ def __init__(
7878

7979
self.logger = logging.getLogger(__name__)
8080

81-
# Validate reasoning model
82-
if self.model_deployment_name not in {"o1", "o1-mini", "o1-preview", "o3-mini"}:
83-
self.logger.warning(
84-
"Model '%s' may not support reasoning features. "
85-
"Recommended models: o1, o1-mini, o3-mini",
86-
self.model_deployment_name
87-
)
88-
8981
async def _after_open(self) -> None:
9082
"""Initialize Azure client and search after base setup."""
9183
try:
@@ -241,84 +233,6 @@ def _prepare_tools(self) -> list:
241233

242234
return tools
243235

244-
async def invoke(self, prompt: str):
245-
"""
246-
Stream model output for a prompt with optional search augmentation.
247-
248-
For reasoning models, this will include:
249-
- Reasoning content (thinking process)
250-
- Final answer content
251-
252-
Args:
253-
prompt: User prompt/question
254-
255-
Yields:
256-
ChatResponseUpdate objects with incremental updates
257-
"""
258-
if not self._client:
259-
raise RuntimeError("Agent not initialized; call open() first.")
260-
261-
# Augment instructions with search results if available
262-
instructions = await self._augment_with_search(prompt)
263-
264-
# Build message
265-
messages = [ChatMessage(role=Role.USER, text=prompt)]
266-
267-
# Prepare tools
268-
tools = self._prepare_tools()
269-
270-
try:
271-
# Stream response from reasoning model
272-
async for update in self._client.get_streaming_response(
273-
messages=messages,
274-
instructions=instructions,
275-
tools=tools if tools else None,
276-
tool_choice="auto" if tools else "none",
277-
temperature=1.0, # Reasoning models use fixed temperature
278-
):
279-
yield update
280-
281-
except Exception as ex:
282-
self.logger.error("Error during reasoning agent invocation: %s", ex)
283-
raise
284-
285-
async def invoke_non_streaming(self, prompt: str):
286-
"""
287-
Get complete response (non-streaming) with search augmentation.
288-
289-
Args:
290-
prompt: User prompt/question
291-
292-
Returns:
293-
ChatResponse with complete response
294-
"""
295-
if not self._client:
296-
raise RuntimeError("Agent not initialized; call open() first.")
297-
298-
# Augment instructions with search results
299-
instructions = await self._augment_with_search(prompt)
300-
301-
# Build message
302-
messages = [ChatMessage(role=Role.USER, text=prompt)]
303-
304-
# Prepare tools
305-
tools = self._prepare_tools()
306-
307-
try:
308-
# Get response from reasoning model
309-
response = await self._client.get_response(
310-
messages=messages,
311-
instructions=instructions,
312-
tools=tools if tools else None,
313-
tool_choice="auto" if tools else "none",
314-
temperature=1.0,
315-
)
316-
return response
317-
318-
except Exception as ex:
319-
self.logger.error("Error during reasoning agent invocation: %s", ex)
320-
raise
321-
322236
@property
323237
def client(self) -> Optional[AzureAIAgentClient]:
324238
"""Access to underlying client for compatibility."""
@@ -374,13 +288,10 @@ async def create_reasoning_agent(
374288
),
375289
)
376290
377-
async with agent:
378-
async for update in agent.invoke("Explain quantum entanglement"):
379-
print(update.text, end="")
380291
```
381292
"""
382293
# Get endpoint from env if not provided
383-
endpoint = azure_ai_project_endpoint or os.getenv("AZURE_AI_PROJECT_ENDPOINT")
294+
endpoint = azure_ai_project_endpoint
384295
if not endpoint:
385296
raise RuntimeError(
386297
"AZURE_AI_PROJECT_ENDPOINT must be provided or set as environment variable"
@@ -391,7 +302,7 @@ async def create_reasoning_agent(
391302
agent_description=agent_description,
392303
agent_instructions=agent_instructions,
393304
model_deployment_name=model_deployment_name,
394-
azure_ai_project_endpoint=endpoint,
305+
project_endpoint=endpoint,
395306
search_config=search_config,
396307
mcp_config=mcp_config,
397308
)

0 commit comments

Comments (0)