
Commit ea83343

Add support for different models using litellm (#6)
1 parent: 78ed361 · commit: ea83343

33 files changed: 2422 additions, 658 deletions

config/gni/devtools_grd_files.gni

Lines changed: 3 additions & 0 deletions
@@ -608,10 +608,13 @@ grd_files_release_sources = [
   "front_end/panels/ai_chat/core/ConfigurableGraph.js",
   "front_end/panels/ai_chat/core/GraphConfigs.js",
   "front_end/panels/ai_chat/core/OpenAIClient.js",
+  "front_end/panels/ai_chat/core/LiteLLMClient.js",
+  "front_end/panels/ai_chat/core/UnifiedLLMClient.js",
   "front_end/panels/ai_chat/core/BaseOrchestratorAgent.js",
   "front_end/panels/ai_chat/core/PageInfoManager.js",
   "front_end/panels/ai_chat/core/AgentNodes.js",
   "front_end/panels/ai_chat/core/ChatOpenAI.js",
+  "front_end/panels/ai_chat/core/ChatLiteLLM.js",
   "front_end/panels/ai_chat/core/GraphHelpers.js",
   "front_end/panels/ai_chat/core/StateGraph.js",
   "front_end/panels/ai_chat/tools/Tools.js",

front_end/panels/ai_chat/BUILD.gn

Lines changed: 6 additions & 0 deletions
@@ -25,11 +25,14 @@ devtools_module("ai_chat") {
     "core/AgentService.ts",
     "core/GraphConfigs.ts",
     "core/OpenAIClient.ts",
+    "core/LiteLLMClient.ts",
+    "core/UnifiedLLMClient.ts",
     "core/ConfigurableGraph.ts",
     "core/BaseOrchestratorAgent.ts",
     "core/PageInfoManager.ts",
     "core/AgentNodes.ts",
     "core/ChatOpenAI.ts",
+    "core/ChatLiteLLM.ts",
     "core/GraphHelpers.ts",
     "core/StateGraph.ts",
     "tools/Tools.ts",
@@ -77,11 +80,14 @@ _ai_chat_sources = [
     "core/AgentService.ts",
     "core/GraphConfigs.ts",
     "core/OpenAIClient.ts",
+    "core/LiteLLMClient.ts",
+    "core/UnifiedLLMClient.ts",
     "core/ConfigurableGraph.ts",
     "core/BaseOrchestratorAgent.ts",
     "core/PageInfoManager.ts",
     "core/AgentNodes.ts",
     "core/ChatOpenAI.ts",
+    "core/ChatLiteLLM.ts",
     "core/GraphHelpers.ts",
     "core/StateGraph.ts",
     "tools/Tools.ts",

front_end/panels/ai_chat/agent_framework/AgentRunner.ts

Lines changed: 38 additions & 34 deletions
@@ -2,12 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-import { Tool } from '../tools/Tools.js';
-import { OpenAIClient, OpenAIResponse } from '../core/OpenAIClient.js';
-import { ChatMessage, ChatMessageEntity, ModelChatMessage, ToolResultMessage } from '../ui/ChatView.js';
 import { ChatPromptFormatter } from '../core/Graph.js';
 import { enhancePromptWithPageContext } from '../core/PageInfoManager.js';
-import { ConfigurableAgentArgs, ConfigurableAgentResult, AgentRunTerminationReason, ConfigurableAgentTool, ToolRegistry, HandoffConfig, HandoffTrigger /*, HandoffContextTransform, ContextFilterRegistry*/ } from './ConfigurableAgentTool.js';
+import { UnifiedLLMClient, type UnifiedLLMResponse, type ParsedLLMAction } from '../core/UnifiedLLMClient.js';
+import type { Tool } from '../tools/Tools.js';
+import { ChatMessageEntity, type ChatMessage, type ModelChatMessage, type ToolResultMessage } from '../ui/ChatView.js';
+
+import { ConfigurableAgentTool, ToolRegistry, HandoffTrigger, type ConfigurableAgentArgs, type ConfigurableAgentResult, type AgentRunTerminationReason, type HandoffConfig /* , HandoffContextTransform, ContextFilterRegistry*/ } from './ConfigurableAgentTool.js';

 /**
  * Configuration for the AgentRunner
@@ -16,7 +17,7 @@ export interface AgentRunnerConfig {
   apiKey: string;
   modelName: string;
   systemPrompt: string;
-  tools: Tool<any, any>[];
+  tools: Array<Tool<any, any>>;
   maxIterations: number;
   temperature?: number;
 }
@@ -103,11 +104,13 @@ export class AgentRunner {

     // Enhance the target agent's system prompt with page context
     const enhancedSystemPrompt = await enhancePromptWithPageContext(targetConfig.systemPrompt);
-
+
     // Construct Runner Config & Hooks for the target agent
     const targetRunnerConfig: AgentRunnerConfig = {
-      apiKey: apiKey,
-      modelName: targetConfig.modelName || defaultModelName,
+      apiKey,
+      modelName: typeof targetConfig.modelName === 'function'
+        ? targetConfig.modelName()
+        : (targetConfig.modelName || defaultModelName),
       systemPrompt: enhancedSystemPrompt,
       tools: targetConfig.tools
         .map(toolName => ToolRegistry.getRegisteredTool(toolName))
@@ -153,21 +156,21 @@
         intermediateSteps: combinedIntermediateSteps,
         terminationReason: handoffResult.terminationReason || 'handed_off',
       };
-    } else {
-      // Otherwise (default), omit the target's intermediate steps
-      console.log(`[AgentRunner] Omitting intermediateSteps from ${targetAgentTool.name} based on its config (default or flag set to false).`);
-      // Return result from target, ensuring intermediateSteps are omitted
-      const finalResult = {
-        ...handoffResult,
-        terminationReason: handoffResult.terminationReason || 'handed_off',
-      };
-      // Explicitly delete intermediateSteps if they somehow exist on handoffResult (shouldn't due to target config)
-      delete finalResult.intermediateSteps;
-      return finalResult;
     }
+    // Otherwise (default), omit the target's intermediate steps
+    console.log(`[AgentRunner] Omitting intermediateSteps from ${targetAgentTool.name} based on its config (default or flag set to false).`);
+    // Return result from target, ensuring intermediateSteps are omitted
+    const finalResult = {
+      ...handoffResult,
+      terminationReason: handoffResult.terminationReason || 'handed_off',
+    };
+    // Explicitly delete intermediateSteps if they somehow exist on handoffResult (shouldn't due to target config)
+    delete finalResult.intermediateSteps;
+    return finalResult;
+
   }

-  public static async run(
+  static async run(
     initialMessages: ChatMessage[],
     args: ConfigurableAgentArgs,
     config: AgentRunnerConfig,
@@ -195,7 +198,7 @@ export class AgentRunner {
     }));

     // Add handoff tools based on the executing agent's config
-    if (executingAgent && executingAgent.config.handoffs) {
+    if (executingAgent?.config.handoffs) {
       // Iterate over the configured handoffs
       for (const handoffConfig of executingAgent.config.handoffs) {
         // Only add handoffs triggered by LLM tool calls to the schema
@@ -226,7 +229,7 @@

     for (iteration = 0; iteration < maxIterations; iteration++) {
       console.log(`[AgentRunner] ${agentName} Iteration ${iteration + 1}/${maxIterations}`);
-
+
       // Prepare prompt and call LLM
       const iterationInfo = `
 ## Current Progress
@@ -238,10 +241,10 @@
       const currentSystemPrompt = await enhancePromptWithPageContext(systemPrompt + iterationInfo);

       const promptText = promptFormatter.format({ messages });
-      let openAIResponse: OpenAIResponse;
+      let llmResponse: UnifiedLLMResponse;
       try {
         console.log(`[AgentRunner] ${agentName} Calling LLM. Prompt size: ${promptText.length}`);
-        openAIResponse = await OpenAIClient.callOpenAI(
+        llmResponse = await UnifiedLLMClient.callLLMWithResponse(
           apiKey,
           modelName,
           promptText,
@@ -268,7 +271,7 @@
       }

       // Parse LLM response
-      const parsedAction = OpenAIClient.parseOpenAIResponse(openAIResponse);
+      const parsedAction = UnifiedLLMClient.parseResponse(llmResponse);

       // Process parsed action
       try {
@@ -279,10 +282,10 @@
           newModelMessage = {
             entity: ChatMessageEntity.MODEL,
             action: 'tool',
-            toolName: toolName,
-            toolArgs: toolArgs,
+            toolName,
+            toolArgs,
             isFinalAnswer: false,
-            reasoning: openAIResponse.reasoning?.summary,
+            reasoning: llmResponse.reasoning?.summary,
           };
           messages.push(newModelMessage);
           console.log(`[AgentRunner] ${agentName} LLM requested tool: ${toolName}`);
@@ -324,7 +327,8 @@
           // LLM tool handoff replaces the current agent's execution entirely
           return handoffResult;

-        } else if (!toolToExecute) { // Regular tool, but not found
+        }
+        if (!toolToExecute) { // Regular tool, but not found
           throw new Error(`Agent requested unknown tool: ${toolName}`);
         } else {
           // *** Regular tool execution ***
@@ -356,7 +360,7 @@
           // Add tool result message
           const toolResultMessage: ToolResultMessage = {
             entity: ChatMessageEntity.TOOL_RESULT,
-            toolName: toolName,
+            toolName,
             resultText: toolResultText,
             isError: toolIsError,
             ...(toolIsError && { error: toolResultText }), // Include raw error message if error occurred
@@ -370,9 +374,9 @@
           newModelMessage = {
             entity: ChatMessageEntity.MODEL,
             action: 'final',
-            answer: answer,
+            answer,
             isFinalAnswer: true,
-            reasoning: openAIResponse.reasoning?.summary,
+            reasoning: llmResponse.reasoning?.summary,
           };
           messages.push(newModelMessage);
           console.log(`[AgentRunner] ${agentName} LLM provided final answer.`);
@@ -403,7 +407,7 @@
     // Max iterations reached - Check for 'max_iterations' handoff trigger
     console.warn(`[AgentRunner] ${agentName} Reached max iterations (${maxIterations}) without completion.`);

-    if (executingAgent && executingAgent.config.handoffs) {
+    if (executingAgent?.config.handoffs) {
       const maxIterHandoffConfig = executingAgent.config.handoffs.find(h => h.trigger === 'max_iterations');

       if (maxIterHandoffConfig) {
@@ -427,4 +431,4 @@
     console.warn(`[AgentRunner] ${agentName} No 'max_iterations' handoff configured. Returning error.`);
     return createErrorResult(`Agent reached maximum iterations (${maxIterations})`, messages, 'max_iterations');
   }
-}
+}
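
Taken together, the AgentRunner.ts changes above swap direct OpenAIClient calls for the new UnifiedLLMClient, which is what lets the same agent loop drive either OpenAI models or models served through a LiteLLM endpoint. The sketch below restates that call pattern in isolation. It is a minimal sketch, not code from the commit: only callLLMWithResponse, parseResponse, and reasoning?.summary are visible in the hunks above, so the options argument and the ParsedLLMAction return type are inferred rather than taken from core/UnifiedLLMClient.ts.

// Minimal sketch of the single call path used by AgentRunner.run() after this commit
// (assumptions noted inline).
import { UnifiedLLMClient, type UnifiedLLMResponse, type ParsedLLMAction } from '../core/UnifiedLLMClient.js';

async function callModelOnce(
    apiKey: string, modelName: string, promptText: string, systemPrompt: string): Promise<ParsedLLMAction> {
  // One entry point for OpenAI models and LiteLLM-proxied models alike;
  // provider routing now lives inside UnifiedLLMClient, not in AgentRunner.
  const llmResponse: UnifiedLLMResponse = await UnifiedLLMClient.callLLMWithResponse(
      apiKey,
      modelName,
      promptText,
      { systemPrompt },  // assumed shape of the trailing arguments; the hunk above truncates the call
  );

  // The optional reasoning summary is read the same way as before the provider switch.
  console.log('Reasoning summary:', llmResponse.reasoning?.summary);

  // parseResponse() replaces OpenAIClient.parseOpenAIResponse() and yields either a tool
  // request (toolName/toolArgs) or a final answer, which the run loop turns into chat messages.
  return UnifiedLLMClient.parseResponse(llmResponse);
}

The BUILD.gn and devtools_grd_files.gni hunks simply register the three new modules (LiteLLMClient, UnifiedLLMClient, ChatLiteLLM) so their compiled .js files ship with the DevTools front end; with provider selection hidden behind UnifiedLLMClient, the rest of the AgentRunner diff is largely a mechanical rename from openAIResponse to llmResponse.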
