Skip to content

Commit 0a1d33f

Browse files
authored
Refactor code to smaller files and code cleanup (#8)
- Refactor code to include two new dialog components - Fix config bugs - Add support for open source thinking models
1 parent ea83343 commit 0a1d33f

File tree

12 files changed

+2829
-1547
lines changed

12 files changed

+2829
-1547
lines changed

front_end/panels/ai_chat/BUILD.gn

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@ devtools_module("ai_chat") {
1818
sources = [
1919
"ui/AIChatPanel.ts",
2020
"ui/ChatView.ts",
21+
"ui/HelpDialog.ts",
22+
"ui/SettingsDialog.ts",
2123
"ai_chat_impl.ts",
2224
"core/Graph.ts",
2325
"core/State.ts",
@@ -73,6 +75,8 @@ devtools_module("ai_chat") {
7375
_ai_chat_sources = [
7476
"ui/AIChatPanel.ts",
7577
"ui/ChatView.ts",
78+
"ui/HelpDialog.ts",
79+
"ui/SettingsDialog.ts",
7680
"ai_chat_impl.ts",
7781
"core/Graph.ts",
7882
"core/State.ts",

front_end/panels/ai_chat/agent_framework/implementation/ConfiguredAgents.ts

Lines changed: 16 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ export function initializeConfiguredAgents(): void {
8181
function createResearchAgentConfig(): AgentToolConfig {
8282
return {
8383
name: 'research_agent',
84-
description: 'Performs in-depth research on a specific query autonomously using multiple steps and internal tool calls (navigation, fetching, extraction). It aims to produce a comprehensive final report validated by a critique process.',
84+
description: 'Performs in-depth research on a specific query autonomously using multiple steps and internal tool calls (navigation, fetching, extraction). It always hands off to the content writer agent to produce a comprehensive final report.',
8585
systemPrompt: `You are a singular task research agent designed to conduct in-depth research on a single topic provided by the user. Your task is to leverage browser capabilities to gather comprehensive information, following these steps:
8686
8787
Here is an example of steps you can take to complete your research (go in this order):
@@ -90,48 +90,37 @@ Here is an example of steps you can take to complete your research (go in this o
9090
3. Call fetcher_tool to fetch the content of all the URLs you have found from the search results
9191
4. Focus on collecting comprehensive data rather than writing the final report yourself
9292
93-
## IMPORTANT: Hand off to content writer for final report
94-
Once you've collected sufficient research data (at least 3-5 different sources with substantial information), hand off to the content_writer_agent via the handoff_to_content_writer_agent tool. The content writer is specifically trained to organize research data into coherent, well-structured reports.
93+
## MANDATORY: Hand off to content writer for final report
94+
Once you've collected sufficient research data (at least 3-5 different sources with substantial information), you MUST hand off to the content_writer_agent via the handoff_to_content_writer_agent tool. The content writer is specifically trained to organize research data into coherent, well-structured reports.
9595
96-
When to use the handoff:
96+
When to use the handoff (REQUIRED for ALL cases):
9797
- After you've collected enough diverse, high-quality information
9898
- When you have explored multiple perspectives on the topic
9999
- When you're ready for the information to be organized into a final report
100-
- Instead of using finalize_with_critique directly on your own report
101100
102101
What happens during handoff:
103102
- The content_writer_agent will receive your research data
104103
- It will analyze the information and create a well-structured report
105-
- It will handle the finalize_with_critique stage for you
104+
- It will handle the finalize_with_critique stage automatically
106105
107-
Only use finalize_with_critique yourself if you decide NOT to use the handoff for some reason.
106+
## Research Quality Guidelines
108107
109-
## If Not Using Handoff
110-
If you decide to complete the research yourself without handoff, synthesize all collected information into a comprehensive research report in markdown and use the 'finalize_with_critique' tool to submit your final answer for quality evaluation.
108+
When collecting research data, focus on:
109+
1. Gathering information from diverse, reliable sources
110+
2. Exploring multiple perspectives on the topic
111+
3. Collecting detailed data, including statistics, expert opinions, case studies, etc.
112+
4. Organizing information into logical categories
113+
5. Including important context, historical background, and current trends
111114
112-
The 'finalize_with_critique' tool will ensure your research meets the user's requirements. If it provides feedback, incorporate it and try again until your answer is accepted.
115+
Collect sufficient information to enable the content writer to create a comprehensive report of at least 5 different sections. If there is not enough content, do more research.
113116
114-
## Here is an example of the final report structure (you can come up with your own structure that is better for the user's query):
115-
116-
Present your findings in a structured markdown report with:
117-
118-
1. **Executive Summary**: Brief overview of key findings
119-
2. **Research Question**: Clear restatement of what you investigated
120-
3. **Methodology**: Sources consulted and selection criteria
121-
4. **Key Findings**: Organized by main themes or questions
122-
5. **Analysis**: Synthesis of information, highlighting consensus and contradictions
123-
6. **Limitations**: Gaps in available information
124-
7. **Conclusions**: Summary of the most reliable answers based on the research
125-
8. **References**: Full citation list of all sources consulted
126-
127-
Maintain objectivity throughout your research process and clearly distinguish between well-established facts and more speculative information. When appropriate, note areas where more research might be needed. Note: the final report should be at least 5000 words or even longer based on the topic, if there is not enough content do more research.`,
117+
Maintain objectivity throughout your research process and clearly distinguish between well-established facts and more speculative information.`,
128118
tools: [
129119
'navigate_url',
130120
'navigate_back',
131121
'fetcher_tool',
132122
'schema_based_extractor',
133-
'node_ids_to_urls',
134-
'finalize_with_critique'
123+
'node_ids_to_urls'
135124
],
136125
maxIterations: 15,
137126
modelName: () => AIChatPanel.getMiniModel(),
@@ -206,7 +195,7 @@ Your process should follow these steps:
206195
207196
The final output should be in markdown format, and it should be lengthy and detailed. Aim for 5-10 pages of content, at least 1000 words.`,
208197
tools: [],
209-
maxIterations: 2,
198+
maxIterations: 3,
210199
modelName: () => AIChatPanel.getMiniModel(),
211200
temperature: 0.3,
212201
schema: {

front_end/panels/ai_chat/core/AgentService.ts

Lines changed: 48 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -78,16 +78,27 @@ export class AgentService extends Common.ObjectWrapper.ObjectWrapper<{
7878
if (!modelName) {
7979
throw new Error('Model name is required for initialization');
8080
}
81+
82+
// Check if the configuration requires an API key
83+
const requiresApiKey = this.#doesCurrentConfigRequireApiKey();
84+
85+
// If API key is required but not provided, throw error
86+
if (requiresApiKey && !apiKey) {
87+
const provider = localStorage.getItem('ai_chat_provider') || 'openai';
88+
throw new Error(`${provider === 'openai' ? 'OpenAI' : 'LiteLLM'} API key is required for this configuration`);
89+
}
8190

8291
// Will throw error if OpenAI model is used without API key
8392
this.#graph = createAgentGraph(apiKey, modelName);
8493

8594
this.#isInitialized = true;
8695
} catch (error) {
8796
console.error('Failed to initialize agent:', error);
88-
// Check if it's a specific API key error
89-
if (error instanceof Error && error.message === 'OpenAI API key is required for OpenAI models') {
90-
throw new Error('OpenAI API key is required for this model');
97+
// Pass through specific errors
98+
if (error instanceof Error &&
99+
(error.message.includes('API key is required') ||
100+
error.message.includes('endpoint is required'))) {
101+
throw error;
91102
}
92103
throw new Error(i18nString(UIStrings.agentInitFailed));
93104
}
@@ -118,7 +129,10 @@ export class AgentService extends Common.ObjectWrapper.ObjectWrapper<{
118129
* Sends a message to the AI agent
119130
*/
120131
async sendMessage(text: string, imageInput?: ImageInputData, selectedAgentType?: string | null): Promise<ChatMessage> {
121-
if (!this.#apiKey) {
132+
// Check if the current configuration requires an API key
133+
const requiresApiKey = this.#doesCurrentConfigRequireApiKey();
134+
135+
if (requiresApiKey && !this.#apiKey) {
122136
throw new Error('API key not set. Please set the API key in settings.');
123137
}
124138

@@ -270,6 +284,36 @@ export class AgentService extends Common.ObjectWrapper.ObjectWrapper<{
270284
}
271285
return pageTitle;
272286
}
287+
288+
/**
289+
* Helper to determine if the current configuration requires an API key
290+
* LiteLLM with an endpoint doesn't require an API key, other providers do
291+
*/
292+
#doesCurrentConfigRequireApiKey(): boolean {
293+
try {
294+
// Check the selected provider
295+
const selectedProvider = localStorage.getItem('ai_chat_provider') || 'openai';
296+
297+
// OpenAI provider always requires an API key
298+
if (selectedProvider === 'openai') {
299+
return true;
300+
}
301+
302+
// For LiteLLM, only require API key if no endpoint is configured
303+
if (selectedProvider === 'litellm') {
304+
const hasLiteLLMEndpoint = Boolean(localStorage.getItem('ai_chat_litellm_endpoint'));
305+
// If we have an endpoint, API key is optional
306+
return !hasLiteLLMEndpoint;
307+
}
308+
309+
// Default to requiring API key for any unknown provider
310+
return true;
311+
} catch (error) {
312+
console.error('Error checking if API key is required:', error);
313+
// Default to requiring API key in case of errors
314+
return true;
315+
}
316+
}
273317
}
274318

275319
// Define UI strings object to manage i18n strings

front_end/panels/ai_chat/core/Graph.ts

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import { ChatLiteLLM } from './ChatLiteLLM.js';
1111
import { ChatOpenAI } from './ChatOpenAI.js';
1212
import { createAgentGraphFromConfig } from './ConfigurableGraph.js';
1313
import { defaultAgentGraphConfig } from './GraphConfigs.js';
14+
import { AIChatPanel } from '../ui/AIChatPanel.js';
1415
import {
1516
ChatPromptFormatter,
1617
createSystemPrompt,
@@ -27,25 +28,37 @@ export function createAgentGraph(apiKey: string | null, modelName: string): Comp
2728
}
2829

2930
let model;
30-
// Get model options to check type
31-
const modelOptions = JSON.parse(localStorage.getItem('ai_chat_model_options') || '[]');
31+
// Get model options using the centralized method
32+
const modelOptions = AIChatPanel.getModelOptions();
33+
3234
const modelOption = modelOptions.find((opt: {value: string, type: string}) => opt.value === modelName);
3335
const isLiteLLMModel = modelOption?.type === 'litellm' || modelName.startsWith('litellm/');
3436

3537
if (isLiteLLMModel) {
3638
// Get LiteLLM configuration from localStorage
3739
const liteLLMEndpoint = localStorage.getItem('ai_chat_litellm_endpoint');
40+
41+
// Check if endpoint is configured
42+
if (!liteLLMEndpoint) {
43+
throw new Error('LiteLLM endpoint is required for LiteLLM models');
44+
}
3845

3946
// Handle both cases: models with and without 'litellm/' prefix
4047
const actualModelName = modelName.startsWith('litellm/') ?
4148
modelName.substring('litellm/'.length) :
4249
modelName;
50+
51+
console.log('Creating ChatLiteLLM model:', {
52+
modelName: actualModelName,
53+
endpoint: liteLLMEndpoint,
54+
hasApiKey: Boolean(apiKey)
55+
});
4356

4457
model = new ChatLiteLLM({
4558
liteLLMApiKey: apiKey,
4659
modelName: actualModelName,
4760
temperature: 0,
48-
baseUrl: liteLLMEndpoint || undefined,
61+
baseUrl: liteLLMEndpoint,
4962
});
5063
} else {
5164
// Standard OpenAI model - requires API key

front_end/panels/ai_chat/core/LiteLLMClient.ts

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,18 @@ export class LiteLLMClient {
6969
* Constructs the full endpoint URL based on provided options
7070
*/
7171
private static getEndpoint(options?: LiteLLMCallOptions): string {
72+
// Check if we have a valid endpoint or baseUrl
73+
if (!options?.endpoint && !options?.baseUrl) {
74+
// Check localStorage as a fallback for endpoint
75+
const localStorageEndpoint = localStorage.getItem('ai_chat_litellm_endpoint');
76+
if (!localStorageEndpoint) {
77+
throw new Error('LiteLLM endpoint not configured. Please set endpoint in settings.');
78+
}
79+
console.log(`[LiteLLMClient] Using endpoint from localStorage: ${localStorageEndpoint}`);
80+
const baseUrl = localStorageEndpoint.replace(/\/$/, '');
81+
return `${baseUrl}${this.CHAT_COMPLETIONS_PATH}`;
82+
}
83+
7284
// If full endpoint is provided, check if it includes the chat completions path
7385
if (options?.endpoint) {
7486
// Check if the endpoint already includes the chat completions path
@@ -88,7 +100,7 @@ export class LiteLLMClient {
88100
return `${baseUrl}${this.CHAT_COMPLETIONS_PATH}`;
89101
}
90102

91-
// Default to local LiteLLM
103+
// Default to local LiteLLM (should not reach here due to the check at the top)
92104
return `${this.DEFAULT_BASE_URL}${this.CHAT_COMPLETIONS_PATH}`;
93105
}
94106

front_end/panels/ai_chat/core/UnifiedLLMClient.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,7 @@ export class UnifiedLLMClient {
242242
if ('function' in tool) {
243243
return tool;
244244
}
245-
245+
246246
// Transform OpenAI format to Anthropic format
247247
// OpenAI: { type: 'function', name: '...', description: '...', parameters: {...} }
248248
// Anthropic expects: { type: 'function', function: { name: '...', description: '...', parameters: {...} } }
@@ -256,7 +256,7 @@ export class UnifiedLLMClient {
256256
}
257257
};
258258
}
259-
259+
260260
// Default: return as is if we don't recognize the format
261261
return tool;
262262
});

0 commit comments

Comments (0)