
Commit c8d9fbd

Revert some changes to agentic framework
1 parent 874a92e commit c8d9fbd

3 files changed: +71 −251 lines changed

front_end/panels/ai_chat/tools/CritiqueTool.ts

Lines changed: 27 additions & 73 deletions

@@ -6,7 +6,6 @@ import { AgentService } from '../core/AgentService.js';
 import { createLogger } from '../core/Logger.js';
 import { LLMClient } from '../LLM/LLMClient.js';
 import { AIChatPanel } from '../ui/AIChatPanel.js';
-import { tracedLLMCall } from '../tracing/TracingConfig.js';

 import type { Tool } from './Tools.js';

@@ -170,30 +169,15 @@ Return a JSON array of requirement statements. Example format:
       const { model, provider } = AIChatPanel.getNanoModelWithProvider();
       const llm = LLMClient.getInstance();

-      const response = await tracedLLMCall(
-        () => llm.call({
-          provider,
-          model,
-          messages: [
-            { role: 'user', content: userPrompt }
-          ],
-          systemPrompt,
-          temperature: 0.1,
-        }),
-        {
-          toolName: this.name,
-          model,
-          provider,
-          temperature: 0.1,
-          input: {
-            systemPrompt: systemPrompt.substring(0, 500) + '...',
-            userPrompt: userPrompt.substring(0, 500) + '...'
-          },
-          metadata: {
-            phase: 'extract_requirements'
-          }
-        }
-      );
+      const response = await llm.call({
+        provider,
+        model,
+        messages: [
+          { role: 'user', content: userPrompt }
+        ],
+        systemPrompt,
+        temperature: 0.1,
+      });

       if (!response.text) {
         return { success: false, requirements: [], error: 'No response received' };

@@ -278,30 +262,15 @@ ${JSON.stringify(evaluationSchema, null, 2)}`;
       const { model, provider } = AIChatPanel.getNanoModelWithProvider();
       const llm = LLMClient.getInstance();

-      const response = await tracedLLMCall(
-        () => llm.call({
-          provider,
-          model,
-          messages: [
-            { role: 'user', content: userPrompt }
-          ],
-          systemPrompt,
-          temperature: 0.1,
-        }),
-        {
-          toolName: this.name,
-          model,
-          provider,
-          temperature: 0.1,
-          input: {
-            systemPrompt: systemPrompt.substring(0, 200) + '...',
-            userPrompt: userPrompt.substring(0, 200) + '...'
-          },
-          metadata: {
-            phase: 'evaluate_response'
-          }
-        }
-      );
+      const response = await llm.call({
+        provider,
+        model,
+        messages: [
+          { role: 'user', content: userPrompt }
+        ],
+        systemPrompt,
+        temperature: 0.1,
+      });

       if (!response.text) {
         return { success: false, error: 'No response received' };

@@ -353,30 +322,15 @@ Be concise, specific, and constructive.`;
       const { model, provider } = AIChatPanel.getNanoModelWithProvider();
       const llm = LLMClient.getInstance();

-      const response = await tracedLLMCall(
-        () => llm.call({
-          provider,
-          model,
-          messages: [
-            { role: 'user', content: userPrompt }
-          ],
-          systemPrompt,
-          temperature: 0.7,
-        }),
-        {
-          toolName: this.name,
-          model,
-          provider,
-          temperature: 0.7,
-          input: {
-            systemPrompt: systemPrompt.substring(0, 200) + '...',
-            userPrompt: userPrompt.substring(0, 200) + '...'
-          },
-          metadata: {
-            phase: 'generate_feedback'
-          }
-        }
-      );
+      const response = await llm.call({
+        provider,
+        model,
+        messages: [
+          { role: 'user', content: userPrompt }
+        ],
+        systemPrompt,
+        temperature: 0.7,
+      });

       return response.text || 'The plan does not meet all requirements, but no specific feedback could be generated.';
     } catch (error: any) {
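The reverted pattern is identical in every hunk: a tracedLLMCall(thunk, traceInfo) wrapper is unwound back to a direct llm.call({...}). The wrapper's rough shape can be inferred from its call sites; the sketch below is a hypothetical reconstruction only (the TraceInfo type and the recordTrace sink are assumptions, not code from TracingConfig.ts).

// Hypothetical sketch of the removed tracedLLMCall wrapper, inferred solely
// from its call sites in this diff; the real TracingConfig.ts may differ.
interface TraceInfo {
  toolName: string;
  model: string;
  provider: string;
  temperature: number;
  input: Record<string, unknown>;    // truncated prompts, attempt counters, etc.
  metadata: Record<string, unknown>; // e.g. { phase: 'extract_requirements' }
}

// Placeholder sink for this sketch; a real implementation would forward the
// event to the panel's tracing backend.
function recordTrace(event: Record<string, unknown>): void {
  console.debug('[trace]', event);
}

async function tracedLLMCall<T>(call: () => Promise<T>, trace: TraceInfo): Promise<T> {
  const start = Date.now();
  try {
    const result = await call(); // run the wrapped llm.call(...)
    recordTrace({ ...trace, durationMs: Date.now() - start, ok: true });
    return result;
  } catch (error) {
    recordTrace({ ...trace, durationMs: Date.now() - start, ok: false, error: String(error) });
    throw error;
  }
}

The diff makes the trade-off concrete: per-call tracing in this style costs roughly fifteen extra lines of metadata at every call site, which is exactly what the revert strips away.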

front_end/panels/ai_chat/tools/StreamlinedSchemaExtractorTool.ts

Lines changed: 22 additions & 57 deletions

@@ -10,7 +10,6 @@ import { AgentService } from '../core/AgentService.js';
 import { createLogger } from '../core/Logger.js';
 import { LLMClient } from '../LLM/LLMClient.js';
 import { AIChatPanel } from '../ui/AIChatPanel.js';
-import { tracedLLMCall } from '../tracing/TracingConfig.js';

 import type { Tool } from './Tools.js';

@@ -241,34 +240,17 @@ IMPORTANT: Only extract data that you can see in the accessibility tree above. D

       const { model, provider } = AIChatPanel.getMiniModelWithProvider();
       const llm = LLMClient.getInstance();
-      const llmResponse = await tracedLLMCall(
-        () => llm.call({
-          provider,
-          model,
-          messages: [
-            { role: 'system', content: systemPrompt },
-            { role: 'user', content: extractionPrompt }
-          ],
-          systemPrompt: systemPrompt,
-          temperature: 0.1,
-          retryConfig: { maxRetries: 3, baseDelayMs: 1500 }
-        }),
-        {
-          toolName: this.name,
-          model,
-          provider,
-          temperature: 0.1,
-          input: {
-            systemPrompt: systemPrompt.substring(0, 500) + '...',
-            extractionPrompt: extractionPrompt.substring(0, 500) + '...',
-            attempt
-          },
-          metadata: {
-            phase: 'data_extraction',
-            attempt
-          }
-        }
-      );
+      const llmResponse = await llm.call({
+        provider,
+        model,
+        messages: [
+          { role: 'system', content: systemPrompt },
+          { role: 'user', content: extractionPrompt }
+        ],
+        systemPrompt: systemPrompt,
+        temperature: 0.1,
+        retryConfig: { maxRetries: 3, baseDelayMs: 1500 }
+      });
       const result = llmResponse.text;

       logger.debug(`JSON extraction successful on attempt ${attempt}`);

@@ -385,34 +367,17 @@ CRITICAL: Only use nodeIds that you can actually see in the accessibility tree a
     try {
       const { model, provider } = AIChatPanel.getMiniModelWithProvider();
       const llm = LLMClient.getInstance();
-      const llmResponse = await tracedLLMCall(
-        () => llm.call({
-          provider,
-          model,
-          messages: [
-            { role: 'system', content: systemPrompt },
-            { role: 'user', content: extractionPrompt }
-          ],
-          systemPrompt: systemPrompt,
-          temperature: 0.1,
-          retryConfig: { maxRetries: 3, baseDelayMs: 1500 }
-        }),
-        {
-          toolName: this.name,
-          model,
-          provider,
-          temperature: 0.1,
-          input: {
-            systemPrompt: systemPrompt.substring(0, 500) + '...',
-            extractionPrompt: extractionPrompt.substring(0, 500) + '...',
-            unresolvedNodeIds
-          },
-          metadata: {
-            phase: 'url_resolution',
-            unresolvedNodeIdsCount: unresolvedNodeIds.length
-          }
-        }
-      );
+      const llmResponse = await llm.call({
+        provider,
+        model,
+        messages: [
+          { role: 'system', content: systemPrompt },
+          { role: 'user', content: extractionPrompt }
+        ],
+        systemPrompt: systemPrompt,
+        temperature: 0.1,
+        retryConfig: { maxRetries: 3, baseDelayMs: 1500 }
+      });
       const result = llmResponse.text;

       return result;
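After the revert, both extraction paths in this file issue the same direct call. Condensed into standalone form for reference (extractViaLLM is an illustrative name, not a function in the file; the call options are copied from the added lines above):

// Standalone sketch of the post-revert call pattern. extractViaLLM is a
// hypothetical name; the options object mirrors the + lines above.
import { LLMClient } from '../LLM/LLMClient.js';
import { AIChatPanel } from '../ui/AIChatPanel.js';

async function extractViaLLM(systemPrompt: string, extractionPrompt: string): Promise<string | undefined> {
  const { model, provider } = AIChatPanel.getMiniModelWithProvider();
  const llm = LLMClient.getInstance();
  const llmResponse = await llm.call({
    provider,
    model,
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: extractionPrompt },
    ],
    systemPrompt,
    temperature: 0.1,                                  // low temperature for deterministic extraction
    retryConfig: { maxRetries: 3, baseDelayMs: 1500 }, // retries stay at the client level
  });
  return llmResponse.text;
}

Note that retryConfig survives the revert: retry behavior belongs to LLMClient itself, so only the tracing layer is removed.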
