
Commit 92c1eb5

Extended tracing to all tool calls (#38)
## Pull Request Overview

This PR extends tracing capabilities to all tool calls by introducing a centralized LLM tracing wrapper that ensures consistent observability across the AI chat system.

- Introduces `LLMTracingWrapper.ts` to wrap LLM calls with tracing metadata
- Updates all tool implementations to use the new `callLLMWithTracing` function
- Refactors direct `LLMClient.call()` usage to include proper tracing context
1 parent c2970e4 commit 92c1eb5

10 files changed: +425 -148 lines changed

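The wrapper itself, `tools/LLMTracingWrapper.ts`, is among the ten changed files but its diff is not reproduced in the excerpt below. Going only by the call shape visible at the updated call sites (a first argument carrying the LLM call options: provider, model, messages, systemPrompt, temperature; and a second carrying tracing metadata: toolName, operationName, context, additionalMetadata), a minimal sketch of such a wrapper might look like the following. All type names and the stand-in client and trace helpers are assumptions for illustration, not the project's actual API.

```typescript
// Hypothetical sketch of LLMTracingWrapper.ts, inferred only from the call sites in
// this commit; the real file is not shown here and may differ substantially.

// Call options as they appear at the updated call sites.
interface LLMCallArgs {
  provider: string;
  model: string;
  messages: Array<{role: 'system' | 'user' | 'assistant', content: string}>;
  systemPrompt: string;
  temperature?: number;
}

// Callers read `response.text`, so the result is assumed to expose at least that.
interface LLMResult {
  text?: string;
}

// Tracing metadata gathered at each call site in this commit.
interface TracingMetadata {
  toolName: string;                              // e.g. this.name inside a Tool
  operationName: string;                         // e.g. 'extract_requirements'
  context: string;                               // e.g. 'requirement_analysis'
  additionalMetadata?: Record<string, unknown>;  // free-form counters such as inputLength
}

// Stand-ins so the sketch is self-contained; the real wrapper would presumably use
// LLMClient.getInstance() and the tracing provider from TracingConfig.ts instead.
const runLLMCall = async (_args: LLMCallArgs): Promise<LLMResult> => ({ text: 'stub response' });
const emitTraceEvent = (event: Record<string, unknown>): void => {
  console.debug('[llm-trace]', event);
};

export async function callLLMWithTracing(args: LLMCallArgs, metadata: TracingMetadata): Promise<LLMResult> {
  const startedAt = Date.now();
  // Attributes every traced call reports, merged with per-call-site metadata.
  const attributes = {
    toolName: metadata.toolName,
    operationName: metadata.operationName,
    context: metadata.context,
    provider: args.provider,
    model: args.model,
    ...metadata.additionalMetadata,
  };
  try {
    const result = await runLLMCall(args);
    emitTraceEvent({ ...attributes, status: 'ok', durationMs: Date.now() - startedAt, responseLength: result.text?.length ?? 0 });
    return result;
  } catch (error) {
    emitTraceEvent({ ...attributes, status: 'error', durationMs: Date.now() - startedAt, error: String(error) });
    throw error;  // callers keep their existing try/catch behaviour
  }
}
```

Centralizing the try/catch and event emission in one place is presumably the point of the change: every tool call reports the same attributes (tool, operation, model, duration, outcome), and the per-call-site metadata objects visible in the diffs below are what feed into it.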

front_end/panels/ai_chat/BUILD.gn

Lines changed: 1 addition & 0 deletions
@@ -50,6 +50,7 @@ devtools_module("ai_chat") {
     "LLM/OpenRouterProvider.ts",
     "LLM/LLMClient.ts",
     "tools/Tools.ts",
+    "tools/LLMTracingWrapper.ts",
     "tools/CritiqueTool.ts",
     "tools/FetcherTool.ts",
     "tools/FinalizeWithCritiqueTool.ts",

front_end/panels/ai_chat/agent_framework/AgentRunner.ts

Lines changed: 22 additions & 10 deletions
@@ -12,6 +12,7 @@ import { createLogger } from '../core/Logger.js';
 import { createTracingProvider, getCurrentTracingContext } from '../tracing/TracingConfig.js';
 import type { AgentSession, AgentMessage } from './AgentSessionTypes.js';
 import { AgentErrorHandler } from '../core/AgentErrorHandler.js';
+import { callLLMWithTracing } from '../tools/LLMTracingWrapper.js';
 
 const logger = createLogger('AgentRunner');
 
@@ -1142,18 +1143,29 @@ Format your response as a clear, informative summary that would help a calling a
         content: summaryPrompt
       });
 
-      // Use existing LLM infrastructure
-      const llm = LLMClient.getInstance();
       const provider = AIChatPanel.getProviderForModel(modelName);
 
-      const response = await llm.call({
-        provider,
-        model: modelName,
-        messages: llmMessages,
-        systemPrompt: '', // Empty string instead of undefined
-        // Omit tools parameter entirely to avoid tool_choice conflicts
-        temperature: 0.1 // Lower temperature for more consistent summaries
-      });
+      const response = await callLLMWithTracing(
+        {
+          provider,
+          model: modelName,
+          messages: llmMessages,
+          systemPrompt: '', // Empty string instead of undefined
+          temperature: 0.1,
+          // Omit tools parameter entirely to avoid tool_choice conflicts
+        },
+        {
+          toolName: 'AgentRunner',
+          operationName: 'summarize_agent_progress',
+          context: 'agent_analysis',
+          additionalMetadata: {
+            agentName,
+            completionType,
+            maxIterations,
+            messageCount: messages.length
+          }
+        }
+      );
 
       logger.info(`Generated summary for agent "${agentName}":`, response.text || 'No summary generated.');
       return response.text || 'No summary generated.';

front_end/panels/ai_chat/tools/CritiqueTool.ts

Lines changed: 60 additions & 31 deletions
@@ -4,8 +4,8 @@
 
 import { AgentService } from '../core/AgentService.js';
 import { createLogger } from '../core/Logger.js';
-import { LLMClient } from '../LLM/LLMClient.js';
 import { AIChatPanel } from '../ui/AIChatPanel.js';
+import { callLLMWithTracing } from './LLMTracingWrapper.js';
 
 import type { Tool } from './Tools.js';
 
@@ -167,17 +167,26 @@ Return a JSON array of requirement statements. Example format:
 
     try {
       const { model, provider } = AIChatPanel.getNanoModelWithProvider();
-      const llm = LLMClient.getInstance();
 
-      const response = await llm.call({
-        provider,
-        model,
-        messages: [
-          { role: 'user', content: userPrompt }
-        ],
-        systemPrompt,
-        temperature: 0.1,
-      });
+      const response = await callLLMWithTracing(
+        {
+          provider,
+          model,
+          messages: [
+            { role: 'user', content: userPrompt }
+          ],
+          systemPrompt,
+          temperature: 0.1,
+        },
+        {
+          toolName: this.name,
+          operationName: 'extract_requirements',
+          context: 'requirement_analysis',
+          additionalMetadata: {
+            inputLength: userInput.length
+          }
+        }
+      );
 
       if (!response.text) {
         return { success: false, requirements: [], error: 'No response received' };
@@ -260,17 +269,27 @@ ${JSON.stringify(evaluationSchema, null, 2)}`;
 
     try {
       const { model, provider } = AIChatPanel.getNanoModelWithProvider();
-      const llm = LLMClient.getInstance();
 
-      const response = await llm.call({
-        provider,
-        model,
-        messages: [
-          { role: 'user', content: userPrompt }
-        ],
-        systemPrompt,
-        temperature: 0.1,
-      });
+      const response = await callLLMWithTracing(
+        {
+          provider,
+          model,
+          messages: [
+            { role: 'user', content: userPrompt }
+          ],
+          systemPrompt,
+          temperature: 0.1,
+        },
+        {
+          toolName: this.name,
+          operationName: 'evaluate_response',
+          context: 'plan_evaluation',
+          additionalMetadata: {
+            requirementCount: requirements.length,
+            responseLength: finalResponse.length
+          }
+        }
+      );
 
       if (!response.text) {
         return { success: false, error: 'No response received' };
@@ -320,17 +339,27 @@ Be concise, specific, and constructive.`;
 
     try {
       const { model, provider } = AIChatPanel.getNanoModelWithProvider();
-      const llm = LLMClient.getInstance();
 
-      const response = await llm.call({
-        provider,
-        model,
-        messages: [
-          { role: 'user', content: userPrompt }
-        ],
-        systemPrompt,
-        temperature: 0.7,
-      });
+      const response = await callLLMWithTracing(
+        {
+          provider,
+          model,
+          messages: [
+            { role: 'user', content: userPrompt }
+          ],
+          systemPrompt,
+          temperature: 0.7,
+        },
+        {
+          toolName: this.name,
+          operationName: 'generate_feedback',
+          context: 'feedback_generation',
+          additionalMetadata: {
+            satisfiesCriteria: criteria.satisfiesCriteria,
+            missedRequirements: criteria.missedRequirements?.length || 0
+          }
+        }
+      );
 
       return response.text || 'The plan does not meet all requirements, but no specific feedback could be generated.';
     } catch (error: any) {

front_end/panels/ai_chat/tools/FullPageAccessibilityTreeToMarkdownTool.ts

Lines changed: 22 additions & 12 deletions
@@ -3,8 +3,8 @@
 // found in the LICENSE file.
 
 import { AgentService } from '../core/AgentService.js';
-import { LLMClient } from '../LLM/LLMClient.js';
 import { AIChatPanel } from '../ui/AIChatPanel.js';
+import { callLLMWithTracing } from './LLMTracingWrapper.js';
 
 import { GetAccessibilityTreeTool, type Tool, type ErrorResult } from './Tools.js';
 
@@ -55,17 +55,27 @@ export class FullPageAccessibilityTreeToMarkdownTool implements Tool<Record<stri
     const prompt = `Accessibility Tree:\n\n\`\`\`\n${accessibilityTreeString}\n\`\`\``;
 
     try {
-      const llm = LLMClient.getInstance();
-      const llmResponse = await llm.call({
-        provider,
-        model,
-        messages: [
-          { role: 'system', content: this.getSystemPrompt() },
-          { role: 'user', content: prompt }
-        ],
-        systemPrompt: this.getSystemPrompt(),
-        temperature: 0.7
-      });
+      const llmResponse = await callLLMWithTracing(
+        {
+          provider,
+          model,
+          messages: [
+            { role: 'system', content: this.getSystemPrompt() },
+            { role: 'user', content: prompt }
+          ],
+          systemPrompt: this.getSystemPrompt(),
+          temperature: 0.7
+        },
+        {
+          toolName: this.name,
+          operationName: 'accessibility_to_markdown',
+          context: 'accessibility_tree_conversion',
+          additionalMetadata: {
+            treeLength: accessibilityTreeString.length
+          }
+        }
+      );
+
       const response = llmResponse.text;
       if (response) {
         return {

front_end/panels/ai_chat/tools/HTMLToMarkdownTool.ts

Lines changed: 22 additions & 13 deletions
@@ -7,8 +7,8 @@ import * as Protocol from '../../../generated/protocol.js';
 import * as Utils from '../common/utils.js';
 import { AgentService } from '../core/AgentService.js';
 import { createLogger } from '../core/Logger.js';
-import { LLMClient } from '../LLM/LLMClient.js';
 import { AIChatPanel } from '../ui/AIChatPanel.js';
+import { callLLMWithTracing } from './LLMTracingWrapper.js';
 
 import { waitForPageLoad, type Tool } from './Tools.js';
 
@@ -319,19 +319,28 @@ ${instruction}
   }): Promise<{
     markdownContent: string,
   }> {
-    // Call LLM using the unified client
+    // Call LLM using the unified client with tracing
     const { model, provider } = AIChatPanel.getNanoModelWithProvider();
-    const llm = LLMClient.getInstance();
-    const llmResponse = await llm.call({
-      provider,
-      model,
-      messages: [
-        { role: 'system', content: params.systemPrompt },
-        { role: 'user', content: params.userPrompt }
-      ],
-      systemPrompt: params.systemPrompt,
-      temperature: 0.2 // Lower temperature for more deterministic results
-    });
+    const llmResponse = await callLLMWithTracing(
+      {
+        provider,
+        model,
+        messages: [
+          { role: 'system', content: params.systemPrompt },
+          { role: 'user', content: params.userPrompt }
+        ],
+        systemPrompt: params.systemPrompt,
+        temperature: 0.2 // Lower temperature for more deterministic results
+      },
+      {
+        toolName: this.name,
+        operationName: 'html_to_markdown',
+        context: 'content_extraction',
+        additionalMetadata: {
+          promptLength: params.userPrompt.length
+        }
+      }
+    );
     const response = llmResponse.text;
 
     // Process the response - UnifiedLLMClient returns string directly
