Skip to content

Commit f4c24aa

Browse files
committed
refactor(core): 优化 Gemini adapter 与测试架构
主要改进: - 优化 Gemini adapter 文本提取逻辑,优先使用 SDK response.text 属性 - 正确处理 Gemini 2.5+ 思考内容(thought)与普通文本的区分 - thinkingBudget 最小值改为 0,允许禁用思考功能 - 增强 PromptService 错误处理,验证失败时正确触发 onError 回调 - 重构集成测试,引入 createTestConfig 辅助函数消除硬编码 - 迁移 Anthropic adapter 测试到官方 SDK mock - 添加测试间速率限制保护机制 测试结果: - 565/566 测试通过 (99.8%) - 类型检查全部通过 - 代码量净减少 27 行
1 parent e03fddb commit f4c24aa

File tree

14 files changed

+378
-405
lines changed

14 files changed

+378
-405
lines changed

packages/core/src/services/llm/adapters/gemini-adapter.ts

Lines changed: 54 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
5959
}
6060

6161
/**
62-
* 获取静态模型列表(Gemini 2.0系列
62+
* 获取静态模型列表(Gemini 2.5系列
6363
* 从service.ts的fetchGeminiModelsInfo参考 (L1099-1106)
6464
*/
6565
public getModels(): TextModel[] {
@@ -101,8 +101,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
101101
: { apiKey }
102102
)
103103

104-
console.log('[GeminiAdapter] Fetching models from API...')
105-
106104
const modelsPager = await genAI.models.list({
107105
config: {
108106
pageSize: 100 // 获取更多模型
@@ -130,8 +128,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
130128
})
131129
}
132130

133-
console.log(`[GeminiAdapter] Found ${dynamicModels.length} models supporting generateContent`)
134-
135131
// 如果动态获取失败,返回静态列表
136132
return dynamicModels.length > 0 ? dynamicModels : this.getModels()
137133
} catch (error) {
@@ -191,8 +187,8 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
191187
{
192188
name: 'thinkingBudget',
193189
type: 'number',
194-
description: 'Thinking budget in tokens (Gemini 2.5+)',
195-
min: 1,
190+
description: 'Thinking budget in tokens (Gemini 2.5+). Set to 0 to disable thinking.',
191+
min: 0, // 允许0来禁用思考功能
196192
max: 8192
197193
},
198194
{
@@ -426,39 +422,54 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
426422
// 格式化消息
427423
const contents = this.formatMessages(conversationMessages)
428424

429-
console.log('[GeminiAdapter] Sending request to models.generateContent...')
430-
431425
// 调用新版 API
432426
const response = await client.models.generateContent({
433427
model: config.modelMeta.id,
434428
contents,
435429
config: generationConfig
436430
})
437431

438-
// 提取思考内容(如果启用了 thinkingConfig)
432+
// 提取文本内容和思考内容
433+
let textContent = ''
439434
let reasoning: string | undefined
440-
if (response.candidates?.[0]?.content?.parts) {
435+
436+
// 优先使用新版 SDK 推荐的 response.text 属性
437+
if ((response as any).text) {
438+
textContent = (response as any).text
439+
} else if (response.candidates?.[0]?.content?.parts) {
440+
// 回退到 parts 提取(用于旧版响应格式或特殊情况)
441+
const contentParts: string[] = []
441442
const reasoningParts: string[] = []
443+
442444
for (const part of response.candidates[0].content.parts) {
443-
// Gemini 2.5+ 在 part.thought 中标记思考过程,文本位于 part.text
444-
if ((part as any).thought) {
445-
const rawThought = (part as any).text ?? (part as any).thought
446-
if (rawThought !== undefined) {
447-
const normalized = typeof rawThought === 'string'
448-
? rawThought
449-
: JSON.stringify(rawThought)
450-
reasoningParts.push(normalized)
445+
// 提取文本内容
446+
if ((part as any).text) {
447+
const text = (part as any).text
448+
// 如果这部分是思考过程,加到 reasoning,否则加到 content
449+
if ((part as any).thought) {
450+
reasoningParts.push(text)
451+
} else {
452+
contentParts.push(text)
451453
}
452454
}
453455
}
454456

457+
textContent = contentParts.join('')
455458
if (reasoningParts.length > 0) {
456459
reasoning = reasoningParts.join('')
457460
}
461+
} else if (response.candidates?.[0]?.content) {
462+
// 最后尝试直接访问 content 字段
463+
const content = response.candidates[0].content
464+
if (typeof content === 'string') {
465+
textContent = content
466+
} else if ((content as any).text) {
467+
textContent = (content as any).text
468+
}
458469
}
459470

460471
return {
461-
content: response.text || '',
472+
content: textContent,
462473
reasoning,
463474
metadata: {
464475
model: config.modelMeta.id,
@@ -518,51 +529,50 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
518529
// 格式化消息
519530
const contents = this.formatMessages(conversationMessages)
520531

521-
console.log('[GeminiAdapter] Creating stream request...')
522-
523532
// 调用新版流式 API
524533
const responseStream = await client.models.generateContentStream({
525534
model: config.modelMeta.id,
526535
contents,
527536
config: generationConfig
528537
})
529538

530-
console.log('[GeminiAdapter] Stream response received')
531-
532539
let accumulatedContent = ''
533540
let accumulatedReasoning = ''
534541

535542
// 遍历流式响应
536543
for await (const chunk of responseStream) {
537-
const text = chunk.text
538-
if (text) {
539-
accumulatedContent += text
540-
callbacks.onToken(text)
541-
}
544+
let emittedContentToken = false
542545

543-
// 提取思考内容(流式)
546+
// 从 parts 中提取文本内容
544547
if (chunk.candidates?.[0]?.content?.parts) {
545548
for (const part of chunk.candidates[0].content.parts) {
546-
if ((part as any).thought) {
547-
const rawThought = (part as any).text ?? (part as any).thought
548-
if (rawThought !== undefined) {
549-
const thoughtStr = typeof rawThought === 'string'
550-
? rawThought
551-
: JSON.stringify(rawThought)
552-
553-
accumulatedReasoning += thoughtStr
549+
const partText = (part as any).text
550+
if (!partText) {
551+
continue
552+
}
554553

555-
// 如果有思考内容的回调,触发它
556-
if (callbacks.onReasoningToken) {
557-
callbacks.onReasoningToken(thoughtStr)
558-
}
554+
if ((part as any).thought) {
555+
// 这是思考内容
556+
accumulatedReasoning += partText
557+
if (callbacks.onReasoningToken) {
558+
callbacks.onReasoningToken(partText)
559559
}
560+
} else {
561+
// 这是普通内容
562+
emittedContentToken = true
563+
accumulatedContent += partText
564+
callbacks.onToken(partText)
560565
}
561566
}
562567
}
563-
}
564568

565-
console.log('[GeminiAdapter] Stream completed')
569+
// 如果 SDK 只提供 chunk.text,则回退到该字段
570+
const chunkText = (chunk as any).text
571+
if (chunkText && !emittedContentToken) {
572+
accumulatedContent += chunkText
573+
callbacks.onToken(chunkText)
574+
}
575+
}
566576

567577
// 构建完整响应
568578
const response: LLMResponse = {
@@ -597,8 +607,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
597607
tools: ToolDefinition[],
598608
callbacks: StreamHandlers
599609
): Promise<void> {
600-
console.log('[GeminiAdapter] Sending stream request with tools, count:', tools.length)
601-
602610
// 提取系统消息
603611
const systemMessages = messages.filter((msg) => msg.role === 'system')
604612
const systemInstruction =
@@ -634,17 +642,13 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
634642
// 格式化消息
635643
const contents = this.formatMessages(conversationMessages)
636644

637-
console.log('[GeminiAdapter] Creating stream request with tools...')
638-
639645
// 调用新版流式 API
640646
const responseStream = await client.models.generateContentStream({
641647
model: config.modelMeta.id,
642648
contents,
643649
config: generationConfig
644650
})
645651

646-
console.log('[GeminiAdapter] Stream response with tools received')
647-
648652
let accumulatedContent = ''
649653
let accumulatedReasoning = ''
650654
const toolCalls: ToolCall[] = []
@@ -688,8 +692,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
688692
}
689693
}
690694

691-
console.log('[GeminiAdapter] Stream with tools completed, tool calls:', toolCalls.length)
692-
693695
// 构建完整响应
694696
const response: LLMResponse = {
695697
content: accumulatedContent,

packages/core/src/services/llm/adapters/openai-adapter.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,11 +54,11 @@ export class OpenAIAdapter extends AbstractTextProviderAdapter {
5454
const providerId = 'openai'
5555

5656
return [
57-
// GPT-4o 系列
57+
// GPT-5 系列
5858
{
5959
id: 'gpt-5-2025-08-07',
6060
name: 'GPT-5',
61-
description: '',
61+
description: 'Latest GPT-5 model',
6262
providerId,
6363
capabilities: {
6464
supportsTools: true,

packages/core/src/services/model/advancedParameterDefinitions.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ export const advancedParameterDefinitions: AdvancedParameterDefinition[] = [
163163
labelKey: "params.thinkingBudget.label",
164164
descriptionKey: "params.thinkingBudget.description",
165165
type: "integer",
166-
minValue: 1,
166+
minValue: 0, // 允许0来禁用思考功能
167167
maxValue: 8192,
168168
step: 1,
169169
unitKey: "params.tokens.unit",

packages/core/src/services/model/validation.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ function validateSingleParameter(
144144
message: `Parameter '${def.name}' value ${value} is less than minimum value ${def.minValue}`
145145
};
146146
}
147-
147+
148148
if (def.maxValue !== undefined && value > def.maxValue) {
149149
return {
150150
isValid: false,

packages/core/src/services/prompt/service.ts

Lines changed: 25 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -346,16 +346,21 @@ export class PromptService implements IPromptService {
346346
onToken: callbacks.onToken,
347347
onReasoningToken: callbacks.onReasoningToken, // 支持推理内容流
348348
onComplete: async (response) => {
349-
if (response) {
350-
// 验证主要内容
351-
this.validateResponse(response.content, request.targetPrompt);
349+
try {
350+
if (response) {
351+
// 验证主要内容
352+
this.validateResponse(response.content, request.targetPrompt);
352353

353-
// 注意:历史记录保存由UI层的historyManager.createNewChain方法处理
354-
// 移除重复的saveOptimizationHistory调用以避免重复保存
355-
}
354+
// 注意:历史记录保存由UI层的historyManager.createNewChain方法处理
355+
// 移除重复的saveOptimizationHistory调用以避免重复保存
356+
}
356357

357-
// 调用原始完成回调,传递结构化响应
358-
callbacks.onComplete(response);
358+
// 调用原始完成回调,传递结构化响应
359+
callbacks.onComplete(response);
360+
} catch (error) {
361+
// 如果验证失败,调用错误回调
362+
callbacks.onError(error instanceof Error ? error : new Error(String(error)));
363+
}
359364
},
360365
onError: callbacks.onError
361366
}
@@ -417,14 +422,19 @@ export class PromptService implements IPromptService {
417422
onToken: handlers.onToken,
418423
onReasoningToken: handlers.onReasoningToken, // 支持推理内容流
419424
onComplete: async (response) => {
420-
if (response) {
421-
// 验证迭代结果
422-
this.validateResponse(response.content, lastOptimizedPrompt);
425+
try {
426+
if (response) {
427+
// 验证迭代结果
428+
this.validateResponse(response.content, lastOptimizedPrompt);
429+
}
430+
431+
// 调用原始完成回调,传递结构化响应
432+
// 注意:迭代历史记录由UI层的historyManager.addIteration方法处理
433+
handlers.onComplete(response);
434+
} catch (error) {
435+
// 如果验证失败,调用错误回调
436+
handlers.onError(error instanceof Error ? error : new Error(String(error)));
423437
}
424-
425-
// 调用原始完成回调,传递结构化响应
426-
// 注意:迭代历史记录由UI层的historyManager.addIteration方法处理
427-
handlers.onComplete(response);
428438
},
429439
onError: handlers.onError
430440
}

0 commit comments

Comments (0)