@@ -59,7 +59,7 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
   }
 
   /**
-   * Get the static model list (Gemini 2.0 series)
+   * Get the static model list (Gemini 2.5 series)
    * Adapted from fetchGeminiModelsInfo in service.ts (L1099-1106)
    */
   public getModels(): TextModel[] {
@@ -101,8 +101,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
           : { apiKey }
       )
 
-      console.log('[GeminiAdapter] Fetching models from API...')
-
       const modelsPager = await genAI.models.list({
         config: {
           pageSize: 100 // Fetch more models
@@ -130,8 +128,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
        })
      }
 
-      console.log(`[GeminiAdapter] Found ${dynamicModels.length} models supporting generateContent`)
-
      // Fall back to the static list if dynamic fetching fails
      return dynamicModels.length > 0 ? dynamicModels : this.getModels()
    } catch (error) {
@@ -191,8 +187,8 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
       {
         name: 'thinkingBudget',
         type: 'number',
-        description: 'Thinking budget in tokens (Gemini 2.5+)',
-        min: 1,
+        description: 'Thinking budget in tokens (Gemini 2.5+). Set to 0 to disable thinking.',
+        min: 0, // Allow 0 to disable thinking
         max: 8192
       },
       {
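For context, lowering `min` to 0 matters because a `thinkingBudget` of 0 is what switches thinking off on Gemini 2.5 models. A minimal sketch of how that value reaches the API through `@google/genai` (the model id and prompt below are placeholders, not taken from the adapter):

```ts
import { GoogleGenAI } from '@google/genai'

const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY! })

async function generateWithoutThinking(): Promise<void> {
  const response = await genAI.models.generateContent({
    model: 'gemini-2.5-flash', // placeholder model id
    contents: 'Summarize the benefits of streaming APIs in two sentences.',
    config: {
      thinkingConfig: {
        thinkingBudget: 0 // 0 turns thinking off on models that support disabling it
      }
    }
  })
  console.log(response.text)
}

generateWithoutThinking().catch(console.error)
```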
@@ -426,39 +422,54 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
     // Format the messages
     const contents = this.formatMessages(conversationMessages)
 
-    console.log('[GeminiAdapter] Sending request to models.generateContent...')
-
     // Call the new API
     const response = await client.models.generateContent({
       model: config.modelMeta.id,
       contents,
       config: generationConfig
     })
 
-    // Extract reasoning content (if thinkingConfig is enabled)
+    // Extract the text content and the reasoning content
+    let textContent = ''
     let reasoning: string | undefined
-    if (response.candidates?.[0]?.content?.parts) {
+
+    // Prefer the response.text property recommended by the new SDK
+    if ((response as any).text) {
+      textContent = (response as any).text
+    } else if (response.candidates?.[0]?.content?.parts) {
+      // Fall back to extracting from parts (older response formats or special cases)
+      const contentParts: string[] = []
       const reasoningParts: string[] = []
+
       for (const part of response.candidates[0].content.parts) {
-        // Gemini 2.5+ marks the thinking process via part.thought; the text lives in part.text
-        if ((part as any).thought) {
-          const rawThought = (part as any).text ?? (part as any).thought
-          if (rawThought !== undefined) {
-            const normalized = typeof rawThought === 'string'
-              ? rawThought
-              : JSON.stringify(rawThought)
-            reasoningParts.push(normalized)
+        // Extract the text of this part
+        if ((part as any).text) {
+          const text = (part as any).text
+          // If this part is a thought, add it to reasoning; otherwise add it to the content
+          if ((part as any).thought) {
+            reasoningParts.push(text)
+          } else {
+            contentParts.push(text)
           }
         }
       }
 
+      textContent = contentParts.join('')
       if (reasoningParts.length > 0) {
         reasoning = reasoningParts.join('')
       }
+    } else if (response.candidates?.[0]?.content) {
+      // As a last resort, access the content field directly
+      const content = response.candidates[0].content
+      if (typeof content === 'string') {
+        textContent = content
+      } else if ((content as any).text) {
+        textContent = (content as any).text
+      }
     }
 
     return {
-      content: response.text || '',
+      content: textContent,
       reasoning,
       metadata: {
         model: config.modelMeta.id,
@@ -518,51 +529,50 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
     // Format the messages
     const contents = this.formatMessages(conversationMessages)
 
-    console.log('[GeminiAdapter] Creating stream request...')
-
     // Call the new streaming API
     const responseStream = await client.models.generateContentStream({
       model: config.modelMeta.id,
       contents,
       config: generationConfig
     })
 
-    console.log('[GeminiAdapter] Stream response received')
-
     let accumulatedContent = ''
     let accumulatedReasoning = ''
 
     // Iterate over the streamed response
     for await (const chunk of responseStream) {
-      const text = chunk.text
-      if (text) {
-        accumulatedContent += text
-        callbacks.onToken(text)
-      }
+      let emittedContentToken = false
 
-      // Extract reasoning content (streaming)
+      // Extract text content from the parts
       if (chunk.candidates?.[0]?.content?.parts) {
         for (const part of chunk.candidates[0].content.parts) {
-          if ((part as any).thought) {
-            const rawThought = (part as any).text ?? (part as any).thought
-            if (rawThought !== undefined) {
-              const thoughtStr = typeof rawThought === 'string'
-                ? rawThought
-                : JSON.stringify(rawThought)
-
-              accumulatedReasoning += thoughtStr
+          const partText = (part as any).text
+          if (!partText) {
+            continue
+          }
 
-              // If there is a reasoning callback, trigger it
-              if (callbacks.onReasoningToken) {
-                callbacks.onReasoningToken(thoughtStr)
-              }
+          if ((part as any).thought) {
+            // This is reasoning content
+            accumulatedReasoning += partText
+            if (callbacks.onReasoningToken) {
+              callbacks.onReasoningToken(partText)
            }
+          } else {
+            // This is regular content
+            emittedContentToken = true
+            accumulatedContent += partText
+            callbacks.onToken(partText)
          }
        }
      }
-    }
 
-    console.log('[GeminiAdapter] Stream completed')
+      // Fall back to chunk.text if the SDK only provides that field
+      const chunkText = (chunk as any).text
+      if (chunkText && !emittedContentToken) {
+        accumulatedContent += chunkText
+        callbacks.onToken(chunkText)
+      }
+    }
 
     // Build the complete response
     const response: LLMResponse = {
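For reference, here is a standalone sketch of consuming `generateContentStream` the way the adapter now does, separating `thought` parts from regular text parts (assumes `includeThoughts` is enabled via `thinkingConfig`; the model id and prompt are placeholders):

```ts
import { GoogleGenAI } from '@google/genai'

const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY! })

async function streamWithThoughts(): Promise<void> {
  const responseStream = await genAI.models.generateContentStream({
    model: 'gemini-2.5-pro', // placeholder model id
    contents: 'Walk through a proof that the square root of 2 is irrational.',
    config: {
      thinkingConfig: { includeThoughts: true } // request thought summaries in the stream
    }
  })

  for await (const chunk of responseStream) {
    // Each chunk may carry thought parts (part.thought === true) and/or answer text
    for (const part of chunk.candidates?.[0]?.content?.parts ?? []) {
      const text = (part as any).text
      if (!text) continue
      if ((part as any).thought) {
        process.stdout.write(`[thinking] ${text}`)
      } else {
        process.stdout.write(text)
      }
    }
  }
}

streamWithThoughts().catch(console.error)
```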
@@ -597,8 +607,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
     tools: ToolDefinition[],
     callbacks: StreamHandlers
   ): Promise<void> {
-    console.log('[GeminiAdapter] Sending stream request with tools, count:', tools.length)
-
     // Extract system messages
     const systemMessages = messages.filter((msg) => msg.role === 'system')
     const systemInstruction =
@@ -634,17 +642,13 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
     // Format the messages
     const contents = this.formatMessages(conversationMessages)
 
-    console.log('[GeminiAdapter] Creating stream request with tools...')
-
     // Call the new streaming API
     const responseStream = await client.models.generateContentStream({
       model: config.modelMeta.id,
       contents,
       config: generationConfig
     })
 
-    console.log('[GeminiAdapter] Stream response with tools received')
-
     let accumulatedContent = ''
     let accumulatedReasoning = ''
     const toolCalls: ToolCall[] = []
@@ -688,8 +692,6 @@ export class GeminiAdapter extends AbstractTextProviderAdapter {
       }
     }
 
-    console.log('[GeminiAdapter] Stream with tools completed, tool calls:', toolCalls.length)
-
     // Build the complete response
     const response: LLMResponse = {
       content: accumulatedContent,