import * as dotenv from 'dotenv'
import OpenAI from 'openai'
import { HttpsProxyAgent } from 'https-proxy-agent'
+ import { tavily } from '@tavily/core'
+ import dayjs from 'dayjs'
import type { AuditConfig, KeyConfig, UserInfo } from '../storage/model'
import { Status, UsageResponse } from '../storage/model'
import { convertImageUrl } from '../utils/image'
@@ -10,7 +12,7 @@ import { getCacheApiKeys, getCacheConfig, getOriginConfig } from '../storage/con
import { sendResponse } from '../utils'
import { hasAnyRole, isNotEmptyString } from '../utils/is'
import type { ModelConfig } from '../types'
- import { getChatByMessageId, updateRoomChatModel } from '../storage/mongo'
+ import { getChatByMessageId, updateChatSearchQuery, updateChatSearchResult } from '../storage/mongo'
import type { ChatMessage, RequestOptions } from './types'

dotenv.config()
@@ -49,17 +51,16 @@ export async function initApi(key: KeyConfig) {
const processThreads: { userId: string; abort: AbortController; messageId: string }[] = []

async function chatReplyProcess(options: RequestOptions) {
+   const globalConfig = await getCacheConfig()
  const model = options.room.chatModel
+   const searchEnabled = options.room.searchEnabled
  const key = await getRandomApiKey(options.user, model)
  const userId = options.user._id.toString()
  const maxContextCount = options.user.advanced.maxContextCount ?? 20
  const messageId = options.messageId
  if (key == null || key === undefined)
    throw new Error('没有对应的apikeys配置。请再试一次 | No available apikeys configuration. Please try again.')

-   // Add Chat Record
-   updateRoomChatModel(userId, options.room.roomId, model)
-
  const { message, uploadFileKeys, parentMessageId, process, systemMessage, temperature, top_p } = options

  try {
@@ -93,6 +94,52 @@ async function chatReplyProcess(options: RequestOptions) {
      content,
    })

+     const searchConfig = globalConfig.searchConfig
+     if (searchConfig.enabled && searchConfig?.options?.apiKey && searchEnabled) {
+       messages[0].content = `Before you formally answer the question, you have the option to search the web to get more context.
+ Please judge whether you need to search the Internet.
+ If you need to search, please return the query word to be submitted to the search engine.
+ If you do not need to search, the result will be empty.
+ Please do not actually answer the question.
+ Just wrap the result in <search_query></search_query> and return it in plain text, such as <search_query>example search query</search_query> or <search_query></search_query>`
+       const completion = await openai.chat.completions.create({
+         model,
+         messages,
+       })
+       let searchQuery: string = completion.choices[0].message.content ?? ''
+       const match = searchQuery.match(/<search_query>([\s\S]*)<\/search_query>/i)
+       if (match)
+         searchQuery = match[1].trim()
+       else
+         searchQuery = ''
+
+       if (searchQuery) {
+         await updateChatSearchQuery(messageId, searchQuery)
+
+         const tvly = tavily({ apiKey: searchConfig.options?.apiKey })
+         const response = await tvly.search(
+           searchQuery,
+           {
+             includeRawContent: true,
+             timeout: 300,
+           },
+         )
+
+         const searchResult = JSON.stringify(response)
+         await updateChatSearchResult(messageId, searchResult)
+
+         messages.push({
+           role: 'user',
+           content: `Additional information from web search engine.
+ search query: <search_query>${searchQuery}</search_query>
+ search result: <search_result>${searchResult}</search_result>
+ current time: <date>${dayjs().format('YYYY-MM-DD HH:mm:ss')}</date>`,
+         })
+       }
+     }
+
+     messages[0].content = systemMessage
+
    // Create the chat completion with streaming
    const stream = await openai.chat.completions.create({
      model,
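
Pulled out of the handler, the change amounts to a two-pass exchange: the model is first asked only to emit a `<search_query>` tag; if the tag is non-empty, the server calls Tavily and appends the results (plus the current time) as an extra user message, then restores the original system prompt before the normal streamed completion. The sketch below is a minimal, self-contained version of that flow, not the PR's code: the model name, environment variables, and helper name are illustrative assumptions, and persistence (`updateChatSearchQuery` / `updateChatSearchResult`) and streaming are omitted.

```ts
// Minimal sketch of the search-augmented flow, assuming OPENAI_API_KEY and
// TAVILY_API_KEY are set; error handling and chat persistence are left out.
import OpenAI from 'openai'
import { tavily } from '@tavily/core'
import dayjs from 'dayjs'

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
const tvly = tavily({ apiKey: process.env.TAVILY_API_KEY as string })

async function searchAugmentedReply(question: string, model = 'gpt-4o-mini') {
  // Pass 1: ask the model only for a search query, wrapped in <search_query> tags.
  const decisionPrompt = 'If answering needs web context, reply with the query wrapped in '
    + '<search_query></search_query>; otherwise reply with empty tags. Do not answer the question.'
  const decision = await openai.chat.completions.create({
    model,
    messages: [
      { role: 'system', content: decisionPrompt },
      { role: 'user', content: question },
    ],
  })
  const match = (decision.choices[0].message.content ?? '')
    .match(/<search_query>([\s\S]*)<\/search_query>/i)
  const searchQuery = match ? match[1].trim() : ''

  // Pass 2: answer the question, optionally with Tavily results appended
  // as an additional user message.
  const messages: OpenAI.ChatCompletionMessageParam[] = [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: question },
  ]
  if (searchQuery) {
    const results = await tvly.search(searchQuery, { includeRawContent: true })
    messages.push({
      role: 'user',
      content: `Web search results for <search_query>${searchQuery}</search_query>:\n`
        + `<search_result>${JSON.stringify(results)}</search_result>\n`
        + `current time: <date>${dayjs().format('YYYY-MM-DD HH:mm:ss')}</date>`,
    })
  }
  const answer = await openai.chat.completions.create({ model, messages })
  return answer.choices[0].message.content
}
```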