diff --git a/.gitignore b/.gitignore
index be33f58c2..01cc79858 100644
--- a/.gitignore
+++ b/.gitignore
@@ -160,4 +160,6 @@ test.*
 temp.*
 objdump*
 *.min.*.js
-TODO
\ No newline at end of file
+TODO
+experimental_mods
+search_results
diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py
index 9e719a4e4..59337be57 100644
--- a/request_llms/bridge_chatgpt.py
+++ b/request_llms/bridge_chatgpt.py
@@ -341,7 +341,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 # The former is API2D's stop condition; the latter is OpenAI's
                 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                     # The data stream has ended, and gpt_replying_buffer is fully written
-                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                     break
                 # Process the body of the data stream
                 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -375,7 +375,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history) # Refresh the UI
diff --git a/request_llms/bridge_chatgpt_vision.py b/request_llms/bridge_chatgpt_vision.py
index dfa6e064c..585fcd5e6 100644
--- a/request_llms/bridge_chatgpt_vision.py
+++ b/request_llms/bridge_chatgpt_vision.py
@@ -184,7 +184,7 @@ def make_media_input(inputs, image_paths):
                     # The data stream has ended, and gpt_replying_buffer is fully written
                     lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
                     yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
-                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                     break
                 # Process the body of the data stream
                 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
diff --git a/request_llms/bridge_claude.py b/request_llms/bridge_claude.py
index a08fadc8b..f0b25bacc 100644
--- a/request_llms/bridge_claude.py
+++ b/request_llms/bridge_claude.py
@@ -216,7 +216,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
             if need_to_pass:
                 pass
             elif is_last_chunk:
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 # logger.info(f'[response] {gpt_replying_buffer}')
                 break
             else:
diff --git a/request_llms/bridge_cohere.py b/request_llms/bridge_cohere.py
index f5ab5070e..2503fc997 100644
--- a/request_llms/bridge_cohere.py
+++ b/request_llms/bridge_cohere.py
@@ -223,7 +223,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                     chatbot[-1] = (history[-2], history[-1])
                     yield from update_ui(chatbot=chatbot, history=history, msg="正常") # Refresh the UI
                 if chunkjson['event_type'] == 'stream-end':
-                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                     history[-1] = gpt_replying_buffer
                     chatbot[-1] = (history[-2], history[-1])
                     yield from update_ui(chatbot=chatbot, history=history, msg="正常") # Refresh the UI
diff --git a/request_llms/bridge_google_gemini.py b/request_llms/bridge_google_gemini.py
index 3697b8fcc..45d3dc479 100644
--- a/request_llms/bridge_google_gemini.py
+++ b/request_llms/bridge_google_gemini.py
@@ -109,7 +109,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 gpt_replying_buffer += paraphrase['text'] # Handled with the json parsing library
                 chatbot[-1] = (inputs, gpt_replying_buffer)
                 history[-1] = gpt_replying_buffer
-                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                 yield from update_ui(chatbot=chatbot, history=history)
             if error_match:
                 history = history[-2] # Erroneous replies are not added to the conversation
diff --git a/request_llms/bridge_moonshot.py b/request_llms/bridge_moonshot.py
index e1b3cd484..a1a8ec2f6 100644
--- a/request_llms/bridge_moonshot.py
+++ b/request_llms/bridge_moonshot.py
@@ -166,7 +166,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             history = history[:-2]
             yield from update_ui(chatbot=chatbot, history=history) # Refresh the UI
             break
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result, user_name=chatbot.get_user())


 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
diff --git a/request_llms/bridge_openrouter.py b/request_llms/bridge_openrouter.py
index 10dfe57f8..f162fe6ce 100644
--- a/request_llms/bridge_openrouter.py
+++ b/request_llms/bridge_openrouter.py
@@ -337,7 +337,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                 # The former is API2D's stop condition; the latter is OpenAI's
                 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                     # The data stream has ended, and gpt_replying_buffer is fully written
-                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+                    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
                     break
                 # Process the body of the data stream
                 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -371,7 +371,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
     try:
         chunkjson = json.loads(response.content.decode())
         gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+        log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
         history[-1] = gpt_replying_buffer
         chatbot[-1] = (history[-2], history[-1])
         yield from update_ui(chatbot=chatbot, history=history) # Refresh the UI
diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py
index 0a06545bb..e295b31e7 100644
--- a/request_llms/bridge_qwen.py
+++ b/request_llms/bridge_qwen.py
@@ -59,7 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)

-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     # Summarize the output
     if response == f"[Local Message] 等待{model_name}响应中 ...":
         response = f"[Local Message] {model_name}响应异常 ..."
diff --git a/request_llms/bridge_taichu.py b/request_llms/bridge_taichu.py
index 2c47363f6..e0f8db067 100644
--- a/request_llms/bridge_taichu.py
+++ b/request_llms/bridge_taichu.py
@@ -68,5 +68,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
\ No newline at end of file
diff --git a/request_llms/bridge_zhipu.py b/request_llms/bridge_zhipu.py
index e82229132..c282f59be 100644
--- a/request_llms/bridge_zhipu.py
+++ b/request_llms/bridge_zhipu.py
@@ -97,5 +97,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
\ No newline at end of file
diff --git a/toolbox.py b/toolbox.py
index 060bf8dc0..8da79f294 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -1029,7 +1029,7 @@ def check_repeat_upload(new_pdf_path, pdf_hash):
     # If the content of every page is identical, return True
     return False, None

-def log_chat(llm_model: str, input_str: str, output_str: str):
+def log_chat(llm_model: str, input_str: str, output_str: str, user_name: str=default_user_name):
     try:
         if output_str and input_str and llm_model:
             uid = str(uuid.uuid4().hex)
@@ -1038,8 +1038,8 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
             logger.bind(chat_msg=True).info(dedent(
                 """
                 ╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
-                [UID]
-                {uid}
+                [UID/USER]
+                {uid}/{user_name}
                 [Model]
                 {llm_model}
                 [Query]
@@ -1047,6 +1047,6 @@
                 [Response]
                 {output_str}
                 ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-                """).format(uid=uid, llm_model=llm_model, input_str=input_str, output_str=output_str))
+                """).format(uid=uid, user_name=user_name, llm_model=llm_model, input_str=input_str, output_str=output_str))
     except:
         logger.error(trimmed_format_exc())
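
The change is identical in every bridge, so a minimal sketch of the pattern may help: log_chat gains a user_name keyword argument defaulting to default_user_name, which keeps older three-argument call sites working, while updated bridges pass chatbot.get_user(). The stub class and the concrete default_user_name value below are illustrative assumptions for this sketch, not the project's actual implementations.

    # Sketch only: simplified stand-ins for toolbox.log_chat and the chatbot object.
    default_user_name = "default_user"  # assumption: the real default is defined elsewhere in toolbox.py

    def log_chat(llm_model: str, input_str: str, output_str: str,
                 user_name: str = default_user_name):
        # The new parameter defaults to default_user_name, so call sites
        # that predate this PR and omit it keep working unchanged.
        print(f"[UID/USER] <uid>/{user_name}")
        print(f"[Model] {llm_model}")
        print(f"[Query] {input_str}")
        print(f"[Response] {output_str}")

    class ChatBotStub:
        # Hypothetical stand-in for ChatBotWithCookies; its get_user() is
        # assumed to return the current user's name, as the diff suggests.
        def __init__(self, user: str):
            self._user = user

        def get_user(self) -> str:
            return self._user

    chatbot = ChatBotStub("alice")
    log_chat(llm_model="gpt-4", input_str="hi", output_str="hello")  # old call site: falls back to the default
    log_chat(llm_model="gpt-4", input_str="hi", output_str="hello",
             user_name=chatbot.get_user())  # new call site introduced by this PR

Because the parameter is optional, any bridge still using the three-argument form degrades gracefully to default_user_name rather than raising a TypeError.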