From e9bd3196ba1f6f393d6eb97ebd6d6df5c27cb388 Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Thu, 20 Mar 2025 16:47:50 +0800
Subject: [PATCH 1/2] Correctly save the model name to the database
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/plugins/chat/llm_generator.py | 5 +++--
 src/plugins/models/utils_model.py | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index bcd0b9e87..73cd12ed7 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -37,6 +37,7 @@ class ResponseGenerator:
         self.model_r1_distill = LLM_request(model=global_config.llm_reasoning_minor, temperature=0.7, max_tokens=3000)
         self.model_v25 = LLM_request(model=global_config.llm_normal_minor, temperature=0.7, max_tokens=3000)
         self.current_model_type = "r1"  # 默认使用 R1
+        self.current_model_name = "unknown model"
 
     async def generate_response(self, message: MessageThinking) -> Optional[Union[str, List[str]]]:
         """根据当前模型类型选择对应的生成函数"""
@@ -107,7 +108,7 @@ class ResponseGenerator:
 
         # 生成回复
         try:
-            content, reasoning_content = await model.generate_response(prompt)
+            content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
         except Exception:
             logger.exception("生成回复时出错")
             return None
@@ -144,7 +145,7 @@ class ResponseGenerator:
             "chat_id": message.chat_stream.stream_id,
             "user": sender_name,
             "message": message.processed_plain_text,
-            "model": self.current_model_type,
+            "model": self.current_model_name,
             # 'reasoning_check': reasoning_content_check,
             # 'response_check': content_check,
             "reasoning": reasoning_content,
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index d915b3759..ba85a1bd2 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -526,7 +526,7 @@ class LLM_request:
         """根据输入的提示生成模型的异步响应"""
         content, reasoning_content = await self._execute_request(endpoint="/chat/completions", prompt=prompt)
 
-        return content, reasoning_content
+        return content, reasoning_content, self.model_name
 
     async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str) -> Tuple[str, str]:
         """根据输入的提示和图片生成模型的异步响应"""

From a9730575792db81b95afd97a105156f9df147ced Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Thu, 20 Mar 2025 17:08:53 +0800
Subject: [PATCH 2/2] Finish updating return-value handling in the other call sites
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/plugins/chat/llm_generator.py          | 6 +++---
 src/plugins/chat/topic_identifier.py       | 2 +-
 src/plugins/models/utils_model.py          | 2 +-
 src/plugins/schedule/schedule_generator.py | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index 73cd12ed7..80daa250b 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -175,7 +175,7 @@ class ResponseGenerator:
         """
 
         # 调用模型生成结果
-        result, _ = await self.model_v25.generate_response(prompt)
+        result, _, _ = await self.model_v25.generate_response(prompt)
         result = result.strip()
 
         # 解析模型输出的结果
@@ -216,7 +216,7 @@ class InitiativeMessageGenerate:
         topic_select_prompt, dots_for_select, prompt_template = prompt_builder._build_initiative_prompt_select(
             message.group_id
         )
-        content_select, reasoning = self.model_v3.generate_response(topic_select_prompt)
+        content_select, reasoning, _ = self.model_v3.generate_response(topic_select_prompt)
         logger.debug(f"{content_select} {reasoning}")
         topics_list = [dot[0] for dot in dots_for_select]
         if content_select:
@@ -227,7 +227,7 @@ class InitiativeMessageGenerate:
         else:
             return None
         prompt_check, memory = prompt_builder._build_initiative_prompt_check(select_dot[1], prompt_template)
-        content_check, reasoning_check = self.model_v3.generate_response(prompt_check)
+        content_check, reasoning_check, _ = self.model_v3.generate_response(prompt_check)
         logger.info(f"{content_check} {reasoning_check}")
         if "yes" not in content_check.lower():
             return None
diff --git a/src/plugins/chat/topic_identifier.py b/src/plugins/chat/topic_identifier.py
index c87c37155..6e11bc9d7 100644
--- a/src/plugins/chat/topic_identifier.py
+++ b/src/plugins/chat/topic_identifier.py
@@ -33,7 +33,7 @@ class TopicIdentifier:
 消息内容:{text}"""
 
         # 使用 LLM_request 类进行请求
-        topic, _ = await self.llm_topic_judge.generate_response(prompt)
+        topic, _, _ = await self.llm_topic_judge.generate_response(prompt)
 
         if not topic:
             logger.error("LLM API 返回为空")
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index ba85a1bd2..91e43fd4f 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -522,7 +522,7 @@ class LLM_request:
         return {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
 
     # 防止小朋友们截图自己的key
-    async def generate_response(self, prompt: str) -> Tuple[str, str]:
+    async def generate_response(self, prompt: str) -> Tuple[str, str, str]:
         """根据输入的提示生成模型的异步响应"""
         content, reasoning_content = await self._execute_request(endpoint="/chat/completions", prompt=prompt)
 
diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py
index fe9f77b90..11db6664d 100644
--- a/src/plugins/schedule/schedule_generator.py
+++ b/src/plugins/schedule/schedule_generator.py
@@ -73,7 +73,7 @@ class ScheduleGenerator:
         )
 
         try:
-            schedule_text, _ = await self.llm_scheduler.generate_response(prompt)
+            schedule_text, _, _ = await self.llm_scheduler.generate_response(prompt)
             db.schedule.insert_one({"date": date_str, "schedule": schedule_text})
             self.enable_output = True
         except Exception as e:
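
After these two patches, LLM_request.generate_response returns a three-element tuple
(content, reasoning_content, model_name) instead of two, so every caller unpacks three
values (discarding unneeded ones with "_") and the real model name, rather than the coarse
current_model_type, is what gets written to the "model" field of the stored chat record.
The sketch below is not part of the patches: FakeLLMRequest and the model name
"deepseek-r1" are illustrative stand-ins that only mimic the new signature.

import asyncio
from typing import Tuple


class FakeLLMRequest:
    """Stand-in for LLM_request (src/plugins/models/utils_model.py) with the new return signature."""

    def __init__(self, model_name: str) -> None:
        self.model_name = model_name

    async def generate_response(self, prompt: str) -> Tuple[str, str, str]:
        # The real method awaits self._execute_request(...); here the reply is faked.
        content = f"reply to: {prompt}"
        reasoning_content = "thinking..."
        return content, reasoning_content, self.model_name


async def main() -> None:
    llm = FakeLLMRequest(model_name="deepseek-r1")  # placeholder model name
    # Callers now unpack three values; ones they do not need are discarded,
    # e.g. topic, _, _ = await self.llm_topic_judge.generate_response(prompt).
    content, reasoning_content, model_name = await llm.generate_response("hello")
    # model_name is the value persisted in the "model" field of the chat record.
    record = {"model": model_name, "reasoning": reasoning_content, "response": content}
    print(record)


asyncio.run(main())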