Merge pull request #505 from UnCLAS-Prommer/main-fix

fix: correct the mismatch between model names recorded in MongoDB and the models actually used
SengokuCola authored 2025-03-21 08:13:00 +08:00, committed by GitHub
4 changed files with 10 additions and 9 deletions
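
In short: the records written to MongoDB stored self.current_model_type, a coarse label such as "r1" or "v25", rather than the name of the model that actually produced the reply. The fix has LLM_request.generate_response return the concrete model name as a third tuple element, captures it in ResponseGenerator.current_model_name, and writes that value under the "model" key; every call site is updated to unpack the wider tuple.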


@@ -37,6 +37,7 @@ class ResponseGenerator:
         self.model_r1_distill = LLM_request(model=global_config.llm_reasoning_minor, temperature=0.7, max_tokens=3000)
         self.model_v25 = LLM_request(model=global_config.llm_normal_minor, temperature=0.7, max_tokens=3000)
         self.current_model_type = "r1"  # default to R1
+        self.current_model_name = "unknown model"
 
     async def generate_response(self, message: MessageThinking) -> Optional[Union[str, List[str]]]:
         """Select the generation function matching the current model type"""
@@ -107,7 +108,7 @@ class ResponseGenerator:
         # generate the reply
         try:
-            content, reasoning_content = await model.generate_response(prompt)
+            content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
         except Exception:
             logger.exception("error while generating the reply")
             return None
@@ -144,7 +145,7 @@ class ResponseGenerator:
"chat_id": message.chat_stream.stream_id, "chat_id": message.chat_stream.stream_id,
"user": sender_name, "user": sender_name,
"message": message.processed_plain_text, "message": message.processed_plain_text,
"model": self.current_model_type, "model": self.current_model_name,
# 'reasoning_check': reasoning_content_check, # 'reasoning_check': reasoning_content_check,
# 'response_check': content_check, # 'response_check': content_check,
"reasoning": reasoning_content, "reasoning": reasoning_content,
@@ -174,7 +175,7 @@ class ResponseGenerator:
""" """
# 调用模型生成结果 # 调用模型生成结果
result, _ = await self.model_v25.generate_response(prompt) result, _, _ = await self.model_v25.generate_response(prompt)
result = result.strip() result = result.strip()
# 解析模型输出的结果 # 解析模型输出的结果
@@ -215,7 +216,7 @@ class InitiativeMessageGenerate:
         topic_select_prompt, dots_for_select, prompt_template = prompt_builder._build_initiative_prompt_select(
             message.group_id
         )
-        content_select, reasoning = self.model_v3.generate_response(topic_select_prompt)
+        content_select, reasoning, _ = self.model_v3.generate_response(topic_select_prompt)
         logger.debug(f"{content_select} {reasoning}")
         topics_list = [dot[0] for dot in dots_for_select]
         if content_select:
@@ -226,7 +227,7 @@ class InitiativeMessageGenerate:
         else:
             return None
         prompt_check, memory = prompt_builder._build_initiative_prompt_check(select_dot[1], prompt_template)
-        content_check, reasoning_check = self.model_v3.generate_response(prompt_check)
+        content_check, reasoning_check, _ = self.model_v3.generate_response(prompt_check)
         logger.info(f"{content_check} {reasoning_check}")
         if "yes" not in content_check.lower():
             return None


@@ -33,7 +33,7 @@ class TopicIdentifier:
         Message content: {text}"""
 
         # make the request via the LLM_request class
-        topic, _ = await self.llm_topic_judge.generate_response(prompt)
+        topic, _, _ = await self.llm_topic_judge.generate_response(prompt)
 
         if not topic:
             logger.error("LLM API returned an empty response")


@@ -522,11 +522,11 @@ class LLM_request:
         return {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
         # keep people from screenshotting their own key
 
-    async def generate_response(self, prompt: str) -> Tuple[str, str]:
+    async def generate_response(self, prompt: str) -> Tuple[str, str, str]:
         """Asynchronously generate the model's response for the given prompt"""
         content, reasoning_content = await self._execute_request(endpoint="/chat/completions", prompt=prompt)
-        return content, reasoning_content
+        return content, reasoning_content, self.model_name
 
     async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str) -> Tuple[str, str]:
         """Asynchronously generate the model's response for the given prompt and image"""


@@ -73,7 +73,7 @@ class ScheduleGenerator:
         )
         try:
-            schedule_text, _ = await self.llm_scheduler.generate_response(prompt)
+            schedule_text, _, _ = await self.llm_scheduler.generate_response(prompt)
             db.schedule.insert_one({"date": date_str, "schedule": schedule_text})
             self.enable_output = True
         except Exception as e: