Correctly save the model name to the database
@@ -37,6 +37,7 @@ class ResponseGenerator:
         self.model_r1_distill = LLM_request(model=global_config.llm_reasoning_minor, temperature=0.7, max_tokens=3000)
         self.model_v25 = LLM_request(model=global_config.llm_normal_minor, temperature=0.7, max_tokens=3000)
         self.current_model_type = "r1"  # use R1 by default
+        self.current_model_name = "unknown model"

     async def generate_response(self, message: MessageThinking) -> Optional[Union[str, List[str]]]:
         """Dispatch to the generation function that matches the current model type"""
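The new `current_model_name` attribute is deliberately initialized to a placeholder: it is only overwritten once a model actually answers (next hunk), so any code that reads it before the first successful generation sees a well-defined value. A minimal sketch of the resulting behavior, assuming a no-argument constructor purely for illustration:

    # Hypothetical usage; the real constructor arguments are not shown in this diff.
    generator = ResponseGenerator()
    print(generator.current_model_name)  # "unknown model" until a reply is generated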
@@ -107,7 +108,7 @@ class ResponseGenerator:

         # generate the reply
         try:
-            content, reasoning_content = await model.generate_response(prompt)
+            content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
         except Exception:
             logger.exception("Error while generating reply")
             return None
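`model` here is one of the `LLM_request` instances created in `__init__`, and its `generate_response` now returns a 3-tuple (see the last hunk), so the unpack records the name of the model that actually produced the reply directly on the generator. A hedged sketch of the new call contract:

    # Sketch of the new contract; `model` is an LLM_request instance.
    content, reasoning_content, model_name = await model.generate_response(prompt)
    # model_name is the configured model identifier (e.g. global_config.llm_reasoning_minor),
    # not the coarse "r1"/"v25" tag kept in current_model_type.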
@@ -144,7 +145,7 @@ class ResponseGenerator:
             "chat_id": message.chat_stream.stream_id,
             "user": sender_name,
             "message": message.processed_plain_text,
-            "model": self.current_model_type,
+            "model": self.current_model_name,
             # 'reasoning_check': reasoning_content_check,
             # 'response_check': content_check,
             "reasoning": reasoning_content,
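With this change the persisted record identifies the concrete model that produced the reply rather than the internal type tag, which is the point of the commit. An illustrative record shape (field values are invented):

    # Illustrative only; the values are made up, the keys follow the diff above.
    {
        "chat_id": "stream_42",
        "user": "alice",
        "message": "hello",
        "model": "deepseek-r1-distill",  # previously the type tag, e.g. "r1"
        "reasoning": "...",
    }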
@@ -526,7 +526,7 @@ class LLM_request:
         """Asynchronously generate the model's response to the given prompt"""

         content, reasoning_content = await self._execute_request(endpoint="/chat/completions", prompt=prompt)
-        return content, reasoning_content
+        return content, reasoning_content, self.model_name

     async def generate_response_for_image(self, prompt: str, image_base64: str, image_format: str) -> Tuple[str, str]:
         """Asynchronously generate the model's response to the given prompt and image"""
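Since `generate_response` now returns three values, every caller must unpack three; a caller still expecting two will fail at runtime with `ValueError: too many values to unpack (expected 2)`. Note the hunk changes only the return statement, so the method's return annotation presumably still reads `Tuple[str, str]` and would need the same update, while `generate_response_for_image` keeps its two-value contract. A sketch of the updated method under that assumption:

    # Sketch only: the annotation change is an assumption, since the hunk
    # shows just the return statement, not the signature.
    async def generate_response(self, prompt: str) -> Tuple[str, str, str]:
        """Asynchronously generate the model's response to the given prompt"""
        content, reasoning_content = await self._execute_request(endpoint="/chat/completions", prompt=prompt)
        return content, reasoning_content, self.model_name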