fix: 修复表情动作模型调用并完善视频分析结果存储
- 表情动作: 将模型调用从 `planner` 切换到 `utils`,以使用更合适的模型进行表情推荐。
- 视频分析: 增加检查逻辑,仅当分析成功且结果不为错误提示时,才将结果存入数据库,防止存储无效记录。
This commit is contained in:
@@ -747,7 +747,7 @@ class VideoAnalyzer:
             os.unlink(temp_path)

         # 保存分析结果到数据库(仅保存成功的结果)
-        if success:
+        if success and not result.startswith("❌"):
             metadata = {"filename": filename, "file_size": len(video_bytes), "analysis_timestamp": time.time()}
             self._store_video_result(video_hash=video_hash, description=result, metadata=metadata)
             logger.info("✅ 分析结果已保存到数据库")
@@ -152,10 +152,10 @@ class EmojiAction(BaseAction):

         # 调用LLM
         models = llm_api.get_available_models()
-        chat_model_config = models.get("planner")
+        chat_model_config = models.get("utils")
         if not chat_model_config:
-            logger.error(f"{self.log_prefix} 未找到'planner'模型配置,无法调用LLM")
-            return False, "未找到'planner'模型配置"
+            logger.error(f"{self.log_prefix} 未找到'utils'模型配置,无法调用LLM")
+            return False, "未找到'utils'模型配置"

         success, chosen_emotion, _, _ = await llm_api.generate_with_model(
             prompt, model_config=chat_model_config, request_type="emoji"
@@ -212,10 +212,10 @@ class EmojiAction(BaseAction):

         # 调用LLM
         models = llm_api.get_available_models()
-        chat_model_config = models.get("planner")
+        chat_model_config = models.get("utils")
        if not chat_model_config:
-            logger.error(f"{self.log_prefix} 未找到'planner'模型配置,无法调用LLM")
-            return False, "未找到'planner'模型配置"
+            logger.error(f"{self.log_prefix} 未找到'utils'模型配置,无法调用LLM")
+            return False, "未找到'utils'模型配置"

         success, chosen_description, _, _ = await llm_api.generate_with_model(
             prompt, model_config=chat_model_config, request_type="emoji"
Reference in New Issue
Block a user