diff --git a/src/chat/utils/utils_video.py b/src/chat/utils/utils_video.py
index 1e186f058..c78acd89a 100644
--- a/src/chat/utils/utils_video.py
+++ b/src/chat/utils/utils_video.py
@@ -747,7 +747,7 @@ class VideoAnalyzer:
             os.unlink(temp_path)
 
         # Save the analysis result to the database (only successful results are stored)
-        if success:
+        if success and not result.startswith("❌"):
             metadata = {"filename": filename, "file_size": len(video_bytes), "analysis_timestamp": time.time()}
             self._store_video_result(video_hash=video_hash, description=result, metadata=metadata)
             logger.info("✅ Analysis result saved to the database")
diff --git a/src/plugins/built_in/core_actions/emoji.py b/src/plugins/built_in/core_actions/emoji.py
index fe03f4478..69a236159 100644
--- a/src/plugins/built_in/core_actions/emoji.py
+++ b/src/plugins/built_in/core_actions/emoji.py
@@ -152,10 +152,10 @@ class EmojiAction(BaseAction):
 
         # Call the LLM
         models = llm_api.get_available_models()
-        chat_model_config = models.get("planner")
+        chat_model_config = models.get("utils")
         if not chat_model_config:
-            logger.error(f"{self.log_prefix} 'planner' model config not found; cannot call LLM")
-            return False, "'planner' model config not found"
+            logger.error(f"{self.log_prefix} 'utils' model config not found; cannot call LLM")
+            return False, "'utils' model config not found"
 
         success, chosen_emotion, _, _ = await llm_api.generate_with_model(
             prompt, model_config=chat_model_config, request_type="emoji"
@@ -212,10 +212,10 @@ class EmojiAction(BaseAction):
 
         # Call the LLM
         models = llm_api.get_available_models()
-        chat_model_config = models.get("planner")
+        chat_model_config = models.get("utils")
         if not chat_model_config:
-            logger.error(f"{self.log_prefix} 'planner' model config not found; cannot call LLM")
-            return False, "'planner' model config not found"
+            logger.error(f"{self.log_prefix} 'utils' model config not found; cannot call LLM")
+            return False, "'utils' model config not found"
 
         success, chosen_description, _, _ = await llm_api.generate_with_model(
             prompt, model_config=chat_model_config, request_type="emoji"
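
In utils_video.py, the tightened guard suggests the analyzer can report `success=True` while `result` is actually an error message prefixed with "❌"; the extra check keeps such placeholder text out of the database cache. A minimal sketch of the predicate, assuming that error-string convention (the helper name `_should_store_result` is hypothetical, not part of this PR):

```python
# Minimal sketch of the caching guard added above. Assumption: analysis
# errors can surface as strings prefixed with "❌" even when success is True.
def _should_store_result(success: bool, result: str) -> bool:
    """Persist only genuine descriptions, never error placeholders."""
    return success and not result.startswith("❌")

# Usage mirroring the patched branch:
# if _should_store_result(success, result):
#     self._store_video_result(video_hash=video_hash, description=result, metadata=metadata)
```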
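In emoji.py, the same "planner" → "utils" swap is applied in two identical hunks, so the lookup-and-error pattern is now duplicated. A sketch of how it could be centralized, using only calls already present in the diff (`llm_api.get_available_models`, `models.get`, `logger.error`); the helper name `_get_utils_model_config` is hypothetical:

```python
# Hypothetical helper (not part of this PR) that centralizes the repeated
# "utils" model-config lookup shown in both hunks above. Assumes the
# surrounding class provides self.log_prefix, llm_api, and logger.
def _get_utils_model_config(self):
    """Return the 'utils' model config, or None (after logging) if missing."""
    models = llm_api.get_available_models()
    chat_model_config = models.get("utils")
    if not chat_model_config:
        logger.error(f"{self.log_prefix} 'utils' model config not found; cannot call LLM")
    return chat_model_config
```

Each caller would then bail out with `return False, "'utils' model config not found"` when the helper yields None, keeping the two action paths from drifting apart again.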