diff --git a/src/config/api_ada_configs.py b/src/config/api_ada_configs.py
index 33fe8fa77..329244145 100644
--- a/src/config/api_ada_configs.py
+++ b/src/config/api_ada_configs.py
@@ -130,7 +130,8 @@ class ModelTaskConfig(ValidatedConfigBase):
     # Required config fields
     utils: TaskConfig = Field(..., description="Component model config")
     utils_small: TaskConfig = Field(..., description="Small component model config")
-    replyer: TaskConfig = Field(..., description="Primary normal_chat reply model config")
+    replyer: TaskConfig = Field(..., description="Primary normal_chat reply model config (group chat)")
+    replyer_private: TaskConfig = Field(..., description="Primary normal_chat reply model config (private chat)")
     maizone: TaskConfig = Field(..., description="Dedicated maizone model")
     emotion: TaskConfig = Field(..., description="Emotion model config")
     vlm: TaskConfig = Field(..., description="Vision-language model config")
diff --git a/src/plugins/built_in/kokoro_flow_chatter/chatter.py b/src/plugins/built_in/kokoro_flow_chatter/chatter.py
index b2f8dd866..b14d5a3a7 100644
--- a/src/plugins/built_in/kokoro_flow_chatter/chatter.py
+++ b/src/plugins/built_in/kokoro_flow_chatter/chatter.py
@@ -206,7 +206,8 @@ class KokoroFlowChatter(BaseChatter):
         exec_results = []
         has_reply = False
 
-        for action in plan_response.actions:
+        for idx, action in enumerate(plan_response.actions, 1):
+            logger.debug(f"[KFC] Executing action {idx}/{len(plan_response.actions)}: {action.type}")
             action_data = action.params.copy()
 
             result = await self.action_manager.execute_action(
@@ -218,6 +219,7 @@ class KokoroFlowChatter(BaseChatter):
                 thinking_id=None,
                 log_prefix="[KFC]",
             )
+            logger.debug(f"[KFC] Action {action.type} result: success={result.get('success')}, reply_text={(result.get('reply_text') or '')[:50]}")
            exec_results.append(result)
             if result.get("success") and action.type in ("kfc_reply", "respond"):
                 has_reply = True
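Note on the debug line added in chatter.py: the result dict can carry reply_text as an explicit None, and result.get('reply_text', '')[:50] would then raise TypeError, because dict.get only falls back when the key is absent. The preview is therefore built as (result.get('reply_text') or '')[:50]. A minimal standalone sketch of the pattern, with made-up action data standing in for the plugin's real plan_response.actions:

    # Standalone sketch of the per-action debug logging; the action dicts
    # below are illustrative, not the plugin's real Action objects.
    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("kfc.sketch")

    actions = [
        {"type": "kfc_reply", "result": {"success": True, "reply_text": "hello there"}},
        {"type": "kfc_wait", "result": {"success": True, "reply_text": None}},
    ]

    for idx, action in enumerate(actions, 1):
        logger.debug("[KFC] Executing action %d/%d: %s", idx, len(actions), action["type"])
        result = action["result"]
        # (… or "") guards against an explicit None value, which the
        # .get("reply_text", "") default alone would not catch.
        preview = (result.get("reply_text") or "")[:50]
        logger.debug("[KFC] Action %s result: success=%s, reply_text=%s",
                     action["type"], result.get("success"), preview)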
diff --git a/src/plugins/built_in/kokoro_flow_chatter/replyer.py b/src/plugins/built_in/kokoro_flow_chatter/replyer.py
index 7d84eec0b..33dcaa597 100644
--- a/src/plugins/built_in/kokoro_flow_chatter/replyer.py
+++ b/src/plugins/built_in/kokoro_flow_chatter/replyer.py
@@ -61,12 +61,12 @@ async def generate_reply_text(
     if global_config and global_config.debug.show_prompt:
         logger.info(f"[KFC Replyer] Generated reply prompt:\n{prompt}")
 
-    # 2. Fetch the replyer model config and call the LLM
+    # 2. Fetch the replyer_private model config and call the LLM (dedicated to KFC private chat)
     models = llm_api.get_available_models()
-    replyer_config = models.get("replyer")
+    replyer_config = models.get("replyer_private")
     if not replyer_config:
-        logger.error("[KFC Replyer] replyer model config not found")
+        logger.error("[KFC Replyer] replyer_private model config not found")
         return False, "(Reply generation failed: model config not found)"
 
     success, raw_response, _reasoning, _model_name = await llm_api.generate_with_model(
diff --git a/src/plugins/built_in/kokoro_flow_chatter/unified.py b/src/plugins/built_in/kokoro_flow_chatter/unified.py
index 6b52c1494..a5dae1d0a 100644
--- a/src/plugins/built_in/kokoro_flow_chatter/unified.py
+++ b/src/plugins/built_in/kokoro_flow_chatter/unified.py
@@ -389,13 +389,13 @@ async def generate_unified_response(
         f"--- PROMPT END ---"
     )
 
-    # Fetch the replyer model config and call the LLM
+    # Fetch the replyer_private model config and call the LLM (dedicated to KFC private chat)
     models = llm_api.get_available_models()
-    replyer_config = models.get("replyer")
+    replyer_config = models.get("replyer_private")
     if not replyer_config:
-        logger.error("[KFC Unified] replyer model config not found")
-        return LLMResponse.create_error_response("replyer model config not found")
+        logger.error("[KFC Unified] replyer_private model config not found")
+        return LLMResponse.create_error_response("replyer_private model config not found")
 
     # Call the LLM (with the merged prompt)
     success, raw_response, _reasoning, _model_name = await llm_api.generate_with_model(
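Both call sites above now hard-fail when the replyer_private task is missing. If a softer migration path were wanted, a lookup that falls back to the group-chat replyer task would be one option. This helper is hypothetical, not part of the diff, and assumes models is the plain task-name-to-TaskConfig mapping returned by llm_api.get_available_models():

    # Hypothetical fallback helper (not in this PR): prefer the new
    # private-chat task, fall back to the group-chat task so model configs
    # written before the replyer_private section existed keep working.
    def resolve_private_replyer(models: dict):
        config = models.get("replyer_private") or models.get("replyer")
        if config is None:
            raise LookupError("neither 'replyer_private' nor 'replyer' is configured")
        return config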
+name = "siliconflow-deepseek-ai/DeepSeek-V3.2" api_provider = "SiliconFlow" price_in = 2.0 price_out = 8.0 @@ -170,7 +170,7 @@ thinking_budget = 256 # Gemini2.5系列旧版参数,不同模型范围 #price_out = 0.0 [model_task_config.utils] # 在麦麦的一些组件中使用的模型,例如表情包模块,取名模块,关系模块,是麦麦必须的模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] # 使用的模型列表,每个子项对应上面的模型名称(name) +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] # 使用的模型列表,每个子项对应上面的模型名称(name) temperature = 0.2 # 模型温度,新V3建议0.1-0.3 max_tokens = 800 # 最大输出token数 #concurrency_count = 2 # 并发请求数量,默认为1(不并发),设置为2或更高启用并发 @@ -180,29 +180,34 @@ model_list = ["qwen3-8b"] temperature = 0.7 max_tokens = 800 -[model_task_config.replyer] # 首要回复模型,还用于表达器和表达方式学习 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +[model_task_config.replyer] # 首要回复模型(群聊使用),还用于表达器和表达方式学习 +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] +temperature = 0.2 # 模型温度,新V3建议0.1-0.3 +max_tokens = 800 + +[model_task_config.replyer_private] # 私聊回复模型(KFC私聊专用) +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] # 可以配置不同的模型用于私聊 temperature = 0.2 # 模型温度,新V3建议0.1-0.3 max_tokens = 800 [model_task_config.planner] #决策:负责决定麦麦该做什么的模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.3 max_tokens = 800 [model_task_config.emotion] #负责麦麦的情绪变化 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.3 max_tokens = 800 [model_task_config.mood] #负责麦麦的心情变化 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.3 max_tokens = 800 [model_task_config.maizone] # maizone模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.7 max_tokens = 800 @@ -229,22 +234,22 @@ temperature = 0.7 max_tokens = 800 [model_task_config.schedule_generator]#日程表生成模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.7 max_tokens = 1000 [model_task_config.anti_injection] # 反注入检测专用模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] # 使用快速的小模型进行检测 +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] # 使用快速的小模型进行检测 temperature = 0.1 # 低温度确保检测结果稳定 max_tokens = 200 # 检测结果不需要太长的输出 [model_task_config.monthly_plan_generator] # 月层计划生成模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.7 max_tokens = 1000 [model_task_config.relationship_tracker] # 用户关系追踪模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.7 max_tokens = 1000 @@ -258,12 +263,12 @@ embedding_dimension = 1024 #------------LPMM知识库模型------------ [model_task_config.lpmm_entity_extract] # 实体提取模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.2 max_tokens = 800 [model_task_config.lpmm_rdf_build] # RDF构建模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.2 max_tokens = 800 @@ -285,7 +290,7 @@ temperature = 0.2 max_tokens = 1000 [model_task_config.memory_long_term_builder] # 长期记忆构建模型(短期→长期图结构) -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2"] temperature = 0.2 max_tokens = 1500