From f94f14cce27e51827efd4ed99d8f670afd68bc5f Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 3 Jun 2025 19:43:54 +0800 Subject: [PATCH] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E6=94=B9=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B=E5=90=8D=E7=A7=B0=EF=BC=8C=E7=A7=BB=E9=99=A4chat=5Fmi?= =?UTF-8?q?nd=E5=A4=84=E7=90=86=E5=99=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- scripts/070configexe.py | 4 --- .../info_processors/mind_processor.py | 3 +- .../info_processors/relationship_processor.py | 3 +- .../info_processors/self_processor.py | 3 +- .../working_memory_processor.py | 3 +- src/config/official_configs.py | 13 +++----- src/person_info/impression_update_task.py | 6 ++-- src/person_info/relationship_manager.py | 8 ++--- template/bot_config_template.toml | 30 +++++++------------ 9 files changed, 25 insertions(+), 48 deletions(-) diff --git a/scripts/070configexe.py b/scripts/070configexe.py index d66e857c0..7eaa6cefd 100644 --- a/scripts/070configexe.py +++ b/scripts/070configexe.py @@ -429,11 +429,9 @@ class ConfigEditor: # "model.normal_chat_1": "普通聊天:主要聊天模型", # "model.normal_chat_2": "普通聊天:次要聊天模型", # "model.focus_working_memory": "专注模式:工作记忆模型", - # "model.focus_chat_mind": "专注模式:聊天思考模型", # "model.focus_tool_use": "专注模式:工具调用模型", # "model.focus_planner": "专注模式:决策模型", # "model.focus_expressor": "专注模式:表达器模型", - # "model.focus_self_recognize": "专注模式:自我识别模型" # } # 获取当前节的名称 # current_section = ".".join(path[:-1]) # 去掉最后一个key @@ -494,11 +492,9 @@ class ConfigEditor: "model.normal_chat_1": "主要聊天模型", "model.normal_chat_2": "次要聊天模型", "model.focus_working_memory": "工作记忆模型", - "model.focus_chat_mind": "聊天规划模型", "model.focus_tool_use": "工具调用模型", "model.focus_planner": "决策模型", "model.focus_expressor": "表达器模型", - "model.focus_self_recognize": "自我识别模型", } section_trans = self.translations.get("sections", {}).get(full_section_path, {}) section_name = section_trans.get("name") or section_translations.get(full_section_path) or 
section diff --git a/src/chat/focus_chat/info_processors/mind_processor.py b/src/chat/focus_chat/info_processors/mind_processor.py index 9beee5ccb..39acc2eb9 100644 --- a/src/chat/focus_chat/info_processors/mind_processor.py +++ b/src/chat/focus_chat/info_processors/mind_processor.py @@ -68,8 +68,7 @@ class MindProcessor(BaseProcessor): self.subheartflow_id = subheartflow_id self.llm_model = LLMRequest( - model=global_config.model.focus_chat_mind, - # temperature=global_config.model.focus_chat_mind["temp"], + model=global_config.model.planner, max_tokens=800, request_type="focus.processor.chat_mind", ) diff --git a/src/chat/focus_chat/info_processors/relationship_processor.py b/src/chat/focus_chat/info_processors/relationship_processor.py index ec68b4ad3..79a97dd69 100644 --- a/src/chat/focus_chat/info_processors/relationship_processor.py +++ b/src/chat/focus_chat/info_processors/relationship_processor.py @@ -49,8 +49,7 @@ class RelationshipProcessor(BaseProcessor): self.subheartflow_id = subheartflow_id self.llm_model = LLMRequest( - model=global_config.model.focus_self_recognize, - temperature=global_config.model.focus_self_recognize["temp"], + model=global_config.model.relation, max_tokens=800, request_type="focus.processor.self_identify", ) diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py index 45666119a..e993c4f48 100644 --- a/src/chat/focus_chat/info_processors/self_processor.py +++ b/src/chat/focus_chat/info_processors/self_processor.py @@ -51,8 +51,7 @@ class SelfProcessor(BaseProcessor): self.subheartflow_id = subheartflow_id self.llm_model = LLMRequest( - model=global_config.model.focus_self_recognize, - temperature=global_config.model.focus_self_recognize["temp"], + model=global_config.model.relation, max_tokens=800, request_type="focus.processor.self_identify", ) diff --git a/src/chat/focus_chat/info_processors/working_memory_processor.py 
b/src/chat/focus_chat/info_processors/working_memory_processor.py index c05826c82..d40b3c93b 100644 --- a/src/chat/focus_chat/info_processors/working_memory_processor.py +++ b/src/chat/focus_chat/info_processors/working_memory_processor.py @@ -60,8 +60,7 @@ class WorkingMemoryProcessor(BaseProcessor): self.subheartflow_id = subheartflow_id self.llm_model = LLMRequest( - model=global_config.model.focus_chat_mind, - temperature=global_config.model.focus_chat_mind["temp"], + model=global_config.model.planner, max_tokens=800, request_type="focus.processor.working_memory", ) diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 7436de970..a0b320757 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -165,7 +165,7 @@ class FocusChatConfig(ConfigBase): class FocusChatProcessorConfig(ConfigBase): """专注聊天处理器配置类""" - mind_processor: bool = True + mind_processor: bool = False """是否启用思维处理器""" self_identify_processor: bool = True @@ -180,9 +180,6 @@ class FocusChatProcessorConfig(ConfigBase): working_memory_processor: bool = True """是否启用工作记忆处理器""" - lite_chat_mind_processor: bool = False - """是否启用轻量级聊天思维处理器,可以节省token消耗和时间""" - @dataclass class ExpressionConfig(ConfigBase): @@ -445,11 +442,6 @@ class ModelConfig(ConfigBase): focus_working_memory: dict[str, Any] = field(default_factory=lambda: {}) """专注工作记忆模型配置""" - focus_chat_mind: dict[str, Any] = field(default_factory=lambda: {}) - """专注聊天规划模型配置""" - - focus_self_recognize: dict[str, Any] = field(default_factory=lambda: {}) - """专注自我识别模型配置""" focus_tool_use: dict[str, Any] = field(default_factory=lambda: {}) """专注工具使用模型配置""" @@ -457,6 +449,9 @@ class ModelConfig(ConfigBase): planner: dict[str, Any] = field(default_factory=lambda: {}) """规划模型配置""" + relation: dict[str, Any] = field(default_factory=lambda: {}) + """关系模型配置""" + focus_expressor: dict[str, Any] = field(default_factory=lambda: {}) """专注表达器模型配置""" diff --git a/src/person_info/impression_update_task.py 
b/src/person_info/impression_update_task.py index 4ab23b29e..c28be7b6d 100644 --- a/src/person_info/impression_update_task.py +++ b/src/person_info/impression_update_task.py @@ -17,8 +17,8 @@ class ImpressionUpdateTask(AsyncTask): def __init__(self): super().__init__( task_name="impression_update", - wait_before_start=10, # 启动后等待10秒 - run_interval=30 # 每1分钟运行一次 + wait_before_start=2, # 启动后等待2秒 + run_interval=20 # 每20秒运行一次 ) async def run(self): @@ -27,7 +27,7 @@ class ImpressionUpdateTask(AsyncTask): # 获取最近10分钟的消息 current_time = int(time.time()) - start_time = current_time - 600 # 10分钟前 + start_time = current_time - 6000 # 100分钟前 logger.debug(f"获取时间范围: {start_time} -> {current_time}") # 获取所有消息 diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py index 4d2fb995d..b47e1a051 100644 --- a/src/person_info/relationship_manager.py +++ b/src/person_info/relationship_manager.py @@ -320,19 +320,19 @@ class RelationshipManager: messages_before = get_raw_msg_by_timestamp_with_chat( chat_id=chat_id, - timestamp_start=timestamp - 600, # 前10分钟 + timestamp_start=timestamp - 6000, # 前100分钟 timestamp_end=timestamp, # person_ids=[user_id], - limit=200, + limit=100, limit_mode="latest" ) messages_after = get_raw_msg_by_timestamp_with_chat( chat_id=chat_id, timestamp_start=timestamp, - timestamp_end=timestamp + 600, # 后10分钟 + timestamp_end=timestamp + 6000, # 后100分钟 # person_ids=[user_id], - limit=200, + limit=100, limit_mode="earliest" ) diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 489baeb78..667a4bd62 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -102,14 +102,15 @@ observation_context_size = 20 # 观察到的最长上下文大小 compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5 compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除 +# 不建议更改 planner_type = "simple" # 规划器类型,可选值:complex(复杂规划器), simple(简单规划器) [focus_chat_processor] # 
专注聊天处理器,打开可以实现更多功能,但是会增加token消耗 -mind_processor = false # 是否启用思维处理器 self_identify_processor = true # 是否启用自我识别处理器 relation_processor = true # 是否启用关系识别处理器 tool_use_processor = false # 是否启用工具使用处理器 -working_memory_processor = false # 是否启用工作记忆处理器,不稳定,消耗量大 +mind_processor = false # 是否启用思维处理器 +working_memory_processor = false # 是否启用工作记忆处理器,消耗量大 [emoji] max_reg_num = 60 # 表情包最大注册数量 @@ -220,6 +221,13 @@ pri_in = 2 pri_out = 8 temp = 0.3 +[model.relation] #用于处理和麦麦和其他人的关系 +name = "Pro/deepseek-ai/DeepSeek-V3" +provider = "SILICONFLOW" +pri_in = 2 +pri_out = 8 +temp = 0.3 + #嵌入模型 [model.embedding] @@ -255,14 +263,6 @@ pri_in = 0.7 pri_out = 2.8 temp = 0.7 -[model.focus_chat_mind] #聊天规划:认真聊天时,生成麦麦对聊天的规划想法 -name = "Pro/deepseek-ai/DeepSeek-V3" -# name = "Qwen/Qwen3-30B-A3B" -provider = "SILICONFLOW" -# enable_thinking = false # 是否启用思考 -pri_in = 2 -pri_out = 8 -temp = 0.3 [model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型 name = "Qwen/Qwen3-14B" @@ -283,16 +283,6 @@ pri_in = 2 pri_out = 8 temp = 0.3 -#自我识别模型,用于自我认知和身份识别 -[model.focus_self_recognize] -# name = "Pro/deepseek-ai/DeepSeek-V3" -name = "Qwen/Qwen3-30B-A3B" -provider = "SILICONFLOW" -pri_in = 0.7 -pri_out = 2.8 -temp = 0.7 -enable_thinking = false # 是否启用思考(qwen3 only) - [maim_message]