From 12a88dcd994b98489847fb3be19a5a1c44796bed Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 2 Jun 2025 12:43:36 +0800
Subject: [PATCH] =?UTF-8?q?feat=EF=BC=9A=E5=85=81=E8=AE=B8=E5=85=B3?=
 =?UTF-8?q?=E9=97=AD=E8=81=8A=E5=A4=A9=E8=A7=84=E5=88=92=E5=A4=84=E7=90=86?=
 =?UTF-8?q?=E5=99=A8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/chat/focus_chat/heartFC_chat.py                   | 2 +-
 src/chat/focus_chat/info_processors/mind_processor.py | 6 ++----
 src/config/official_configs.py                        | 3 +++
 template/bot_config_template.toml                     | 3 ++-
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py
index 2fef2a448..7988f860a 100644
--- a/src/chat/focus_chat/heartFC_chat.py
+++ b/src/chat/focus_chat/heartFC_chat.py
@@ -40,7 +40,7 @@ install(extra_lines=3)
 # 如果配置键名为 None,则该处理器默认启用且不能通过 focus_chat_processor 配置禁用
 PROCESSOR_CLASSES = {
     "ChattingInfoProcessor": (ChattingInfoProcessor, None),
-    "MindProcessor": (MindProcessor, None),
+    "MindProcessor": (MindProcessor, "mind_processor"),
     "ToolProcessor": (ToolProcessor, "tool_use_processor"),
     "WorkingMemoryProcessor": (WorkingMemoryProcessor, "working_memory_processor"),
     "SelfProcessor": (SelfProcessor, "self_identify_processor"),
diff --git a/src/chat/focus_chat/info_processors/mind_processor.py b/src/chat/focus_chat/info_processors/mind_processor.py
index 910b5c759..d930065a6 100644
--- a/src/chat/focus_chat/info_processors/mind_processor.py
+++ b/src/chat/focus_chat/info_processors/mind_processor.py
@@ -23,7 +23,6 @@ logger = get_logger("processor")

 def init_prompt():
     group_prompt = """
-你的名字是{bot_name}
 {memory_str}{extra_info}{relation_prompt}
 {cycle_info_block}
 现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容:
@@ -37,9 +36,8 @@ def init_prompt():
 现在请你继续输出观察和规划,输出要求:
 1. 先关注未读新消息的内容和近期回复历史
 2. 根据新信息,修改和删除之前的观察和规划
-3. 根据聊天内容继续输出观察和规划
-4. 注意群聊的时间线索,话题由谁发起,进展状况如何,思考聊天的时间线。
-6. 语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出思考内容就好"""
+3. 注意群聊的时间线索,话题由谁发起,进展状况如何。
+4. 语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出内容就好"""
     Prompt(group_prompt, "sub_heartflow_prompt_before")

     private_prompt = """
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 274ec99e6..3ec6c6884 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -157,6 +157,9 @@ class FocusChatConfig(ConfigBase):
 @dataclass
 class FocusChatProcessorConfig(ConfigBase):
     """专注聊天处理器配置类"""
+
+    mind_processor: bool = True
+    """是否启用思维处理器"""

     self_identify_processor: bool = True
     """是否启用自我识别处理器"""
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 439a6e121..d4cdbd15d 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
 [inner]
-version = "2.7.0"
+version = "2.8.0"

 #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
 #如果你想要修改配置文件,请在修改后将version的值进行变更
@@ -103,6 +103,7 @@ compressed_length = 8 # 不能大于observation_context_size,心流上下文压
 compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除

 [focus_chat_processor] # 专注聊天处理器,打开可以实现更多功能,但是会增加token消耗
+mind_processor = false # 是否启用思维处理器
 self_identify_processor = true # 是否启用自我识别处理器
 tool_use_processor = false # 是否启用工具使用处理器
 working_memory_processor = false # 是否启用工作记忆处理器,不稳定,消耗量大
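
Note (illustrative, not part of the patch): the second element of each PROCESSOR_CLASSES entry is the
attribute name looked up on FocusChatProcessorConfig, so mapping "MindProcessor" to "mind_processor"
is what lets it be switched off via the [focus_chat_processor] section, while entries whose key is
None stay always-on. A minimal Python sketch of how such a registry could be filtered against the
config follows; the function name build_enabled_processors and the getattr-based lookup are
assumptions for illustration, not the repository's actual loader code:

    # Illustrative sketch only: filter a {name: (processor_class, config_key)} registry
    # against a FocusChatProcessorConfig-like object. build_enabled_processors and the
    # getattr lookup are hypothetical, not code from this repository.
    def build_enabled_processors(processor_classes: dict, processor_config) -> list:
        enabled = []
        for _name, (processor_cls, config_key) in processor_classes.items():
            # config_key is None  -> processor is always enabled, not configurable
            # config_key is a str -> read the matching bool field, e.g. "mind_processor"
            if config_key is None or getattr(processor_config, config_key, True):
                enabled.append(processor_cls())
        return enabled

With the template default mind_processor = false, MindProcessor would be skipped by such a filter,
while ChattingInfoProcessor (config key None) would always load.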