From 178feb3c5a28cff176ea01b54fc1c52254127643 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 3 Jun 2025 15:54:55 +0800 Subject: [PATCH] =?UTF-8?q?feat=EF=BC=9A=E6=96=B0=E5=A2=9E=E5=85=B3?= =?UTF-8?q?=E7=B3=BB=E5=A4=84=E7=90=86=E5=99=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/focus_chat/heartFC_chat.py | 4 +- src/chat/focus_chat/info/relation_info.py | 40 ++++ .../info_processors/mind_processor.py | 4 +- .../info_processors/relationship_processor.py | 175 ++++++++++++++++++ .../info_processors/self_processor.py | 42 ++--- .../info_processors/tool_processor.py | 3 +- .../planners/actions/emoji_action.py | 2 +- .../focus_chat/planners/planner_simple.py | 28 ++- src/chat/normal_chat/normal_chat.py | 30 +-- src/chat/normal_chat/normal_chat_planner.py | 2 +- src/config/official_configs.py | 3 + src/main.py | 4 - template/bot_config_template.toml | 15 +- 13 files changed, 279 insertions(+), 73 deletions(-) create mode 100644 src/chat/focus_chat/info/relation_info.py create mode 100644 src/chat/focus_chat/info_processors/relationship_processor.py diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index 2ebc5e25b..2e1747fc5 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -14,6 +14,7 @@ from src.chat.heart_flow.observation.observation import Observation from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail from src.chat.focus_chat.info.info_base import InfoBase from src.chat.focus_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor +from src.chat.focus_chat.info_processors.relationship_processor import RelationshipProcessor from src.chat.focus_chat.info_processors.mind_processor import MindProcessor from src.chat.focus_chat.info_processors.working_memory_processor import WorkingMemoryProcessor @@ -45,6 +46,7 @@ PROCESSOR_CLASSES = { "ToolProcessor": (ToolProcessor, "tool_use_processor"), "WorkingMemoryProcessor": (WorkingMemoryProcessor, "working_memory_processor"), "SelfProcessor": (SelfProcessor, "self_identify_processor"), + "RelationshipProcessor": (RelationshipProcessor, "relationship_processor"), } logger = get_logger("hfc") # Logger Name Changed @@ -194,7 +196,7 @@ class HeartFChatting: if processor_info: processor_actual_class = processor_info[0] # 获取实际的类定义 # 根据处理器类名判断是否需要 subheartflow_id - if name in ["MindProcessor", "ToolProcessor", "WorkingMemoryProcessor", "SelfProcessor"]: + if name in ["MindProcessor", "ToolProcessor", "WorkingMemoryProcessor", "SelfProcessor", "RelationshipProcessor"]: self.processors.append(processor_actual_class(subheartflow_id=self.stream_id)) elif name == "ChattingInfoProcessor": self.processors.append(processor_actual_class()) diff --git a/src/chat/focus_chat/info/relation_info.py b/src/chat/focus_chat/info/relation_info.py new file mode 100644 index 000000000..0e4ea9533 --- /dev/null +++ b/src/chat/focus_chat/info/relation_info.py @@ -0,0 +1,40 @@ +from dataclasses import dataclass +from .info_base import InfoBase + + +@dataclass +class RelationInfo(InfoBase): + """关系信息类 + + 用于存储和管理当前关系状态的信息。 + + Attributes: + type (str): 信息类型标识符,默认为 "relation" + data (Dict[str, Any]): 包含 current_relation 的数据字典 + """ + + type: str = "relation" + + def get_relation_info(self) -> str: + """获取当前关系状态 + + Returns: + str: 当前关系状态 + """ + return self.get_info("relation_info") or "" + + def set_relation_info(self, relation_info: str) -> None: + """设置当前关系状态 + + Args: + relation_info: 
要设置的关系状态 + """ + self.data["relation_info"] = relation_info + + def get_processed_info(self) -> str: + """获取处理后的信息 + + Returns: + str: 处理后的信息 + """ + return self.get_relation_info() or "" diff --git a/src/chat/focus_chat/info_processors/mind_processor.py b/src/chat/focus_chat/info_processors/mind_processor.py index d930065a6..9beee5ccb 100644 --- a/src/chat/focus_chat/info_processors/mind_processor.py +++ b/src/chat/focus_chat/info_processors/mind_processor.py @@ -23,7 +23,7 @@ logger = get_logger("processor") def init_prompt(): group_prompt = """ -{memory_str}{extra_info}{relation_prompt} +{extra_info}{relation_prompt} {cycle_info_block} 现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容: {chat_observe_info} @@ -42,7 +42,7 @@ def init_prompt(): private_prompt = """ 你的名字是{bot_name} -{memory_str}{extra_info}{relation_prompt} +{extra_info}{relation_prompt} {cycle_info_block} 现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容: {chat_observe_info} diff --git a/src/chat/focus_chat/info_processors/relationship_processor.py b/src/chat/focus_chat/info_processors/relationship_processor.py new file mode 100644 index 000000000..95628d4b3 --- /dev/null +++ b/src/chat/focus_chat/info_processors/relationship_processor.py @@ -0,0 +1,175 @@ +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.heart_flow.observation.observation import Observation +from src.llm_models.utils_model import LLMRequest +from src.config.config import global_config +import time +import traceback +from src.common.logger_manager import get_logger +from src.individuality.individuality import individuality +from src.chat.utils.prompt_builder import Prompt, global_prompt_manager +from src.chat.message_receive.chat_stream import chat_manager +from src.person_info.relationship_manager import relationship_manager +from .base_processor import BaseProcessor +from typing import List, Optional +from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation +from typing import Dict +from src.chat.focus_chat.info.info_base import InfoBase +from src.chat.focus_chat.info.relation_info import RelationInfo + +logger = get_logger("processor") + + +def init_prompt(): + relationship_prompt = """ +{name_block} +你是一个AI,但是你伪装成了一个人类,你不想让别人知道你是一个AI + +{relation_prompt} +现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容: +{chat_observe_info} + +现在请你根据现有的信息,总结你和群里的人的关系 +1. 当聊天记录中提到你时,请输出你和这个人之间的关系 +2. 
当聊天记录中提到其他人时,请输出你和那个人之间的关系
+
+输出内容平淡一些,说中文。
+请注意不要输出多余内容(包括前后缀,括号(),表情包,at或 @等 )。只输出关系内容,记得明确说明这是你的关系。
+
+"""
+    Prompt(relationship_prompt, "relationship_prompt")
+
+
+class RelationshipProcessor(BaseProcessor):
+    log_prefix = "关系"
+
+    def __init__(self, subheartflow_id: str):
+        super().__init__()
+
+        self.subheartflow_id = subheartflow_id
+
+        self.llm_model = LLMRequest(
+            model=global_config.model.focus_self_recognize,
+            temperature=global_config.model.focus_self_recognize["temp"],
+            max_tokens=800,
+            request_type="focus.processor.relation_identify",
+        )
+
+        name = chat_manager.get_stream_name(self.subheartflow_id)
+        self.log_prefix = f"[{name}] "
+
+    async def process_info(
+        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
+    ) -> Optional[List[InfoBase]]:
+        """处理信息对象
+
+        Args:
+            observations: 聊天观察信息列表
+            running_memorys: 当前回忆起的记忆(本处理器暂不使用)
+            *infos: 可变数量的InfoBase类型的信息对象
+
+        Returns:
+            Optional[List[InfoBase]]: 处理后的结构化信息列表,识别失败时返回None
+        """
+        relation_info_str = await self.relation_identify(observations)
+
+        if not relation_info_str:
+            return None
+
+        relation_info = RelationInfo()
+        relation_info.set_relation_info(relation_info_str)
+
+        return [relation_info]
+
+    async def relation_identify(
+        self, observations: Optional[List[Observation]] = None,
+    ):
+        """
+        根据当前观察到的聊天内容,总结bot与聊天中出现的人之间的关系
+
+        参数:
+            observations: 观察信息列表
+
+        返回:
+            str: 关系识别结果,识别失败或无有效内容时为空字符串
+        """
+        if observations is None:
+            observations = []
+
+        # 默认值,避免没有ChattingObservation时引用未定义变量
+        is_group_chat = True
+        chat_target_info = None
+        chat_target_name = "对方"  # 私聊默认名称
+        chat_observe_info = ""
+        person_list = []
+
+        for observation in observations:
+            if isinstance(observation, ChattingObservation):
+                # 获取聊天元信息
+                is_group_chat = observation.is_group_chat
+                chat_target_info = observation.chat_target_info
+                if not is_group_chat and chat_target_info:
+                    # 优先使用person_name,其次user_nickname,最后回退到默认值
+                    chat_target_name = (
+                        chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name
+                    )
+                # 获取聊天内容
+                chat_observe_info = observation.get_observe_info()
+                person_list = observation.person_list
+            if isinstance(observation, HFCloopObservation):
+                # hfcloop_observe_info = observation.get_observe_info()
+                pass
+
+        nickname_str = ""
+        for nicknames in global_config.bot.alias_names:
+            nickname_str += f"{nicknames},"
+        name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。"
+
+        if is_group_chat:
+            relation_prompt_init = "在这个群聊中,你:\n"
+        else:
+            relation_prompt_init = ""
+
+        relation_prompt = ""
+        for person in person_list:
+            if len(person) >= 3 and person[0] and person[1]:
+                relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
+        if relation_prompt:
+            relation_prompt = relation_prompt_init + relation_prompt
+        else:
+            relation_prompt = relation_prompt_init + "没有特别在意的人\n"
+
+        prompt = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format(
+            name_block=name_block,
+            relation_prompt=relation_prompt,
+            time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
+            chat_observe_info=chat_observe_info,
+        )
+
+        # print(prompt)
+
+        content = ""
+        try:
+            content, _ = await 
self.llm_model.generate_response_async(prompt=prompt) + if not content: + logger.warning(f"{self.log_prefix} LLM返回空结果,关系识别失败。") + except Exception as e: + # 处理总体异常 + logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}") + logger.error(traceback.format_exc()) + content = "关系识别过程中出现错误" + + if content == "None": + content = "" + # 记录初步思考结果 + logger.debug(f"{self.log_prefix} 关系识别prompt: \n{prompt}\n") + logger.info(f"{self.log_prefix} 关系识别: {content}") + + return content + + +init_prompt() diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py index 72562ad59..d53c7f48c 100644 --- a/src/chat/focus_chat/info_processors/self_processor.py +++ b/src/chat/focus_chat/info_processors/self_processor.py @@ -22,23 +22,21 @@ logger = get_logger("processor") def init_prompt(): indentify_prompt = """ {name_block} -你是一个AI,但是你伪装成了一个人类,你的人格是,{prompt_personality} +你是一个AI,但是你伪装成了一个人类,不要让别人发现这一点 +请参考以下人格,不要被当前聊天内容中的内容误导: +{prompt_personality} {indentify_block} -{relation_prompt}{memory_str} -现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容: +以下是正在进行的聊天内容: +现在是{time_now},你正在参与聊天 {chat_observe_info} -现在请你根据现有的信息,思考自我认同:请严格遵守以下规则 -1. 请严格参考最上方的人设,适当参考记忆和当前聊天内容,不要被记忆和当前聊天内容中相反的内容误导 -2. 你是一个什么样的人,你和群里的人关系如何 -3. 你的形象是什么 -4. 思考有没有人提到你,或者图片与你有关 -5. 你的自我认同是否有助于你的回答,如果你需要自我相关的信息来帮你参与聊天,请输出,否则请输出十几个字的简短自我认同 -6. 一般情况下不用输出自我认同,只需要输出十几个字的简短自我认同就好,除非有明显需要自我认同的场景 +现在请你输出对自己的描述:请严格遵守以下规则 +1. 根据聊天记录,输出与聊天记录相关的自我描述,包括人格,形象等等,对人格形象进行精简 +2. 思考有没有内容与你的描述相关 +3. 如果没有明显相关内容,请输出十几个字的简短自我描述 -输出内容平淡一些,说中文,不要浮夸,平淡一些。 -请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出自我认同内容,记得明确说明这是你的自我认同。 +现在请输出你的自我描述,请注意不要输出多余内容(包括前后缀,括号(),表情包,at或 @等 ): """ Prompt(indentify_prompt, "indentify_prompt") @@ -107,11 +105,6 @@ class SelfProcessor(BaseProcessor): chat_target_name = "对方" # 私聊默认名称 person_list = observation.person_list - memory_str = "" - if running_memorys: - memory_str = "以下是当前在聊天中,你回忆起的记忆:\n" - for running_memory in running_memorys: - memory_str += f"{running_memory['topic']}: {running_memory['content']}\n" relation_prompt = "" for person in person_list: @@ -146,23 +139,10 @@ class SelfProcessor(BaseProcessor): personality_block = individuality.get_personality_prompt(x_person=2, level=2) identity_block = individuality.get_identity_prompt(x_person=2, level=2) - if is_group_chat: - relation_prompt_init = "在这个群聊中,你:\n" - else: - relation_prompt_init = "" - for person in person_list: - relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) - if relation_prompt: - relation_prompt = relation_prompt_init + relation_prompt - else: - relation_prompt = relation_prompt_init + "没有特别在意的人\n" - prompt = (await global_prompt_manager.get_prompt_async("indentify_prompt")).format( name_block=name_block, prompt_personality=personality_block, indentify_block=identity_block, - memory_str=memory_str, - relation_prompt=relation_prompt, time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), chat_observe_info=chat_observe_info, ) @@ -183,7 +163,7 @@ class SelfProcessor(BaseProcessor): if content == "None": content = "" # 记录初步思考结果 - # logger.debug(f"{self.log_prefix} 自我识别prompt: \n{prompt}\n") + logger.debug(f"{self.log_prefix} 自我识别prompt: \n{prompt}\n") logger.info(f"{self.log_prefix} 自我认知: {content}") return content diff --git a/src/chat/focus_chat/info_processors/tool_processor.py b/src/chat/focus_chat/info_processors/tool_processor.py index 46c2657d5..5e26829f3 100644 --- a/src/chat/focus_chat/info_processors/tool_processor.py +++ 
b/src/chat/focus_chat/info_processors/tool_processor.py @@ -23,7 +23,6 @@ def init_prompt(): # 添加工具执行器提示词 tool_executor_prompt = """ 你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。 -{memory_str} 群里正在进行的聊天内容: {chat_observe_info} @@ -33,7 +32,7 @@ def init_prompt(): 3. 是否有明确的工具使用指令 4. 考虑用户与你的关系以及当前的对话氛围 -如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"。 +If you need to use a tool, please directly call the corresponding tool function. If you do not need to use any tool, simply output "No tool needed". """ Prompt(tool_executor_prompt, "tool_executor_prompt") diff --git a/src/chat/focus_chat/planners/actions/emoji_action.py b/src/chat/focus_chat/planners/actions/emoji_action.py index 68da4b1d8..dac6ca4d9 100644 --- a/src/chat/focus_chat/planners/actions/emoji_action.py +++ b/src/chat/focus_chat/planners/actions/emoji_action.py @@ -19,7 +19,7 @@ class EmojiAction(BaseAction): """ action_name: str = "emoji" - action_description: str = "当你想发送一个表情辅助你的回复表达" + action_description: str = "当你想单独发送一个表情辅助你的回复表达" action_parameters: dict[str:str] = { "description": "文字描述你想要发送的表情", } diff --git a/src/chat/focus_chat/planners/planner_simple.py b/src/chat/focus_chat/planners/planner_simple.py index 60818a2f4..a99c80f47 100644 --- a/src/chat/focus_chat/planners/planner_simple.py +++ b/src/chat/focus_chat/planners/planner_simple.py @@ -11,6 +11,7 @@ from src.chat.focus_chat.info.mind_info import MindInfo from src.chat.focus_chat.info.action_info import ActionInfo from src.chat.focus_chat.info.structured_info import StructuredInfo from src.chat.focus_chat.info.self_info import SelfInfo +from src.chat.focus_chat.info.relation_info import RelationInfo from src.common.logger_manager import get_logger from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.individuality.individuality import individuality @@ -29,6 +30,7 @@ def init_prompt(): """ 你的自我认知是: {self_info_block} +{relation_info_block} {extra_info_block} {memory_str} @@ -65,7 +67,7 @@ def init_prompt(): 描述:{action_description} {action_parameters} 使用该动作的场景: - {action_require}""", +{action_require}""", "action_prompt", ) @@ -122,6 +124,7 @@ class ActionPlanner(BasePlanner): observed_messages_str = "" chat_type = "group" is_group_chat = True + relation_info = "" for info in all_plan_info: if isinstance(info, ObsInfo): observed_messages = info.get_talking_message() @@ -134,6 +137,8 @@ class ActionPlanner(BasePlanner): cycle_info = info.get_observe_info() elif isinstance(info, SelfInfo): self_info = info.get_processed_info() + elif isinstance(info, RelationInfo): + relation_info = info.get_processed_info() elif isinstance(info, StructuredInfo): structured_info = info.get_processed_info() else: @@ -164,6 +169,7 @@ class ActionPlanner(BasePlanner): # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- prompt = await self.build_planner_prompt( self_info_block=self_info, + relation_info_block=relation_info, is_group_chat=is_group_chat, # <-- Pass HFC state chat_target_info=None, observed_messages_str=observed_messages_str, # <-- Pass local variable @@ -280,6 +286,7 @@ class ActionPlanner(BasePlanner): async def build_planner_prompt( self, self_info_block: str, + relation_info_block: str, is_group_chat: bool, # Now passed as argument chat_target_info: Optional[dict], # Now passed as argument observed_messages_str: str, @@ -292,13 +299,17 @@ class ActionPlanner(BasePlanner): ) -> str: """构建 Planner LLM 的提示词 (获取模板并填充数据)""" try: + + if relation_info_block: + relation_info_block = f"以下是你和别人的关系描述:\n{relation_info_block}" + else: + relation_info_block = "" + 
memory_str = "" - if global_config.focus_chat.parallel_processing: - memory_str = "" - if running_memorys: - memory_str = "以下是当前在聊天中,你回忆起的记忆:\n" - for running_memory in running_memorys: - memory_str += f"{running_memory['content']}\n" + if running_memorys: + memory_str = "以下是当前在聊天中,你回忆起的记忆:\n" + for running_memory in running_memorys: + memory_str += f"{running_memory['content']}\n" chat_context_description = "你现在正在一个群聊中" chat_target_name = None # Only relevant for private @@ -338,7 +349,7 @@ class ActionPlanner(BasePlanner): require_text = "" for require_item in using_actions_info["require"]: - require_text += f" - {require_item}\n" + require_text += f"{require_item}\n" if param_text: param_text = f"参数:\n{param_text}" @@ -369,6 +380,7 @@ class ActionPlanner(BasePlanner): planner_prompt_template = await global_prompt_manager.get_prompt_async("simple_planner_prompt") prompt = planner_prompt_template.format( + relation_info_block=relation_info_block, self_info_block=self_info_block, memory_str=memory_str, time_block=time_block, diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 1972d31c8..d1437c79b 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -192,19 +192,19 @@ class NormalChat: await message_manager.add_message(bot_message) # 改为实例方法 (虽然它只用 message.chat_stream, 但逻辑上属于实例) - async def _update_relationship(self, message: MessageRecv, response_set): - """更新关系情绪""" - ori_response = ",".join(response_set) - stance, emotion = await self.gpt._get_emotion_tags(ori_response, message.processed_plain_text) - user_info = message.message_info.user_info - platform = user_info.platform - await relationship_manager.calculate_update_relationship_value( - user_info, - platform, - label=emotion, - stance=stance, # 使用 self.chat_stream - ) - self.mood_manager.update_mood_from_emotion(emotion, global_config.mood.mood_intensity_factor) + # async def _update_relationship(self, message: MessageRecv, response_set): + # """更新关系情绪""" + # ori_response = ",".join(response_set) + # stance, emotion = await self.gpt._get_emotion_tags(ori_response, message.processed_plain_text) + # user_info = message.message_info.user_info + # platform = user_info.platform + # await relationship_manager.calculate_update_relationship_value( + # user_info, + # platform, + # label=emotion, + # stance=stance, # 使用 self.chat_stream + # ) + # self.mood_manager.update_mood_from_emotion(emotion, global_config.mood.mood_intensity_factor) async def _reply_interested_message(self) -> None: """ @@ -452,8 +452,8 @@ class NormalChat: with Timer("处理表情包", timing_results): await self._handle_emoji(message, response_set[0]) - with Timer("关系更新", timing_results): - await self._update_relationship(message, response_set) + # with Timer("关系更新", timing_results): + # await self._update_relationship(message, response_set) # 回复后处理 await willing_manager.after_generate_reply_handle(message.message_info.message_id) diff --git a/src/chat/normal_chat/normal_chat_planner.py b/src/chat/normal_chat/normal_chat_planner.py index 634bf5c77..9c8a70221 100644 --- a/src/chat/normal_chat/normal_chat_planner.py +++ b/src/chat/normal_chat/normal_chat_planner.py @@ -71,7 +71,7 @@ class NormalChatPlanner: self.log_prefix = log_prefix # LLM规划器配置 self.planner_llm = LLMRequest( - model=global_config.model.normal_chat_2, + model=global_config.model.planner, max_tokens=1000, request_type="normal_chat.planner", # 用于normal_chat动作规划 ) diff --git a/src/config/official_configs.py 
b/src/config/official_configs.py index 347d8f2d0..7436de970 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -171,6 +171,9 @@ class FocusChatProcessorConfig(ConfigBase): self_identify_processor: bool = True """是否启用自我识别处理器""" + relation_processor: bool = True + """是否启用关系识别处理器""" + tool_use_processor: bool = True """是否启用工具使用处理器""" diff --git a/src/main.py b/src/main.py index 5680e5525..afb42a8a9 100644 --- a/src/main.py +++ b/src/main.py @@ -72,10 +72,6 @@ class MainSystem: # 添加情绪打印任务 await async_task_manager.add_task(MoodPrintTask()) - # 检查并清除person_info冗余字段,启动个人习惯推断 - # await person_info_manager.del_all_undefined_field() - asyncio.create_task(person_info_manager.personal_habit_deduction()) - # 启动愿望管理器 await willing_manager.async_task_starter() diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index e26e2dfb2..489baeb78 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "2.11.1" +version = "2.12.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -96,9 +96,7 @@ talk_frequency_down_groups = [] #降低回复频率的群号码 think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗 consecutive_replies = 1 # 连续回复能力,值越高,麦麦连续回复的概率越高 -parallel_processing = true # 是否并行处理回忆和处理器阶段,可以节省时间 - -processor_max_time = 25 # 处理器最大时间,单位秒,如果超过这个时间,处理器会自动停止 +processor_max_time = 20 # 处理器最大时间,单位秒,如果超过这个时间,处理器会自动停止 observation_context_size = 20 # 观察到的最长上下文大小 compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5 @@ -109,13 +107,14 @@ planner_type = "simple" # 规划器类型,可选值:complex(复杂规划 [focus_chat_processor] # 专注聊天处理器,打开可以实现更多功能,但是会增加token消耗 mind_processor = false # 是否启用思维处理器 self_identify_processor = true # 是否启用自我识别处理器 +relation_processor = true # 是否启用关系识别处理器 tool_use_processor = false # 是否启用工具使用处理器 working_memory_processor = false # 是否启用工作记忆处理器,不稳定,消耗量大 [emoji] -max_reg_num = 40 # 表情包最大注册数量 +max_reg_num = 60 # 表情包最大注册数量 do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包 -check_interval = 120 # 检查表情包(注册,破损,删除)的时间间隔(分钟) +check_interval = 10 # 检查表情包(注册,破损,删除)的时间间隔(分钟) steal_emoji = true # 是否偷取表情包,让麦麦可以将一些表情包据为己有 content_filtration = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存 filtration_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存 @@ -123,7 +122,7 @@ filtration_prompt = "符合公序良俗" # 表情包过滤要求,只有符合 [memory] memory_build_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多 memory_build_distribution = [6.0, 3.0, 0.6, 32.0, 12.0, 0.4] # 记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重 -memory_build_sample_num = 8 # 采样数量,数值越高记忆采样次数越多 +memory_build_sample_num = 6 # 采样数量,数值越高记忆采样次数越多 memory_build_sample_length = 40 # 采样长度,数值越高一段记忆内容越丰富 memory_compress_rate = 0.1 # 记忆压缩率 控制记忆精简程度 建议保持默认,调高可以获得更多信息,但是冗余信息也会增多 @@ -131,7 +130,7 @@ forget_memory_interval = 1000 # 记忆遗忘间隔 单位秒 间隔越低, memory_forget_time = 24 #多长时间后的记忆会被遗忘 单位小时 memory_forget_percentage = 0.01 # 记忆遗忘比例 控制记忆遗忘程度 越大遗忘越多 建议保持默认 -consolidate_memory_interval = 1000 # 记忆整合间隔 单位秒 间隔越低,麦麦整合越频繁,记忆更精简 +consolidate_memory_interval = 2000 # 记忆整合间隔 单位秒 间隔越低,麦麦整合越频繁,记忆更精简 consolidation_similarity_threshold = 0.7 # 相似度阈值 consolidation_check_percentage = 0.01 # 检查节点比例
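
For reference, a minimal sketch of the data flow this patch introduces, assuming only what the diff above shows: RelationshipProcessor wraps the LLM relationship summary in a RelationInfo via set_relation_info(), and planner_simple.py reads it back with get_processed_info() to build the relation_info_block of the planner prompt. RelationInfoSketch below is a hypothetical stand-in for the real RelationInfo/InfoBase classes, not part of the patch.

# Hedged sketch: mirrors relation_info.py and the planner_simple.py consumption shown above.
from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class RelationInfoSketch:
    """Hypothetical stand-in for RelationInfo(InfoBase)."""

    type: str = "relation"
    data: Dict[str, Any] = field(default_factory=dict)

    def set_relation_info(self, relation_info: str) -> None:
        # RelationshipProcessor.process_info stores the LLM summary under this key.
        self.data["relation_info"] = relation_info

    def get_processed_info(self) -> str:
        # The planner reads the summary back through get_processed_info().
        return self.data.get("relation_info") or ""


info = RelationInfoSketch()
info.set_relation_info("你和小明是经常互相开玩笑的网友")  # illustrative value

# build_planner_prompt only adds the block when the processor produced something:
relation_info_block = info.get_processed_info()
if relation_info_block:
    relation_info_block = f"以下是你和别人的关系描述:\n{relation_info_block}"
else:
    relation_info_block = ""
print(relation_info_block)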