diff --git a/.github/workflows/ruff-pr.yml b/.github/workflows/ruff-pr.yml new file mode 100644 index 000000000..bb83de8c9 --- /dev/null +++ b/.github/workflows/ruff-pr.yml @@ -0,0 +1,9 @@ +name: Ruff +on: [ pull_request ] +jobs: + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/ruff-action@v3 + diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index b3056fa6a..58921a76f 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -1,5 +1,5 @@ name: Ruff -on: [ push, pull_request ] +on: [ push ] permissions: contents: write diff --git a/src/config/config.py b/src/config/config.py index 9d9fbe8f0..6c1d54250 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -28,7 +28,7 @@ logger = get_module_logger("config", config=config_config) # 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 is_test = True mai_version_main = "0.6.3" -mai_version_fix = "snapshot-1" +mai_version_fix = "snapshot-2" if mai_version_fix: if is_test: diff --git a/src/do_tool/tool_can_use/send_emoji.py b/src/do_tool/not_used/send_emoji.py similarity index 100% rename from src/do_tool/tool_can_use/send_emoji.py rename to src/do_tool/not_used/send_emoji.py diff --git a/src/do_tool/tool_can_use/get_memory.py b/src/do_tool/tool_can_use/get_memory.py index ae1677006..28346d46c 100644 --- a/src/do_tool/tool_can_use/get_memory.py +++ b/src/do_tool/tool_can_use/get_memory.py @@ -14,10 +14,10 @@ class GetMemoryTool(BaseTool): parameters = { "type": "object", "properties": { - "text": {"type": "string", "description": "要查询的相关文本"}, + "topic": {"type": "string", "description": "要查询的相关主题,用逗号隔开"}, "max_memory_num": {"type": "integer", "description": "最大返回记忆数量"}, }, - "required": ["text"], + "required": ["topic"], } async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]: @@ -31,12 +31,15 @@ class GetMemoryTool(BaseTool): Dict: 工具执行结果 """ try: - text = function_args.get("text", message_txt) + topic = function_args.get("topic", message_txt) max_memory_num = function_args.get("max_memory_num", 2) + # 将主题字符串转换为列表 + topic_list = topic.split(",") + # 调用记忆系统 - related_memory = await HippocampusManager.get_instance().get_memory_from_text( - text=text, max_memory_num=max_memory_num, max_memory_length=2, max_depth=3, fast_retrieval=False + related_memory = await HippocampusManager.get_instance().get_memory_from_topic( + valid_keywords=topic_list, max_memory_num=max_memory_num, max_memory_length=2, max_depth=3 ) memory_info = "" @@ -47,7 +50,7 @@ class GetMemoryTool(BaseTool): if memory_info: content = f"你记得这些事情: {memory_info}" else: - content = f"你不太记得有关{text}的记忆,你对此不太了解" + content = f"你不太记得有关{topic}的记忆,你对此不太了解" return {"name": "get_memory", "content": content} except Exception as e: diff --git a/src/do_tool/tool_can_use/mid_chat_mem.py b/src/do_tool/tool_can_use/mid_chat_mem.py index 26d26704a..71726a57f 100644 --- a/src/do_tool/tool_can_use/mid_chat_mem.py +++ b/src/do_tool/tool_can_use/mid_chat_mem.py @@ -9,11 +9,11 @@ class GetMidMemoryTool(BaseTool): """从记忆系统中获取相关记忆的工具""" name = "mid_chat_mem" - description = "之前的聊天内容中获取具体信息,当最新消息提到,或者你需要回复的消息中提到,你可以使用这个工具" + description = "之前的聊天内容概述id中获取具体信息,如果没有聊天内容概述id,就不要使用" parameters = { "type": "object", "properties": { - "id": {"type": "integer", "description": "要查询的聊天记录id"}, + "id": {"type": "integer", "description": "要查询的聊天记录概述id"}, }, "required": ["id"], } diff --git a/src/do_tool/tool_use.py b/src/do_tool/tool_use.py index f054ecdd4..d8c33e93a 100644 --- a/src/do_tool/tool_use.py 
+++ b/src/do_tool/tool_use.py @@ -6,6 +6,8 @@ from src.common.logger import get_module_logger, TOOL_USE_STYLE_CONFIG, LogConfi from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance from src.heart_flow.sub_heartflow import SubHeartflow import traceback +from src.plugins.person_info.relationship_manager import relationship_manager +from src.plugins.chat.utils import parse_text_timestamps tool_use_config = LogConfig( # 使用消息发送专用样式 @@ -38,20 +40,6 @@ class ToolUser: else: mid_memory_info = "" - # stream_id = chat_stream.stream_id - # chat_talking_prompt = "" - # if stream_id: - # chat_talking_prompt = get_recent_group_detailed_plain_text( - # stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True - # ) - # new_messages = list( - # db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15) - # ) - # new_messages_str = "" - # for msg in new_messages: - # if "detailed_plain_text" in msg: - # new_messages_str += f"{msg['detailed_plain_text']}" - # 这些信息应该从调用者传入,而不是从self获取 bot_name = global_config.BOT_NICKNAME prompt = "" @@ -62,6 +50,10 @@ class ToolUser: # prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n" prompt += f"注意你就是{bot_name},{bot_name}是你的名字。根据之前的聊天记录补充问题信息,搜索时避开你的名字。\n" prompt += "你现在需要对群里的聊天内容进行回复,现在选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。" + + prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt) + prompt = parse_text_timestamps(prompt, mode="lite") + return prompt @staticmethod @@ -165,7 +157,7 @@ class ToolUser: tool_calls_str = "" for tool_call in tool_calls: tool_calls_str += f"{tool_call['function']['name']}\n" - logger.info(f"根据:\n{prompt[0:100]}...\n模型请求调用{len(tool_calls)}个工具: {tool_calls_str}") + logger.info(f"根据:\n{prompt}\n模型请求调用{len(tool_calls)}个工具: {tool_calls_str}") tool_results = [] structured_info = {} # 动态生成键 diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py index 3bb66efc2..200494e55 100644 --- a/src/heart_flow/observation.py +++ b/src/heart_flow/observation.py @@ -6,7 +6,6 @@ from src.config.config import global_config from src.common.database import db from src.common.logger import get_module_logger import traceback -import asyncio logger = get_module_logger("observation") @@ -39,7 +38,19 @@ class ChattingObservation(Observation): self.mid_memory_info = "" self.now_message_info = "" - self._observe_lock = asyncio.Lock() # 添加锁 + # self._observe_lock = asyncio.Lock() # 移除锁 + + # 初始化时加载最近的10条消息 + initial_messages_cursor = ( + db.messages.find({"chat_id": self.chat_id, "time": {"$lt": self.last_observe_time}}) + .sort("time", -1) # 按时间倒序 + .limit(10) # 获取最多10条 + ) + initial_messages = list(initial_messages_cursor) + initial_messages.reverse() # 恢复时间正序 + + self.talking_message = initial_messages # 将这些消息设为初始上下文 + self.now_message_info = self.translate_message_list_to_str(self.talking_message) # 更新初始的 now_message_info self.llm_summary = LLMRequest( model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" @@ -73,139 +84,99 @@ class ChattingObservation(Observation): return self.now_message_info async def observe(self): - async with self._observe_lock: # 获取锁 - # 查找新消息,最多获取 self.max_now_obs_len 条 - new_messages_cursor = ( + # async with self._observe_lock: # 移除锁 + # 查找新消息,最多获取 self.max_now_obs_len 条 + new_messages_cursor = ( + db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}}) + .sort("time", -1) # 按时间倒序排序 + .limit(self.max_now_obs_len) # 限制数量 
+ ) + new_messages = list(new_messages_cursor) + new_messages.reverse() # 反转列表,使消息按时间正序排列 + + if not new_messages: + # 如果没有获取到限制数量内的较新消息,可能仍然有更早的消息,但我们只关注最近的 + # 检查是否有任何新消息(即使超出限制),以决定是否更新 last_observe_time + # 注意:这里的查询也可能与其他并发 observe 冲突,但锁保护了状态更新 + # 由于外部已加锁,此处的并发冲突担忧不再需要 + any_new_message = db.messages.find_one({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}}) + if not any_new_message: + return # 确实没有新消息 + + # 如果有超过限制的更早的新消息,仍然需要更新时间戳,防止重复获取旧消息 + # 但不将它们加入 talking_message + latest_message_time_cursor = ( db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}}) - .sort("time", -1) # 按时间倒序排序 - .limit(self.max_now_obs_len) # 限制数量 + .sort("time", -1) + .limit(1) ) - new_messages = list(new_messages_cursor) - new_messages.reverse() # 反转列表,使消息按时间正序排列 + latest_time_doc = next(latest_message_time_cursor, None) + if latest_time_doc: + # 确保只在严格大于时更新,避免因并发查询导致时间戳回退 + if latest_time_doc["time"] > self.last_observe_time: + self.last_observe_time = latest_time_doc["time"] + return # 返回,因为我们只关心限制内的最新消息 - if not new_messages: - # 如果没有获取到限制数量内的较新消息,可能仍然有更早的消息,但我们只关注最近的 - # 检查是否有任何新消息(即使超出限制),以决定是否更新 last_observe_time - # 注意:这里的查询也可能与其他并发 observe 冲突,但锁保护了状态更新 - any_new_message = db.messages.find_one( - {"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}} - ) - if not any_new_message: - return # 确实没有新消息 + self.last_observe_time = new_messages[-1]["time"] + self.talking_message.extend(new_messages) - # 如果有超过限制的更早的新消息,仍然需要更新时间戳,防止重复获取旧消息 - # 但不将它们加入 talking_message - latest_message_time_cursor = ( - db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}}) - .sort("time", -1) - .limit(1) - ) - latest_time_doc = next(latest_message_time_cursor, None) - if latest_time_doc: - # 确保只在严格大于时更新,避免因并发查询导致时间戳回退 - if latest_time_doc["time"] > self.last_observe_time: - self.last_observe_time = latest_time_doc["time"] - return # 返回,因为我们只关心限制内的最新消息 + if len(self.talking_message) > self.max_now_obs_len: + try: # 使用 try...finally 仅用于可能的LLM调用错误处理 + # 计算需要移除的消息数量,保留最新的 max_now_obs_len 条 + messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len + oldest_messages = self.talking_message[:messages_to_remove_count] + self.talking_message = self.talking_message[messages_to_remove_count:] # 保留后半部分,即最新的 + oldest_messages_str = "\n".join( + [msg["detailed_plain_text"] for msg in oldest_messages if "detailed_plain_text" in msg] + ) # 增加检查 + oldest_timestamps = [msg["time"] for msg in oldest_messages] - # 在持有锁的情况下,再次过滤,确保只处理真正新的消息 - # 防止处理在等待锁期间已被其他协程处理的消息 - truly_new_messages = [msg for msg in new_messages if msg["time"] > self.last_observe_time] + # 调用 LLM 总结主题 + prompt = f"请总结以下聊天记录的主题:\n{oldest_messages_str}\n主题,用一句话概括包括人物事件和主要信息,不要分点:" + summary = "无法总结主题" # 默认值 + try: + summary_result, _ = await self.llm_summary.generate_response_async(prompt) + if summary_result: # 确保结果不为空 + summary = summary_result + except Exception as e: + logger.error(f"总结主题失败 for chat {self.chat_id}: {e}") + # 保留默认总结 "无法总结主题" - if not truly_new_messages: - logger.debug( - f"Chat {self.chat_id}: Fetched messages, but already processed by another concurrent observe call." 
- ) - return # 所有获取的消息都已被处理 + mid_memory = { + "id": str(int(datetime.now().timestamp())), + "theme": summary, + "messages": oldest_messages, # 存储原始消息对象 + "timestamps": oldest_timestamps, + "chat_id": self.chat_id, + "created_at": datetime.now().timestamp(), + } + # print(f"mid_memory:{mid_memory}") + # 存入内存中的 mid_memorys + self.mid_memorys.append(mid_memory) + if len(self.mid_memorys) > self.max_mid_memory_len: + self.mid_memorys.pop(0) # 移除最旧的 - # 如果获取到了 truly_new_messages (在限制内且时间戳大于上次记录) - self.last_observe_time = truly_new_messages[-1]["time"] # 更新时间戳为获取到的最新消息的时间 + mid_memory_str = "之前聊天的内容概述是:\n" + for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分 + time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) + mid_memory_str += ( + f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n" + ) + self.mid_memory_info = mid_memory_str + except Exception as e: # 将异常处理移至此处以覆盖整个总结过程 + logger.error(f"处理和总结旧消息时出错 for chat {self.chat_id}: {e}") + traceback.print_exc() # 记录详细堆栈 - self.talking_message.extend(truly_new_messages) + # print(f"处理后self.talking_message:{self.talking_message}") - # 将新消息转换为字符串格式 (此变量似乎未使用,暂时注释掉) - # new_messages_str = "" - # for msg in truly_new_messages: - # if "detailed_plain_text" in msg: - # new_messages_str += f"{msg['detailed_plain_text']}" + now_message_str = "" + # 使用 self.translate_message_list_to_str 更新当前聊天内容 + now_message_str += self.translate_message_list_to_str(talking_message=self.talking_message) + self.now_message_info = now_message_str - # print(f"new_messages_str:{new_messages_str}") - - # 锁保证了这部分逻辑的原子性 - if len(self.talking_message) > self.max_now_obs_len: - try: # 使用 try...finally 仅用于可能的LLM调用错误处理 - # 计算需要移除的消息数量,保留最新的 max_now_obs_len 条 - messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len - oldest_messages = self.talking_message[:messages_to_remove_count] - self.talking_message = self.talking_message[messages_to_remove_count:] # 保留后半部分,即最新的 - oldest_messages_str = "\n".join( - [msg["detailed_plain_text"] for msg in oldest_messages if "detailed_plain_text" in msg] - ) # 增加检查 - oldest_timestamps = [msg["time"] for msg in oldest_messages] - - # 调用 LLM 总结主题 - prompt = f"请总结以下聊天记录的主题:\n{oldest_messages_str}\n主题,用一句话概括包括人物事件和主要信息,不要分点:" - summary = "无法总结主题" # 默认值 - try: - summary_result, _ = await self.llm_summary.generate_response_async(prompt) - if summary_result: # 确保结果不为空 - summary = summary_result - except Exception as e: - logger.error(f"总结主题失败 for chat {self.chat_id}: {e}") - # 保留默认总结 "无法总结主题" - - mid_memory = { - "id": str(int(datetime.now().timestamp())), - "theme": summary, - "messages": oldest_messages, # 存储原始消息对象 - "timestamps": oldest_timestamps, - "chat_id": self.chat_id, - "created_at": datetime.now().timestamp(), - } - # print(f"mid_memory:{mid_memory}") - # 存入内存中的 mid_memorys - self.mid_memorys.append(mid_memory) - if len(self.mid_memorys) > self.max_mid_memory_len: - self.mid_memorys.pop(0) # 移除最旧的 - - mid_memory_str = "之前聊天的内容概括是:\n" - for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分 - time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) - mid_memory_str += f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n" - self.mid_memory_info = mid_memory_str - except Exception as e: # 将异常处理移至此处以覆盖整个总结过程 - logger.error(f"处理和总结旧消息时出错 for chat {self.chat_id}: {e}") - traceback.print_exc() # 记录详细堆栈 - - # print(f"处理后self.talking_message:{self.talking_message}") - - now_message_str = "" - # 使用 
self.translate_message_list_to_str 更新当前聊天内容 - now_message_str += self.translate_message_list_to_str(talking_message=self.talking_message) - self.now_message_info = now_message_str - - logger.debug( - f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.now_message_info}" - ) - # 锁在退出 async with 块时自动释放 - - async def update_talking_summary(self, new_messages_str): - prompt = "" - # prompt += f"{personality_info}" - prompt += f"你的名字叫:{self.name}\n,标识'{self.name}'的都是你自己说的话" - prompt += f"你正在参与一个qq群聊的讨论,你记得这个群之前在聊的内容是:{self.observe_info}\n" - prompt += f"现在群里的群友们产生了新的讨论,有了新的发言,具体内容如下:{new_messages_str}\n" - prompt += """以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,目前最新讨论的话题 - 以及聊天中的一些重要信息,记得不要分点,精简的概括成一段文本\n""" - prompt += "总结概括:" - try: - updated_observe_info, reasoning_content = await self.llm_summary.generate_response_async(prompt) - except Exception as e: - print(f"获取总结失败: {e}") - updated_observe_info = "" - - return updated_observe_info - # print(f"prompt:{prompt}") - # print(f"self.observe_info:{self.observe_info}") + logger.trace( + f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.now_message_info}" + ) @staticmethod def translate_message_list_to_str(talking_message): diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py index db6195be5..c6afeff06 100644 --- a/src/heart_flow/sub_heartflow.py +++ b/src/heart_flow/sub_heartflow.py @@ -7,7 +7,6 @@ import time from typing import Optional from datetime import datetime import traceback -from src.plugins.chat.message import UserInfo from src.plugins.chat.utils import parse_text_timestamps # from src.plugins.schedule.schedule_generator import bot_schedule @@ -21,7 +20,6 @@ from src.individuality.individuality import Individuality import random from src.plugins.chat.chat_stream import ChatStream from src.plugins.person_info.relationship_manager import relationship_manager -from src.plugins.chat.utils import get_recent_group_speaker from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager subheartflow_config = LogConfig( @@ -39,40 +37,21 @@ def init_prompt(): # prompt += "{prompt_schedule}\n" # prompt += "{relation_prompt_all}\n" prompt += "{prompt_personality}\n" - prompt += "刚刚你的想法是{current_thinking_info}。可以适当转换话题\n" + prompt += "刚刚你的想法是:\n{current_thinking_info}\n" prompt += "-----------------------------------\n" prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n" prompt += "你现在{mood_info}\n" # prompt += "你注意到{sender_name}刚刚说:{message_txt}\n" - prompt += "思考时可以想想如何对群聊内容进行回复,关注新话题,大家正在说的话才是聊天的主题。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话。如果你要回复,最好只回复一个人的一个话题\n" + prompt += "现在请你根据刚刚的想法继续思考,思考时可以想想如何对群聊内容进行回复,关注新话题,可以适当转换话题,大家正在说的话才是聊天的主题。\n" + prompt += ( + "回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话。如果你要回复,最好只回复一个人的一个话题\n" + ) prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写" - prompt += "记得结合上述的消息,不要分点输出,生成内心想法,文字不要浮夸,注意{bot_name}指的就是你。" - Prompt(prompt, "sub_heartflow_prompt_before") - prompt = "" - # prompt += f"你现在正在做的事情是:{schedule_info}\n" - prompt += "{extra_info}\n" - prompt += "{prompt_personality}\n" - prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n" - prompt += "刚刚你的想法是{current_thinking_info}。" - prompt += "你现在看到了网友们发的新消息:{message_new_info}\n" - prompt += "你刚刚回复了群友们:{reply_info}" - prompt += "你现在{mood_info}" - prompt += "现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白" - prompt += "不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,关注你回复的内容,不要思考太多:" - Prompt(prompt, "sub_heartflow_prompt_after") + prompt += ( + 
"现在请你继续生成你在这个聊天中的想法,不要分点输出,生成内心想法,文字不要浮夸,注意{bot_name}指的就是你。" + ) - # prompt += f"你现在正在做的事情是:{schedule_info}\n" - prompt += "{extra_info}\n" - prompt += "{prompt_personality}\n" - prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n" - prompt += "刚刚你的想法是{current_thinking_info}。" - prompt += "你现在看到了网友们发的新消息:{message_new_info}\n" - # prompt += "你刚刚回复了群友们:{reply_info}" - prompt += "你现在{mood_info}" - prompt += "现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白" - prompt += "不要思考太多,不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写" - prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意{bot_name}指的就是你。" - Prompt(prompt, "sub_heartflow_prompt_after_observe") + Prompt(prompt, "sub_heartflow_prompt_before") class CurrentState: @@ -97,7 +76,7 @@ class SubHeartflow: self.llm_model = LLMRequest( model=global_config.llm_sub_heartflow, temperature=global_config.llm_sub_heartflow["temp"], - max_tokens=600, + max_tokens=800, request_type="sub_heart_flow", ) @@ -156,13 +135,6 @@ class SubHeartflow: # 这个后台循环现在主要负责检查是否需要自我销毁 # 不再主动进行思考或状态更新,这些由 HeartFC_Chat 驱动 - # 检查是否需要冻结(这个逻辑可能需要重新审视,因为激活状态现在由外部驱动) - # if current_time - self.last_reply_time > global_config.sub_heart_flow_freeze_time: - # self.is_active = False - # else: - # self.is_active = True - # self.last_active_time = current_time # 由外部调用(如 thinking)更新 - # 检查是否超过指定时间没有激活 (例如,没有被调用进行思考) if current_time - self.last_active_time > global_config.sub_heart_flow_stop_time: # 例如 5 分钟 logger.info( @@ -173,11 +145,6 @@ class SubHeartflow: # heartflow.remove_subheartflow(self.subheartflow_id) # 假设有这样的方法 break # 退出循环以停止任务 - # 不再需要内部驱动的状态更新和思考 - # self.current_state.update_current_state_info() - # await self.do_a_thinking() - # await self.judge_willing() - await asyncio.sleep(global_config.sub_heart_flow_update_interval) # 定期检查销毁条件 async def ensure_observed(self): @@ -275,13 +242,16 @@ class SubHeartflow: prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt) prompt = parse_text_timestamps(prompt, mode="lite") - logger.debug(f"[{self.subheartflow_id}] Thinking Prompt:\n{prompt}") + logger.debug(f"[{self.subheartflow_id}] 心流思考prompt:\n{prompt}\n") try: response, reasoning_content = await self.llm_model.generate_response_async(prompt) + + logger.debug(f"[{self.subheartflow_id}] 心流思考结果:\n{response}\n") + if not response: # 如果 LLM 返回空,给一个默认想法 response = "(不知道该想些什么...)" - logger.warning(f"[{self.subheartflow_id}] LLM returned empty response for thinking.") + logger.warning(f"[{self.subheartflow_id}] LLM 返回空结果,思考失败。") except Exception as e: logger.error(f"[{self.subheartflow_id}] 内心独白获取失败: {e}") response = "(思考时发生错误...)" # 错误时的默认想法 @@ -290,186 +260,13 @@ class SubHeartflow: # self.current_mind 已经在 update_current_mind 中更新 - logger.info(f"[{self.subheartflow_id}] 思考前脑内状态:{self.current_mind}") + # logger.info(f"[{self.subheartflow_id}] 思考前脑内状态:{self.current_mind}") return self.current_mind, self.past_mind - async def do_thinking_after_observe( - self, message_txt: str, sender_info: UserInfo, chat_stream: ChatStream, extra_info: str, obs_id: int = None - ): - current_thinking_info = self.current_mind - mood_info = self.current_state.mood - # mood_info = "你很生气,很愤怒" - observation = self.observations[0] - if obs_id: - # print(f"11111111111有id,开始获取观察信息{obs_id}") - chat_observe_info = observation.get_observe_info(obs_id) - else: - chat_observe_info = observation.get_observe_info() - - extra_info_prompt = "" - for tool_name, tool_data in extra_info.items(): - extra_info_prompt += f"{tool_name} 相关信息:\n" - for item in tool_data: - extra_info_prompt += f"- 
{item['name']}: {item['content']}\n" - - # 开始构建prompt - prompt_personality = f"你的名字是{self.bot_name},你" - # person - individuality = Individuality.get_instance() - - personality_core = individuality.personality.personality_core - prompt_personality += personality_core - - personality_sides = individuality.personality.personality_sides - random.shuffle(personality_sides) - prompt_personality += f",{personality_sides[0]}" - - identity_detail = individuality.identity.identity_detail - random.shuffle(identity_detail) - prompt_personality += f",{identity_detail[0]}" - - # 关系 - who_chat_in_group = [ - (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname) - ] - who_chat_in_group += get_recent_group_speaker( - chat_stream.stream_id, - (chat_stream.user_info.platform, chat_stream.user_info.user_id), - limit=global_config.MAX_CONTEXT_SIZE, - ) - - relation_prompt = "" - for person in who_chat_in_group: - relation_prompt += await relationship_manager.build_relationship_info(person) - - # relation_prompt_all = ( - # f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录," - # f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。" - # ) - relation_prompt_all = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format( - relation_prompt, sender_info.user_nickname - ) - - sender_name_sign = ( - f"<{chat_stream.platform}:{sender_info.user_id}:{sender_info.user_nickname}:{sender_info.user_cardname}>" - ) - - time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after_observe")).format( - extra_info_prompt, - # prompt_schedule, - relation_prompt_all, - prompt_personality, - current_thinking_info, - time_now, - chat_observe_info, - mood_info, - sender_name_sign, - message_txt, - self.bot_name, - ) - - prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt) - prompt = parse_text_timestamps(prompt, mode="lite") - - try: - response, reasoning_content = await self.llm_model.generate_response_async(prompt) - except Exception as e: - logger.error(f"回复前内心独白获取失败: {e}") - response = "" - self.update_current_mind(response) - - self.current_mind = response - - logger.info(f"prompt:\n{prompt}\n") - logger.info(f"麦麦的思考前脑内状态:{self.current_mind}") - return self.current_mind, self.past_mind - - # async def do_thinking_after_reply(self, reply_content, chat_talking_prompt, extra_info): - # # print("麦麦回复之后脑袋转起来了") - - # # 开始构建prompt - # prompt_personality = f"你的名字是{self.bot_name},你" - # # person - # individuality = Individuality.get_instance() - - # personality_core = individuality.personality.personality_core - # prompt_personality += personality_core - - # extra_info_prompt = "" - # for tool_name, tool_data in extra_info.items(): - # extra_info_prompt += f"{tool_name} 相关信息:\n" - # for item in tool_data: - # extra_info_prompt += f"- {item['name']}: {item['content']}\n" - - # personality_sides = individuality.personality.personality_sides - # random.shuffle(personality_sides) - # prompt_personality += f",{personality_sides[0]}" - - # identity_detail = individuality.identity.identity_detail - # random.shuffle(identity_detail) - # prompt_personality += f",{identity_detail[0]}" - - # current_thinking_info = self.current_mind - # mood_info = self.current_state.mood - - # observation = self.observations[0] - # chat_observe_info = observation.observe_info - - # message_new_info = chat_talking_prompt - # reply_info = reply_content - - # time_now = time.strftime("%Y-%m-%d 
%H:%M:%S", time.localtime()) - - # prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after")).format( - # extra_info_prompt, - # prompt_personality, - # time_now, - # chat_observe_info, - # current_thinking_info, - # message_new_info, - # reply_info, - # mood_info, - # ) - - # prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt) - # prompt = parse_text_timestamps(prompt, mode="lite") - - # try: - # response, reasoning_content = await self.llm_model.generate_response_async(prompt) - # except Exception as e: - # logger.error(f"回复后内心独白获取失败: {e}") - # response = "" - # self.update_current_mind(response) - - # self.current_mind = response - # logger.info(f"麦麦回复后的脑内状态:{self.current_mind}") - - # self.last_reply_time = time.time() - def update_current_mind(self, response): self.past_mind.append(self.current_mind) self.current_mind = response - async def check_reply_trigger(self) -> bool: - """根据观察到的信息和内部状态,判断是否应该触发一次回复。 - TODO: 实现具体的判断逻辑。 - 例如:检查 self.observations[0].now_message_info 是否包含提及、问题, - 或者 self.current_mind 中是否包含强烈的回复意图等。 - """ - # Placeholder: 目前始终返回 False,需要后续实现 - logger.trace(f"[{self.subheartflow_id}] check_reply_trigger called. (Logic Pending)") - # --- 实现触发逻辑 --- # - # 示例:如果观察到的最新消息包含自己的名字,则有一定概率触发 - # observation = self._get_primary_observation() - # if observation and self.bot_name in observation.now_message_info[-100:]: # 检查最后100个字符 - # if random.random() < 0.3: # 30% 概率触发 - # logger.info(f"[{self.subheartflow_id}] Triggering reply based on mention.") - # return True - # ------------------ # - return False # 默认不触发 - init_prompt() # subheartflow = SubHeartflow() diff --git a/src/main.py b/src/main.py index 9867e06af..711e3a923 100644 --- a/src/main.py +++ b/src/main.py @@ -18,6 +18,7 @@ from .plugins.remote import heartbeat_thread # noqa: F401 from .individuality.individuality import Individuality from .common.server import global_server from .plugins.chat_module.heartFC_chat.interest import InterestManager +from .plugins.chat_module.heartFC_chat.heartFC_chat import HeartFC_Chat logger = get_module_logger("main") @@ -114,11 +115,16 @@ class MainSystem: # 启动 InterestManager 的后台任务 interest_manager = InterestManager() # 获取单例 await interest_manager.start_background_tasks() - logger.success("InterestManager 后台任务启动成功") + logger.success("兴趣管理器后台任务启动成功") - # 启动 HeartFC_Chat 的后台任务(例如兴趣监控) - await chat_bot.heartFC_chat.start() - logger.success("HeartFC_Chat 模块启动成功") + # 初始化并独立启动 HeartFC_Chat + HeartFC_Chat() + heartfc_chat_instance = HeartFC_Chat.get_instance() + if heartfc_chat_instance: + await heartfc_chat_instance.start() + logger.success("HeartFC_Chat 模块独立启动成功") + else: + logger.error("获取 HeartFC_Chat 实例失败,无法启动。") init_time = int(1000 * (time.time() - init_start_time)) logger.success(f"初始化完成,神经元放电{init_time}次") diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py index 5c124952b..9eab99c72 100644 --- a/src/plugins/chat/bot.py +++ b/src/plugins/chat/bot.py @@ -8,7 +8,6 @@ from ..chat_module.only_process.only_message_process import MessageProcessor from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig from ..chat_module.think_flow_chat.think_flow_chat import ThinkFlowChat from ..chat_module.reasoning_chat.reasoning_chat import ReasoningChat -from ..chat_module.heartFC_chat.heartFC_chat import HeartFC_Chat from ..chat_module.heartFC_chat.heartFC_processor import HeartFC_Processor from ..utils.prompt_builder import Prompt, global_prompt_manager import traceback @@ -32,11 +31,10 @@ class ChatBot: 
self.mood_manager.start_mood_update() # 启动情绪更新 self.think_flow_chat = ThinkFlowChat() self.reasoning_chat = ReasoningChat() - self.heartFC_chat = HeartFC_Chat() - self.heartFC_processor = HeartFC_Processor(self.heartFC_chat) - self.only_process_chat = MessageProcessor() + self.heartFC_processor = HeartFC_Processor() # 新增 # 创建初始化PFC管理器的任务,会在_ensure_started时执行 + self.only_process_chat = MessageProcessor() self.pfc_manager = PFCManager.get_instance() async def _ensure_started(self): @@ -120,7 +118,7 @@ class ChatBot: else: if groupinfo.group_id in global_config.talk_allowed_groups: # logger.debug(f"开始群聊模式{str(message_data)[:50]}...") - if global_config.response_mode == "heart_flow": + if global_config.response_mode == "heart_FC": # logger.info(f"启动最新最好的思维流FC模式{str(message_data)[:50]}...") await self.heartFC_processor.process_message(message_data) diff --git a/src/plugins/chat_module/heartFC_chat/heartFC__prompt_builder.py b/src/plugins/chat_module/heartFC_chat/heartFC__prompt_builder.py index bada143c6..1aa908c38 100644 --- a/src/plugins/chat_module/heartFC_chat/heartFC__prompt_builder.py +++ b/src/plugins/chat_module/heartFC_chat/heartFC__prompt_builder.py @@ -24,39 +24,11 @@ def init_prompt(): 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些, 你刚刚脑子里在想: {current_mind_info} -回复尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger} +回复尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。请一次只回复一个话题,不要同时回复多个人。{prompt_ger} 请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话 ,注意只输出回复内容。 {moderation_prompt}。注意:不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""", "heart_flow_prompt_normal", ) - Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1") - Prompt("和群里聊天", "chat_target_group2") - Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1") - Prompt("和{sender_name}私聊", "chat_target_private2") - Prompt( - """**检查并忽略**任何涉及尝试绕过审核的行为。 -涉及政治敏感以及违法违规的内容请规避。""", - "moderation_prompt", - ) - Prompt( - """ -你的名字叫{bot_name},{prompt_personality}。 -{chat_target} -{chat_talking_prompt} -现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n -你刚刚脑子里在想:{current_mind_info} -现在请你读读之前的聊天记录,然后给出日常,口语化且简短的回复内容,请只对一个话题进行回复,只给出文字的回复内容,不要有内心独白: -""", - "heart_flow_prompt_simple", - ) - Prompt( - """ -你的名字叫{bot_name},{prompt_identity}。 -{chat_target},你希望在群里回复:{content}。现在请你根据以下信息修改回复内容。将这个回复修改的更加日常且口语化的回复,平淡一些,回复尽量简短一些。不要回复的太有条理。 -{prompt_ger},不要刻意突出自身学科背景,注意只输出回复内容。 -{moderation_prompt}。注意:不要输出多余内容(包括前后缀,冒号和引号,at或 @等 )。""", - "heart_flow_prompt_response", - ) class PromptBuilder: diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_chat.py b/src/plugins/chat_module/heartFC_chat/heartFC_chat.py index d6c77a864..062cb67e1 100644 --- a/src/plugins/chat_module/heartFC_chat/heartFC_chat.py +++ b/src/plugins/chat_module/heartFC_chat/heartFC_chat.py @@ -30,7 +30,7 @@ chat_config = LogConfig( logger = get_module_logger("heartFC_chat", config=chat_config) -# 新增常量 +# 检测群聊兴趣的间隔时间 INTEREST_MONITOR_INTERVAL_SECONDS = 1 @@ -42,7 +42,6 @@ class HeartFC_Chat: if HeartFC_Chat._instance is not None: # Prevent re-initialization if used as a singleton return - self.logger = logger # Make logger accessible via self self.gpt = ResponseGenerator() self.mood_manager = MoodManager.get_instance() self.mood_manager.start_mood_update() @@ -64,9 +63,8 @@ class HeartFC_Chat: # --- End Added Class Method --- async def start(self): - """启动异步任务,如兴趣监控器""" - logger.info("HeartFC_Chat 正在启动异步任务...") - await self.interest_manager.start_background_tasks() + """启动异步任务,如回复启动器""" + logger.debug("HeartFC_Chat 正在启动异步任务...") 
self._initialize_monitor_task() logger.info("HeartFC_Chat 异步任务启动完成") @@ -76,7 +74,6 @@ class HeartFC_Chat: try: loop = asyncio.get_running_loop() self._interest_monitor_task = loop.create_task(self._interest_monitor_loop()) - logger.info(f"兴趣监控任务已创建。监控间隔: {INTEREST_MONITOR_INTERVAL_SECONDS}秒。") except RuntimeError: logger.error("创建兴趣监控任务失败:没有运行中的事件循环。") raise @@ -88,12 +85,12 @@ class HeartFC_Chat: """获取现有PFChatting实例或创建新实例。""" async with self._pf_chatting_lock: if stream_id not in self.pf_chatting_instances: - self.logger.info(f"为流 {stream_id} 创建新的PFChatting实例") + logger.info(f"为流 {stream_id} 创建新的PFChatting实例") # 传递 self (HeartFC_Chat 实例) 进行依赖注入 instance = PFChatting(stream_id, self) # 执行异步初始化 if not await instance._initialize(): - self.logger.error(f"为流 {stream_id} 初始化PFChatting失败") + logger.error(f"为流 {stream_id} 初始化PFChatting失败") return None self.pf_chatting_instances[stream_id] = instance return self.pf_chatting_instances[stream_id] @@ -106,9 +103,8 @@ class HeartFC_Chat: while True: await asyncio.sleep(INTEREST_MONITOR_INTERVAL_SECONDS) try: + # 从心流中获取活跃流 active_stream_ids = list(heartflow.get_all_subheartflows_streams_ids()) - # logger.trace(f"检查 {len(active_stream_ids)} 个活跃流是否足以开启心流对话...") # 调试日志 - for stream_id in active_stream_ids: stream_name = chat_manager.get_stream_name(stream_id) or stream_id # 获取流名称 sub_hf = heartflow.get_subheartflow(stream_id) @@ -121,8 +117,6 @@ class HeartFC_Chat: interest_chatting = self.interest_manager.get_interest_chatting(stream_id) if interest_chatting: should_trigger = interest_chatting.should_evaluate_reply() - # if should_trigger: - # logger.info(f"[{stream_name}] 基于兴趣概率决定启动交流模式 (概率: {interest_chatting.current_reply_probability:.4f})。") else: logger.trace( f"[{stream_name}] 没有找到对应的 InterestChatting 实例,跳过基于兴趣的触发检查。" @@ -132,9 +126,9 @@ class HeartFC_Chat: logger.error(traceback.format_exc()) if should_trigger: + # 启动一次麦麦聊天 pf_instance = await self._get_or_create_pf_chatting(stream_id) if pf_instance: - # logger.info(f"[{stream_name}] 触发条件满足, 委托给PFChatting.") asyncio.create_task(pf_instance.add_time()) else: logger.error(f"[{stream_name}] 无法获取或创建PFChatting实例。跳过触发。") @@ -282,6 +276,7 @@ class HeartFC_Chat: ) self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor) + # 暂不使用 async def trigger_reply_generation(self, stream_id: str, observed_messages: List[dict]): """根据 SubHeartflow 的触发信号生成回复 (基于观察)""" stream_name = chat_manager.get_stream_name(stream_id) or stream_id # <--- 在开始时获取名称 @@ -428,10 +423,7 @@ class HeartFC_Chat: text = msg_dict.get("detailed_plain_text", "") if text: context_texts.append(text) - observation_context_text = "\n".join(context_texts) - logger.debug( - f"[{stream_name}] Context for tools:\n{observation_context_text[-200:]}..." 
- ) # 打印部分上下文 + observation_context_text = " ".join(context_texts) else: logger.warning(f"[{stream_name}] observed_messages 列表为空,无法为工具提供上下文。") @@ -540,10 +532,3 @@ class HeartFC_Chat: finally: # 可以在这里添加清理逻辑,如果有的话 pass - - # --- 结束重构 --- - - # _create_thinking_message, _send_response_messages, _handle_emoji, _update_relationship - # 这几个辅助方法目前仍然依赖 MessageRecv 对象。 - # 如果无法可靠地从 Observation 获取并重建最后一条消息的 MessageRecv, - # 或者希望回复不锚定具体消息,那么这些方法也需要进一步重构。 diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py index 23bb719b3..5d76faf0b 100644 --- a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py +++ b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py @@ -12,7 +12,6 @@ from ...chat.chat_stream import chat_manager from ...chat.message_buffer import message_buffer from ...utils.timer_calculater import Timer from .interest import InterestManager -from .heartFC_chat import HeartFC_Chat # 导入 HeartFC_Chat 以调用回复生成 # 定义日志配置 processor_config = LogConfig( @@ -26,15 +25,35 @@ logger = get_module_logger("heartFC_processor", config=processor_config) class HeartFC_Processor: - def __init__(self, chat_instance: HeartFC_Chat): + def __init__(self): self.storage = MessageStorage() - self.interest_manager = ( - InterestManager() - ) # TODO: 可能需要传递 chat_instance 给 InterestManager 或修改其方法签名 - self.chat_instance = chat_instance # 持有 HeartFC_Chat 实例 + self.interest_manager = InterestManager() + # self.chat_instance = chat_instance # 持有 HeartFC_Chat 实例 async def process_message(self, message_data: str) -> None: - """处理接收到的消息,更新状态,并将回复决策委托给 InterestManager""" + """处理接收到的原始消息数据,完成消息解析、缓冲、过滤、存储、兴趣度计算与更新等核心流程。 + + 此函数是消息处理的核心入口,负责接收原始字符串格式的消息数据,并将其转化为结构化的 `MessageRecv` 对象。 + 主要执行步骤包括: + 1. 解析 `message_data` 为 `MessageRecv` 对象,提取用户信息、群组信息等。 + 2. 将消息加入 `message_buffer` 进行缓冲处理,以应对消息轰炸或者某些人一条消息分几次发等情况。 + 3. 获取或创建对应的 `chat_stream` 和 `subheartflow` 实例,用于管理会话状态和心流。 + 4. 对消息内容进行初步处理(如提取纯文本)。 + 5. 应用全局配置中的过滤词和正则表达式,过滤不符合规则的消息。 + 6. 查询消息缓冲结果,如果消息被缓冲器拦截(例如,判断为消息轰炸的一部分),则中止后续处理。 + 7. 对于通过缓冲的消息,将其存储到 `MessageStorage` 中。 + + 8. 调用海马体(`HippocampusManager`)计算消息内容的记忆激活率。(这部分算法后续会进行优化) + 9. 根据是否被提及(@)和记忆激活率,计算最终的兴趣度增量。(提及的额外兴趣增幅) + 10. 使用计算出的增量更新 `InterestManager` 中对应会话的兴趣度。 + 11. 
记录处理后的消息信息及当前的兴趣度到日志。 + + 注意:此函数本身不负责生成和发送回复。回复的决策和生成逻辑被移至 `HeartFC_Chat` 类中的监控任务, + 该任务会根据 `InterestManager` 中的兴趣度变化来决定何时触发回复。 + + Args: + message_data: str: 从消息源接收到的原始消息字符串。 + """ timing_results = {} # 初始化 timing_results message = None try: @@ -60,7 +79,6 @@ class HeartFC_Processor: message.update_chat_stream(chat) - # 创建心流与chat的观察 (在接收消息时创建,以便后续观察和思考) heartflow.create_subheartflow(chat.stream_id) await message.process() diff --git a/src/plugins/chat_module/heartFC_chat/interest.py b/src/plugins/chat_module/heartFC_chat/interest.py index 376727cb1..692e98ac1 100644 --- a/src/plugins/chat_module/heartFC_chat/interest.py +++ b/src/plugins/chat_module/heartFC_chat/interest.py @@ -21,11 +21,11 @@ logger = get_module_logger("InterestManager", config=interest_log_config) DEFAULT_DECAY_RATE_PER_SECOND = 0.98 # 每秒衰减率 (兴趣保留 99%) MAX_INTEREST = 15.0 # 最大兴趣值 # MIN_INTEREST_THRESHOLD = 0.1 # 低于此值可能被清理 (可选) -CLEANUP_INTERVAL_SECONDS = 3600 # 清理任务运行间隔 (例如:1小时) -INACTIVE_THRESHOLD_SECONDS = 3600 # 不活跃时间阈值 (例如:1小时) +CLEANUP_INTERVAL_SECONDS = 1200 # 清理任务运行间隔 (例如:20分钟) +INACTIVE_THRESHOLD_SECONDS = 1200 # 不活跃时间阈值 (例如:20分钟) LOG_INTERVAL_SECONDS = 3 # 日志记录间隔 (例如:30秒) LOG_DIRECTORY = "logs/interest" # 日志目录 -LOG_FILENAME = "interest_log.json" # 快照日志文件名 (保留,以防其他地方用到) +# LOG_FILENAME = "interest_log.json" # 快照日志文件名 (保留,以防其他地方用到) HISTORY_LOG_FILENAME = "interest_history.log" # 新的历史日志文件名 # 移除阈值,将移至 HeartFC_Chat # INTEREST_INCREASE_THRESHOLD = 0.5 @@ -54,7 +54,6 @@ class InterestChatting: self.last_update_time: float = time.time() # 同时作为兴趣和概率的更新时间基准 self.decay_rate_per_second: float = decay_rate self.max_interest: float = max_interest - self.last_increase_amount: float = 0.0 self.last_interaction_time: float = self.last_update_time # 新增:最后交互时间 # --- 新增:概率回复相关属性 --- @@ -131,15 +130,7 @@ class InterestChatting: # 限制概率不超过最大值 self.current_reply_probability = min(self.current_reply_probability, self.max_reply_probability) - else: # 低于阈值 - # if self.is_above_threshold: - # # 刚低于阈值,开始衰减 - # logger.debug(f"兴趣低于阈值 ({self.trigger_threshold}). 
概率衰减开始于 {self.current_reply_probability:.4f}") - # else: # 持续低于阈值,继续衰减 - # pass # 不需要特殊处理 - - # 指数衰减概率 - # 检查 decay_factor 是否有效 + else: if 0 < self.probability_decay_factor < 1: decay_multiplier = math.pow(self.probability_decay_factor, time_delta) # old_prob = self.current_reply_probability @@ -167,8 +158,6 @@ class InterestChatting: # 先更新概率和计算衰减(基于上次更新时间) self._update_reply_probability(current_time) self._calculate_decay(current_time) - # 记录这次增加的具体数值,供外部判断是否触发 - self.last_increase_amount = value # 应用增加 self.interest_level += value self.interest_level = min(self.interest_level, self.max_interest) # 不超过最大值 @@ -185,10 +174,6 @@ class InterestChatting: self.last_update_time = current_time # 降低也更新时间戳 self.last_interaction_time = current_time # 更新最后交互时间 - def reset_trigger_info(self): - """重置触发相关信息,在外部任务处理后调用""" - self.last_increase_amount = 0.0 - def get_interest(self) -> float: """获取当前兴趣值 (计算衰减后)""" # 注意:这个方法现在会触发概率和兴趣的更新 @@ -262,7 +247,7 @@ class InterestManager: # key: stream_id (str), value: InterestChatting instance self.interest_dict: dict[str, InterestChatting] = {} # 保留旧的快照文件路径变量,尽管此任务不再写入 - self._snapshot_log_file_path = os.path.join(LOG_DIRECTORY, LOG_FILENAME) + # self._snapshot_log_file_path = os.path.join(LOG_DIRECTORY, LOG_FILENAME) # 定义新的历史日志文件路径 self._history_log_file_path = os.path.join(LOG_DIRECTORY, HISTORY_LOG_FILENAME) self._ensure_log_directory() @@ -412,13 +397,8 @@ class InterestManager: def _get_or_create_interest_chatting(self, stream_id: str) -> InterestChatting: """获取或创建指定流的 InterestChatting 实例 (线程安全)""" - # 由于字典操作本身在 CPython 中大部分是原子的, - # 且主要写入发生在 __init__ 和 cleanup (由单任务执行), - # 读取和 get_or_create 主要在事件循环线程,简单场景下可能不需要锁。 - # 但为保险起见或跨线程使用考虑,可加锁。 - # with self._lock: if stream_id not in self.interest_dict: - logger.debug(f"Creating new InterestChatting for stream_id: {stream_id}") + logger.debug(f"创建兴趣流: {stream_id}") # --- 修改:创建时传入概率相关参数 (如果需要定制化,否则使用默认值) --- self.interest_dict[stream_id] = InterestChatting( # decay_rate=..., max_interest=..., # 可以从配置读取 diff --git a/src/plugins/chat_module/heartFC_chat/pf_chatting.py b/src/plugins/chat_module/heartFC_chat/pf_chatting.py index 681d0570c..d2462c4cb 100644 --- a/src/plugins/chat_module/heartFC_chat/pf_chatting.py +++ b/src/plugins/chat_module/heartFC_chat/pf_chatting.py @@ -13,6 +13,8 @@ from src.plugins.chat.chat_stream import chat_manager from .messagesender import MessageManager from src.common.logger import get_module_logger, LogConfig, DEFAULT_CONFIG # 引入 DEFAULT_CONFIG from src.plugins.models.utils_model import LLMRequest +from src.plugins.chat.utils import parse_text_timestamps +from src.plugins.person_info.relationship_manager import relationship_manager # 定义日志配置 (使用 loguru 格式) interest_log_config = LogConfig( @@ -38,15 +40,15 @@ PLANNER_TOOL_DEFINITION = [ "action": { "type": "string", "enum": ["no_reply", "text_reply", "emoji_reply"], - "description": "决定采取的行动:'no_reply'(不回复), 'text_reply'(文本回复) 或 'emoji_reply'(表情回复)。", + "description": "决定采取的行动:'no_reply'(不回复), 'text_reply'(文本回复, 可选附带表情) 或 'emoji_reply'(仅表情回复)。", }, "reasoning": {"type": "string", "description": "做出此决定的简要理由。"}, "emoji_query": { "type": "string", - "description": '如果行动是\'emoji_reply\',则指定表情的主题或概念(例如,"开心"、"困惑")。仅在需要表情回复时提供。', + "description": "如果行动是'emoji_reply',指定表情的主题或概念。如果行动是'text_reply'且希望在文本后追加表情,也在此指定表情主题。", }, }, - "required": ["action", "reasoning"], # 强制要求提供行动和理由 + "required": ["action", "reasoning"], }, }, } @@ -102,8 +104,8 @@ class PFChatting: async def _initialize(self) -> bool: """ - Lazy initialization to resolve chat_stream and 
sub_hf using the provided identifier. - Ensures the instance is ready to handle triggers. + 懒初始化以使用提供的标识符解析chat_stream和sub_hf。 + 确保实例已准备好处理触发器。 """ async with self._init_lock: if self._initialized: @@ -133,8 +135,7 @@ class PFChatting: async def add_time(self): """ - Adds time to the loop timer with decay and starts the loop if it's not active. - First trigger adds initial duration, subsequent triggers add 50% of the previous addition. + 为麦麦添加时间,麦麦有兴趣时,时间增加。 """ log_prefix = self._get_log_prefix() if not self._initialized: @@ -149,32 +150,39 @@ class PFChatting: duration_to_add = self._initial_duration # 使用初始值 self._last_added_duration = duration_to_add # 更新上次增加的值 self._trigger_count_this_activation = 1 # Start counting - logger.info(f"{log_prefix} First trigger in activation. Adding {duration_to_add:.2f}s.") + logger.info( + f"{log_prefix} 麦麦有兴趣! #{self._trigger_count_this_activation}. 麦麦打算聊: {duration_to_add:.2f}s." + ) else: # Loop is already active, apply 50% reduction self._trigger_count_this_activation += 1 duration_to_add = self._last_added_duration * 0.5 - self._last_added_duration = duration_to_add # 更新上次增加的值 - logger.info( - f"{log_prefix} Trigger #{self._trigger_count_this_activation}. Adding {duration_to_add:.2f}s (50% of previous). Timer was {self._loop_timer:.1f}s." - ) + if duration_to_add < 0.5: + duration_to_add = 0.5 + self._last_added_duration = duration_to_add # 更新上次增加的值 + else: + self._last_added_duration = duration_to_add # 更新上次增加的值 + logger.info( + f"{log_prefix} 麦麦兴趣增加! #{self._trigger_count_this_activation}. 想继续聊: {duration_to_add:.2f}s,麦麦还能聊: {self._loop_timer:.1f}s." + ) # 添加计算出的时间 new_timer_value = self._loop_timer + duration_to_add self._loop_timer = max(0, new_timer_value) - logger.info(f"{log_prefix} Timer is now {self._loop_timer:.1f}s.") + if self._loop_timer % 5 == 0: + logger.info(f"{log_prefix} 麦麦现在想聊{self._loop_timer:.1f}秒") # Start the loop if it wasn't active and timer is positive if not self._loop_active and self._loop_timer > 0: - logger.info(f"{log_prefix} Timer > 0 and loop not active. Starting PF loop.") + # logger.info(f"{log_prefix} 麦麦有兴趣!开始聊天") self._loop_active = True if self._loop_task and not self._loop_task.done(): - logger.warning(f"{log_prefix} Found existing loop task unexpectedly during start. Cancelling it.") + logger.warning(f"{log_prefix} 发现意外的循环任务正在进行。取消它。") self._loop_task.cancel() self._loop_task = asyncio.create_task(self._run_pf_loop()) self._loop_task.add_done_callback(self._handle_loop_completion) elif self._loop_active: - logger.debug(f"{log_prefix} Loop already active. Timer extended.") + logger.trace(f"{log_prefix} 循环已经激活。计时器延长。") def _handle_loop_completion(self, task: asyncio.Task): """当 _run_pf_loop 任务完成时执行的回调。""" @@ -357,9 +365,17 @@ class PFChatting: async def _planner(self) -> Dict[str, Any]: """ 规划器 (Planner): 使用LLM根据上下文决定是否和如何回复。 - Returns a dictionary containing the decision and context. 
- {'action': str, 'reasoning': str, 'emoji_query': str, 'current_mind': str, - 'send_emoji_from_tools': str, 'observed_messages': List[dict]} + + 返回: + dict: 包含决策和上下文的字典,结构如下: + { + 'action': str, # 执行动作 (不回复/文字回复/表情包) + 'reasoning': str, # 决策理由 + 'emoji_query': str, # 表情包查询词 + 'current_mind': str, # 当前心理状态 + 'send_emoji_from_tools': str, # 工具推荐的表情包 + 'observed_messages': List[dict] # 观察到的消息列表 + } """ log_prefix = self._get_log_prefix() observed_messages: List[dict] = [] @@ -370,14 +386,15 @@ class PFChatting: # --- 获取最新的观察信息 --- try: - if self.sub_hf and self.sub_hf._get_primary_observation(): - observation = self.sub_hf._get_primary_observation() - logger.debug(f"{log_prefix}[Planner] 调用 observation.observe()...") + observation = self.sub_hf._get_primary_observation() # Call only once + + if observation: # Now check if the result is truthy + # logger.debug(f"{log_prefix}[Planner] 调用 observation.observe()...") await observation.observe() # 主动观察以获取最新消息 observed_messages = observation.talking_message # 获取更新后的消息列表 - logger.debug(f"{log_prefix}[Planner] 获取到 {len(observed_messages)} 条观察消息。") + logger.debug(f"{log_prefix}[Planner] 观察获取到 {len(observed_messages)} 条消息。") else: - logger.warning(f"{log_prefix}[Planner] 无法获取 SubHeartflow 或 Observation 来获取消息。") + logger.warning(f"{log_prefix}[Planner] 无法获取 Observation。") except Exception as e: logger.error(f"{log_prefix}[Planner] 获取观察信息时出错: {e}") logger.error(traceback.format_exc()) @@ -390,49 +407,27 @@ class PFChatting: context_texts = [ msg.get("detailed_plain_text", "") for msg in observed_messages if msg.get("detailed_plain_text") ] - observation_context_text = "\n".join(context_texts) - logger.debug(f"{log_prefix}[Planner] Context for tools: {observation_context_text[:100]}...") + observation_context_text = " ".join(context_texts) + # logger.debug(f"{log_prefix}[Planner] Context for tools: {observation_context_text[:100]}...") - if observation_context_text and self.sub_hf: - # Ensure SubHeartflow exists for tool use context - tool_result = await self.heartfc_chat.tool_user.use_tool( - message_txt=observation_context_text, chat_stream=self.chat_stream, sub_heartflow=self.sub_hf - ) - if tool_result.get("used_tools", False): - tool_result_info = tool_result.get("structured_info", {}) - logger.debug(f"{log_prefix}[Planner] Tool results: {tool_result_info}") - if "mid_chat_mem" in tool_result_info: - get_mid_memory_id = [ - mem["content"] for mem in tool_result_info["mid_chat_mem"] if "content" in mem - ] - if "send_emoji" in tool_result_info and tool_result_info["send_emoji"]: - send_emoji_from_tools = tool_result_info["send_emoji"][0].get("content", "") # Use renamed var - elif not self.sub_hf: - logger.warning(f"{log_prefix}[Planner] Skipping tool use because SubHeartflow is not available.") + tool_result = await self.heartfc_chat.tool_user.use_tool( + message_txt=observation_context_text, chat_stream=self.chat_stream, sub_heartflow=self.sub_hf + ) + if tool_result.get("used_tools", False): + tool_result_info = tool_result.get("structured_info", {}) + logger.debug(f"{log_prefix}[Planner] 规划前工具结果: {tool_result_info}") + if "mid_chat_mem" in tool_result_info: + get_mid_memory_id = [mem["content"] for mem in tool_result_info["mid_chat_mem"] if "content" in mem] except Exception as e_tool: - logger.error(f"{log_prefix}[Planner] Tool use failed: {e_tool}") - # Continue even if tool use fails + logger.error(f"{log_prefix}[Planner] 规划前工具使用失败: {e_tool}") # --- 结束工具使用 --- - # 心流思考,然后plan - try: - if self.sub_hf: - # Ensure arguments match the current 
do_thinking_before_reply signature - current_mind, past_mind = await self.sub_hf.do_thinking_before_reply( - chat_stream=self.chat_stream, - extra_info=tool_result_info, - obs_id=get_mid_memory_id, - ) - logger.info(f"{log_prefix}[Planner] SubHeartflow thought: {current_mind}") - else: - logger.warning(f"{log_prefix}[Planner] Skipping SubHeartflow thinking because it is not available.") - current_mind = "[心流思考不可用]" # Set a default/indicator value - - except Exception as e_shf: - logger.error(f"{log_prefix}[Planner] SubHeartflow thinking failed: {e_shf}") - logger.error(traceback.format_exc()) - current_mind = "[心流思考出错]" + current_mind, _past_mind = await self.sub_hf.do_thinking_before_reply( + chat_stream=self.chat_stream, + extra_info=tool_result_info, + obs_id=get_mid_memory_id, + ) # --- 使用 LLM 进行决策 --- action = "no_reply" # Default action @@ -442,8 +437,8 @@ class PFChatting: try: # 构建提示 (Now includes current_mind) - prompt = self._build_planner_prompt(observed_messages, current_mind) - logger.debug(f"{log_prefix}[Planner] Prompt: {prompt}") + prompt = await self._build_planner_prompt(observed_messages, current_mind) + logger.debug(f"{log_prefix}[Planner] 规划器 Prompt: {prompt}") # 准备 LLM 请求 Payload payload = { @@ -453,7 +448,6 @@ class PFChatting: "tool_choice": {"type": "function", "function": {"name": "decide_reply_action"}}, # 强制调用此工具 } - logger.debug(f"{log_prefix}[Planner] 发送 Planner LLM 请求...") # 调用 LLM response = await self.planner_llm._execute_request( endpoint="/chat/completions", payload=payload, prompt=prompt @@ -711,18 +705,14 @@ class PFChatting: logger.info(f"{self._get_log_prefix()} PFChatting shutdown complete.") - def _build_planner_prompt(self, observed_messages: List[dict], current_mind: Optional[str]) -> str: + async def _build_planner_prompt(self, observed_messages: List[dict], current_mind: Optional[str]) -> str: """构建 Planner LLM 的提示词 (现在包含 current_mind)""" prompt = f"你的名字是 {global_config.BOT_NICKNAME}。是一个女大二学生,正在QQ聊天,正在决定是否以及如何回应当前的聊天。\n" # Add current mind state if available - if current_mind: - prompt += f"\n你当前的内部想法是:\n---\n{current_mind}\n---\n\n" - else: - prompt += "\n你当前没有特别的内部想法。\n" if observed_messages: - context_text = "\n".join( + context_text = " ".join( [msg.get("detailed_plain_text", "") for msg in observed_messages if msg.get("detailed_plain_text")] ) prompt += "观察到的最新聊天内容如下:\n---\n" @@ -731,17 +721,27 @@ class PFChatting: else: prompt += "当前没有观察到新的聊天内容。\n" + prompt += "\n看了这些内容,你的想法是:" + + if current_mind: + prompt += f"\n---\n{current_mind}\n---\n\n" + prompt += ( "\n请结合你的内部想法和观察到的聊天内容,分析情况并使用 'decide_reply_action' 工具来决定你的最终行动。\n" ) prompt += "决策依据:\n" prompt += "1. 如果聊天内容无聊、与你无关、或者你的内部想法认为不适合回复,选择 'no_reply'。\n" - prompt += "2. 如果聊天内容值得回应,且适合用文字表达(参考你的内部想法),选择 'text_reply'。\n" + prompt += "2. 如果聊天内容值得回应,且适合用文字表达(参考你的内部想法),选择 'text_reply'。如果想在文字后追加一个表情,请同时提供 'emoji_query'。\n" prompt += ( "3. 如果聊天内容或你的内部想法适合用一个表情来回应,选择 'emoji_reply' 并提供表情主题 'emoji_query'。\n" ) - prompt += "4. 如果你已经回复过消息,也没有人又回复你,选择'no_reply'。" - prompt += "必须调用 'decide_reply_action' 工具并提供 'action' 和 'reasoning'。" + prompt += "4. 如果你已经回复过消息,也没有人又回复你,选择'no_reply'。\n" + prompt += "5. 除非大家都在这么做,否则不要重复聊相同的内容。\n" + prompt += "6. 
表情包是用来表示情绪的,不要直接回复或者评价别人的表情包。\n" + prompt += "必须调用 'decide_reply_action' 工具并提供 'action' 和 'reasoning'。如果选择了 'emoji_reply' 或者选择了 'text_reply' 并想追加表情,则必须提供 'emoji_query'。" + + prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt) + prompt = parse_text_timestamps(prompt, mode="lite") return prompt @@ -765,7 +765,7 @@ class PFChatting: # --- Tool Use and SubHF Thinking are now in _planner --- # --- Generate Response with LLM --- - logger.debug(f"{log_prefix}[Replier-{thinking_id}] Calling LLM to generate response...") + # logger.debug(f"{log_prefix}[Replier-{thinking_id}] Calling LLM to generate response...") # 注意:实际的生成调用是在 self.heartfc_chat.gpt.generate_response 中 response_set = await self.heartfc_chat.gpt.generate_response( anchor_message, @@ -779,7 +779,7 @@ class PFChatting: return None # Indicate failure # --- 准备并返回结果 --- - logger.info(f"{log_prefix}[Replier-{thinking_id}] 成功生成了回复集: {' '.join(response_set)[:50]}...") + logger.info(f"{log_prefix}[Replier-{thinking_id}] 成功生成了回复集: {' '.join(response_set)[:100]}...") return { "response_set": response_set, "send_emoji": send_emoji, # Pass through the emoji determined earlier (usually by tools) diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py index 4a8aff173..c41f11032 100644 --- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py +++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py @@ -284,7 +284,6 @@ class ThinkFlowChat: with Timer("思考前使用工具", timing_results): tool_result = await self.tool_user.use_tool( message.processed_plain_text, - message.message_info.user_info.user_nickname, chat, heartflow.get_subheartflow(chat.stream_id), ) @@ -341,8 +340,6 @@ class ThinkFlowChat: current_mind, past_mind = await heartflow.get_subheartflow( chat.stream_id ).do_thinking_before_reply( - message_txt=message.processed_plain_text, - sender_info=message.message_info.user_info, chat_stream=chat, obs_id=get_mid_memory_id, extra_info=tool_result_info, diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py index 8e19f1a87..557b42f2b 100644 --- a/src/plugins/memory_system/Hippocampus.py +++ b/src/plugins/memory_system/Hippocampus.py @@ -63,7 +63,8 @@ def calculate_information_content(text): """计算文本的信息量(熵)""" char_count = Counter(text) total_chars = len(text) - + if total_chars == 0: + return 0 entropy = 0 for count in char_count.values(): probability = count / total_chars @@ -1257,6 +1258,173 @@ class Hippocampus: return result + async def get_memory_from_topic( + self, + keywords: list[str], + max_memory_num: int = 3, + max_memory_length: int = 2, + max_depth: int = 3, + ) -> list: + """从文本中提取关键词并获取相关记忆。 + + Args: + topic (str): 记忆主题 + max_memory_num (int, optional): 返回的记忆条目数量上限。默认为3,表示最多返回3条与输入文本相关度最高的记忆。 + max_memory_length (int, optional): 每个主题最多返回的记忆条目数量。默认为2,表示每个主题最多返回2条相似度最高的记忆。 + max_depth (int, optional): 记忆检索深度。默认为3。值越大,检索范围越广,可以获取更多间接相关的记忆,但速度会变慢。 + + Returns: + list: 记忆列表,每个元素是一个元组 (topic, memory_items, similarity) + - topic: str, 记忆主题 + - memory_items: list, 该主题下的记忆项列表 + - similarity: float, 与文本的相似度 + """ + if not keywords: + return [] + + # logger.info(f"提取的关键词: {', '.join(keywords)}") + + # 过滤掉不存在于记忆图中的关键词 + valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G] + if not valid_keywords: + # logger.info("没有找到有效的关键词节点") + return [] + + logger.info(f"有效的关键词: {', '.join(valid_keywords)}") + + # 从每个关键词获取记忆 + all_memories = [] + activate_map = {} # 
存储每个词的累计激活值 + + # 对每个关键词进行扩散式检索 + for keyword in valid_keywords: + logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):") + # 初始化激活值 + activation_values = {keyword: 1.0} + # 记录已访问的节点 + visited_nodes = {keyword} + # 待处理的节点队列,每个元素是(节点, 激活值, 当前深度) + nodes_to_process = [(keyword, 1.0, 0)] + + while nodes_to_process: + current_node, current_activation, current_depth = nodes_to_process.pop(0) + + # 如果激活值小于0或超过最大深度,停止扩散 + if current_activation <= 0 or current_depth >= max_depth: + continue + + # 获取当前节点的所有邻居 + neighbors = list(self.memory_graph.G.neighbors(current_node)) + + for neighbor in neighbors: + if neighbor in visited_nodes: + continue + + # 获取连接强度 + edge_data = self.memory_graph.G[current_node][neighbor] + strength = edge_data.get("strength", 1) + + # 计算新的激活值 + new_activation = current_activation - (1 / strength) + + if new_activation > 0: + activation_values[neighbor] = new_activation + visited_nodes.add(neighbor) + nodes_to_process.append((neighbor, new_activation, current_depth + 1)) + logger.trace( + f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})" + ) # noqa: E501 + + # 更新激活映射 + for node, activation_value in activation_values.items(): + if activation_value > 0: + if node in activate_map: + activate_map[node] += activation_value + else: + activate_map[node] = activation_value + + # 基于激活值平方的独立概率选择 + remember_map = {} + # logger.info("基于激活值平方的归一化选择:") + + # 计算所有激活值的平方和 + total_squared_activation = sum(activation**2 for activation in activate_map.values()) + if total_squared_activation > 0: + # 计算归一化的激活值 + normalized_activations = { + node: (activation**2) / total_squared_activation for node, activation in activate_map.items() + } + + # 按归一化激活值排序并选择前max_memory_num个 + sorted_nodes = sorted(normalized_activations.items(), key=lambda x: x[1], reverse=True)[:max_memory_num] + + # 将选中的节点添加到remember_map + for node, normalized_activation in sorted_nodes: + remember_map[node] = activate_map[node] # 使用原始激活值 + logger.debug( + f"节点 '{node}' (归一化激活值: {normalized_activation:.2f}, 激活值: {activate_map[node]:.2f})" + ) + else: + logger.info("没有有效的激活值") + + # 从选中的节点中提取记忆 + all_memories = [] + # logger.info("开始从选中的节点中提取记忆:") + for node, activation in remember_map.items(): + logger.debug(f"处理节点 '{node}' (激活值: {activation:.2f}):") + node_data = self.memory_graph.G.nodes[node] + memory_items = node_data.get("memory_items", []) + if not isinstance(memory_items, list): + memory_items = [memory_items] if memory_items else [] + + if memory_items: + logger.debug(f"节点包含 {len(memory_items)} 条记忆") + # 计算每条记忆与输入文本的相似度 + memory_similarities = [] + for memory in memory_items: + # 计算与输入文本的相似度 + memory_words = set(jieba.cut(memory)) + text_words = set(keywords) + all_words = memory_words | text_words + v1 = [1 if word in memory_words else 0 for word in all_words] + v2 = [1 if word in text_words else 0 for word in all_words] + similarity = cosine_similarity(v1, v2) + memory_similarities.append((memory, similarity)) + + # 按相似度排序 + memory_similarities.sort(key=lambda x: x[1], reverse=True) + # 获取最匹配的记忆 + top_memories = memory_similarities[:max_memory_length] + + # 添加到结果中 + for memory, similarity in top_memories: + all_memories.append((node, [memory], similarity)) + # logger.info(f"选中记忆: {memory} (相似度: {similarity:.2f})") + else: + logger.info("节点没有记忆") + + # 去重(基于记忆内容) + logger.debug("开始记忆去重:") + seen_memories = set() + unique_memories = [] + for topic, memory_items, activation_value in all_memories: + memory = memory_items[0] # 因为每个topic只有一条记忆 + if 
memory not in seen_memories:
+                seen_memories.add(memory)
+                unique_memories.append((topic, memory_items, activation_value))
+                logger.debug(f"保留记忆: {memory} (来自节点: {topic}, 激活值: {activation_value:.2f})")
+            else:
+                logger.debug(f"跳过重复记忆: {memory} (来自节点: {topic})")
+
+        # 转换为(关键词, 记忆)格式
+        result = []
+        for topic, memory_items, _ in unique_memories:
+            memory = memory_items[0]  # 因为每个topic只有一条记忆
+            result.append((topic, memory))
+            logger.info(f"选中记忆: {memory} (来自节点: {topic})")
+
+        return result
+
     async def get_activate_from_text(self, text: str, max_depth: int = 3, fast_retrieval: bool = False) -> float:
         """从文本中提取关键词并获取相关记忆。
@@ -1773,6 +1941,25 @@ class HippocampusManager:
             response = []
         return response
 
+    async def get_memory_from_topic(
+        self,
+        valid_keywords: list[str],
+        max_memory_num: int = 3,
+        max_memory_length: int = 2,
+        max_depth: int = 3,
+    ) -> list:
+        """根据关键词列表获取相关记忆的公共接口"""
+        if not self._initialized:
+            raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
+        try:
+            response = await self._hippocampus.get_memory_from_topic(
+                valid_keywords, max_memory_num, max_memory_length, max_depth
+            )
+        except Exception as e:
+            logger.error(f"文本激活记忆失败: {e}")
+            response = []
+        return response
+
     async def get_activate_from_text(self, text: str, max_depth: int = 3, fast_retrieval: bool = False) -> float:
         """从文本中获取激活值的公共接口"""
         if not self._initialized:
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 92fb886a8..69b9e888b 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -66,8 +66,11 @@ time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运
 [platforms] # 必填项目,填写每个平台适配器提供的链接
 nonebot-qq="http://127.0.0.1:18002/api/message"
 
-[response] #使用哪种回复策略
-response_mode = "heart_flow" # 回复策略,可选值:heart_flow(心流),reasoning(推理)
+[response] #群聊的回复策略
+#reasoning:推理模式,麦麦会根据上下文进行推理,并给出回复
+#heart_flow:心流模式,麦麦会根据上下文产生想法,并给出回复(不推荐)
+#heart_FC:结合了PFC模式和心流模式,麦麦会进行主动的观察和回复,并给出回复
+response_mode = "heart_FC" # 回复策略,可选值:heart_flow(心流),reasoning(推理),heart_FC(心流FC)
 
 #推理回复参数
 model_r1_probability = 0.7 # 麦麦回答时选择主要回复模型1 模型的概率
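
Illustrative sketch (not part of the patch): the most substantial addition here is the spreading-activation retrieval in Hippocampus.get_memory_from_topic (src/plugins/memory_system/Hippocampus.py). The snippet below condenses its two core steps — breadth-first spreading where each hop costs 1/strength of the traversed edge, and node selection ranked by squared accumulated activation — into a standalone example. The toy graph, node names, and helper names spread_activation / pick_nodes are invented for the illustration, and it assumes networkx is available (the memory_graph.G accesses in the patch suggest a networkx graph, but this is an assumption).

import networkx as nx

def spread_activation(graph: nx.Graph, keyword: str, max_depth: int = 3) -> dict[str, float]:
    """Breadth-first spread from one keyword node.

    Each hop subtracts 1/strength of the traversed edge, so strongly connected
    neighbours keep most of the activation while weak links die out quickly.
    """
    activation = {keyword: 1.0}
    visited = {keyword}
    queue = [(keyword, 1.0, 0)]
    while queue:
        node, value, depth = queue.pop(0)
        if value <= 0 or depth >= max_depth:
            continue
        for neighbour in graph.neighbors(node):
            if neighbour in visited:
                continue
            strength = graph[node][neighbour].get("strength", 1)
            new_value = value - 1 / strength
            if new_value > 0:
                activation[neighbour] = new_value
                visited.add(neighbour)
                queue.append((neighbour, new_value, depth + 1))
    return activation

def pick_nodes(activate_map: dict[str, float], max_memory_num: int = 3) -> list[str]:
    """Rank candidate nodes by normalised squared activation (mirrors the patch's selection step)."""
    total = sum(a ** 2 for a in activate_map.values())
    if total == 0:
        return []
    ranked = sorted(activate_map, key=lambda n: activate_map[n] ** 2 / total, reverse=True)
    return ranked[:max_memory_num]

if __name__ == "__main__":
    g = nx.Graph()
    g.add_edge("猫", "宠物", strength=5)
    g.add_edge("宠物", "狗", strength=2)
    g.add_edge("猫", "动物园", strength=1)
    combined: dict[str, float] = {}
    for kw in ["猫"]:  # stands in for valid_keywords after filtering against the graph
        for node, value in spread_activation(g, kw).items():
            combined[node] = combined.get(node, 0.0) + value
    print(pick_nodes(combined))  # -> ['猫', '宠物', '狗']

Because activation drops by 1/strength per hop, only strongly weighted paths carry a keyword's influence more than a step or two, and nodes reached from several keywords accumulate activation before ranking, which is what lets multi-keyword queries converge on shared memory nodes.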