From 05f0aaa6d7313cc681b019010c7870bbb90a1fbc Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Mon, 12 May 2025 11:49:14 +0800 Subject: [PATCH] =?UTF-8?q?feat=EF=BC=9A=E5=AF=B9HFC=E8=BF=9B=E8=A1=8C?= =?UTF-8?q?=E5=B7=A8=E5=A4=A7=E9=87=8D=E6=9E=84=EF=BC=8C=E9=87=87=E7=94=A8?= =?UTF-8?q?=E6=96=B0=E6=9E=B6=E6=9E=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/config/config.py | 6 +- src/do_tool/tool_use.py | 2 +- src/heart_flow/chatting_observation.py | 269 ++++ src/heart_flow/hfcloop_observation.py | 74 ++ src/heart_flow/info/chat_info.py | 97 ++ src/heart_flow/info/cycle_info.py | 157 +++ src/heart_flow/info/info_base.py | 60 + src/heart_flow/info/mind_info.py | 34 + src/heart_flow/info/obs_info.py | 107 ++ src/heart_flow/info/structured_info.py | 69 + src/heart_flow/memory_observation.py | 57 + src/heart_flow/observation.py | 267 +--- src/heart_flow/sub_heartflow.py | 14 +- src/heart_flow/subheartflow_manager.py | 2 +- src/heart_flow/tool_user.py | 73 +- src/heart_flow/working_observation.py | 34 + src/plugins/chat/message.py | 1 + src/plugins/chat/message_sender.py | 4 +- src/plugins/chat/utils_image.py | 4 +- src/plugins/heartFC_chat/cycle_analyzer.py | 95 +- src/plugins/heartFC_chat/cycle_viewer.py | 90 +- .../expressors/default_expressor.py | 319 +++++ src/plugins/heartFC_chat/heartFC_Cycleinfo.py | 100 +- src/plugins/heartFC_chat/heartFC_chat.py | 1129 +++-------------- src/plugins/heartFC_chat/heartFC_sender.py | 8 +- .../heartFC_chat/heartflow_prompt_builder.py | 120 +- src/plugins/heartFC_chat/hfc_utils.py | 44 + .../info_processors/base_processor.py | 48 + .../info_processors/chattinginfo_processor.py | 70 + .../info_processors/mind_processor.py} | 346 ++--- .../info_processors/processor_utils.py | 56 + .../info_processors/tool_processor.py | 200 +++ src/plugins/heartFC_chat/normal_chat.py | 3 + 33 files changed, 2221 insertions(+), 1738 deletions(-) create mode 100644 src/heart_flow/chatting_observation.py create mode 100644 src/heart_flow/hfcloop_observation.py create mode 100644 src/heart_flow/info/chat_info.py create mode 100644 src/heart_flow/info/cycle_info.py create mode 100644 src/heart_flow/info/info_base.py create mode 100644 src/heart_flow/info/mind_info.py create mode 100644 src/heart_flow/info/obs_info.py create mode 100644 src/heart_flow/info/structured_info.py create mode 100644 src/heart_flow/memory_observation.py create mode 100644 src/heart_flow/working_observation.py create mode 100644 src/plugins/heartFC_chat/expressors/default_expressor.py create mode 100644 src/plugins/heartFC_chat/hfc_utils.py create mode 100644 src/plugins/heartFC_chat/info_processors/base_processor.py create mode 100644 src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py rename src/{heart_flow/sub_mind.py => plugins/heartFC_chat/info_processors/mind_processor.py} (57%) create mode 100644 src/plugins/heartFC_chat/info_processors/processor_utils.py create mode 100644 src/plugins/heartFC_chat/info_processors/tool_processor.py diff --git a/src/config/config.py b/src/config/config.py index 5c2bdcc2a..be120f491 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -23,9 +23,9 @@ install(extra_lines=3) logger = get_logger("config") # 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 -is_test = False -mai_version_main = "0.6.3" -mai_version_fix = "fix-3" +is_test = True +mai_version_main = "0.6.4" +mai_version_fix = "snapshot-1" if mai_version_fix: if is_test: diff --git a/src/do_tool/tool_use.py 
b/src/do_tool/tool_use.py index b2f59cc8b..d17155ad8 100644 --- a/src/do_tool/tool_use.py +++ b/src/do_tool/tool_use.py @@ -7,7 +7,7 @@ import traceback from src.plugins.person_info.relationship_manager import relationship_manager from src.plugins.chat.utils import parse_text_timestamps from src.plugins.chat.chat_stream import ChatStream -from src.heart_flow.observation import ChattingObservation +from src.heart_flow.chatting_observation import ChattingObservation logger = get_logger("tool_use") diff --git a/src/heart_flow/chatting_observation.py b/src/heart_flow/chatting_observation.py new file mode 100644 index 000000000..e44c290cb --- /dev/null +++ b/src/heart_flow/chatting_observation.py @@ -0,0 +1,269 @@ +from datetime import datetime +from src.plugins.models.utils_model import LLMRequest +from src.config.config import global_config +import traceback +from src.plugins.utils.chat_message_builder import ( + get_raw_msg_before_timestamp_with_chat, + build_readable_messages, + get_raw_msg_by_timestamp_with_chat, + num_new_messages_since, + get_person_id_list, +) +from src.plugins.utils.prompt_builder import global_prompt_manager +from typing import Optional +import difflib +from src.plugins.chat.message import MessageRecv # 添加 MessageRecv 导入 +from src.heart_flow.observation import Observation +from src.common.logger_manager import get_logger +from src.heart_flow.utils_chat import get_chat_type_and_target_info + + +logger = get_logger(__name__) + + +# 聊天观察 +class ChattingObservation(Observation): + def __init__(self, chat_id): + super().__init__(chat_id) + self.chat_id = chat_id + + # --- Initialize attributes (defaults) --- + self.is_group_chat: bool = False + self.chat_target_info: Optional[dict] = None + # --- End Initialization --- + + # --- Other attributes initialized in __init__ --- + self.talking_message = [] + self.talking_message_str = "" + self.talking_message_str_truncate = "" + self.name = global_config.BOT_NICKNAME + self.nick_name = global_config.BOT_ALIAS_NAMES + self.max_now_obs_len = global_config.observation_context_size + self.overlap_len = global_config.compressed_length + self.mid_memorys = [] + self.max_mid_memory_len = global_config.compress_length_limit + self.mid_memory_info = "" + self.person_list = [] + self.llm_summary = LLMRequest( + model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" + ) + + async def initialize(self): + self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id) + logger.debug(f"初始化observation: self.is_group_chat: {self.is_group_chat}") + logger.debug(f"初始化observation: self.chat_target_info: {self.chat_target_info}") + initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10) + self.talking_message = initial_messages + self.talking_message_str = await build_readable_messages(self.talking_message) + + # 进行一次观察 返回观察结果observe_info + def get_observe_info(self, ids=None): + if ids: + mid_memory_str = "" + for id in ids: + print(f"id:{id}") + try: + for mid_memory in self.mid_memorys: + if mid_memory["id"] == id: + mid_memory_by_id = mid_memory + msg_str = "" + for msg in mid_memory_by_id["messages"]: + msg_str += f"{msg['detailed_plain_text']}" + # time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60) + # mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n" + mid_memory_str += f"{msg_str}\n" + except Exception as e: + logger.error(f"获取mid_memory_id失败: {e}") + traceback.print_exc() + return 
self.talking_message_str + + return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str + + else: + return self.talking_message_str + + def serch_message_by_text(self, text: str) -> Optional[MessageRecv]: + """ + 根据回复的纯文本 + 1. 在talking_message中查找最新的,最匹配的消息 + 2. 如果找到,则返回消息 + """ + msg_list = [] + find_msg = None + reverse_talking_message = list(reversed(self.talking_message)) + + for message in reverse_talking_message: + if message["processed_plain_text"] == text: + find_msg = message + logger.debug(f"找到的锚定消息:find_msg: {find_msg}") + break + else: + similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio() + msg_list.append({"message": message, "similarity": similarity}) + logger.debug(f"对锚定消息检查:message: {message['processed_plain_text']},similarity: {similarity}") + if not find_msg: + if msg_list: + msg_list.sort(key=lambda x: x["similarity"], reverse=True) + if msg_list[0]["similarity"] >= 0.5: # 只返回相似度大于等于0.5的消息 + find_msg = msg_list[0]["message"] + else: + logger.debug("没有找到锚定消息,相似度低") + return None + else: + logger.debug("没有找到锚定消息,没有消息捕获") + return None + + # logger.debug(f"找到的锚定消息:find_msg: {find_msg}") + group_info = find_msg.get("chat_info", {}).get("group_info") + user_info = find_msg.get("chat_info", {}).get("user_info") + + content_format = "" + accept_format = "" + template_items = {} + template_name = {} + template_default = True + + format_info = {"content_format": content_format, "accept_format": accept_format} + template_info = { + "template_items": template_items, + } + + message_info = { + "platform": find_msg.get("platform"), + "message_id": find_msg.get("message_id"), + "time": find_msg.get("time"), + "group_info": group_info, + "user_info": user_info, + "format_info": find_msg.get("format_info"), + "template_info": find_msg.get("template_info"), + "additional_config": find_msg.get("additional_config"), + "format_info": format_info, + "template_info": template_info, + } + message_dict = { + "message_info": message_info, + "raw_message": find_msg.get("processed_plain_text"), + "detailed_plain_text": find_msg.get("processed_plain_text"), + "processed_plain_text": find_msg.get("processed_plain_text"), + } + find_rec_msg = MessageRecv(message_dict) + logger.debug(f"锚定消息处理后:find_rec_msg: {find_rec_msg}") + return find_rec_msg + + async def observe(self): + # 自上一次观察的新消息 + new_messages_list = get_raw_msg_by_timestamp_with_chat( + chat_id=self.chat_id, + timestamp_start=self.last_observe_time, + timestamp_end=datetime.now().timestamp(), + limit=self.max_now_obs_len, + limit_mode="latest", + ) + + last_obs_time_mark = self.last_observe_time + if new_messages_list: + self.last_observe_time = new_messages_list[-1]["time"] + self.talking_message.extend(new_messages_list) + + if len(self.talking_message) > self.max_now_obs_len: + # 计算需要移除的消息数量,保留最新的 max_now_obs_len 条 + messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len + oldest_messages = self.talking_message[:messages_to_remove_count] + self.talking_message = self.talking_message[messages_to_remove_count:] # 保留后半部分,即最新的 + + oldest_messages_str = await build_readable_messages( + messages=oldest_messages, timestamp_mode="normal", read_mark=0 + ) + + # --- Build prompt using template --- + prompt = None # Initialize prompt as None + try: + # 构建 Prompt - 根据 is_group_chat 选择模板 + if self.is_group_chat: + prompt_template_name = "chat_summary_group_prompt" + prompt = await global_prompt_manager.format_prompt( + prompt_template_name, chat_logs=oldest_messages_str + ) + else: + # 
For private chat, add chat_target to the prompt variables + prompt_template_name = "chat_summary_private_prompt" + # Determine the target name for the prompt + chat_target_name = "对方" # Default fallback + if self.chat_target_info: + # Prioritize person_name, then nickname + chat_target_name = ( + self.chat_target_info.get("person_name") + or self.chat_target_info.get("user_nickname") + or chat_target_name + ) + + # Format the private chat prompt + prompt = await global_prompt_manager.format_prompt( + prompt_template_name, + # Assuming the private prompt template uses {chat_target} + chat_target=chat_target_name, + chat_logs=oldest_messages_str, + ) + except Exception as e: + logger.error(f"构建总结 Prompt 失败 for chat {self.chat_id}: {e}") + # prompt remains None + + summary = "没有主题的闲聊" # 默认值 + + if prompt: # Check if prompt was built successfully + try: + summary_result, _, _ = await self.llm_summary.generate_response(prompt) + if summary_result: # 确保结果不为空 + summary = summary_result + except Exception as e: + logger.error(f"总结主题失败 for chat {self.chat_id}: {e}") + # 保留默认总结 "没有主题的闲聊" + else: + logger.warning(f"因 Prompt 构建失败,跳过 LLM 总结 for chat {self.chat_id}") + + mid_memory = { + "id": str(int(datetime.now().timestamp())), + "theme": summary, + "messages": oldest_messages, # 存储原始消息对象 + "readable_messages": oldest_messages_str, + # "timestamps": oldest_timestamps, + "chat_id": self.chat_id, + "created_at": datetime.now().timestamp(), + } + + self.mid_memorys.append(mid_memory) + if len(self.mid_memorys) > self.max_mid_memory_len: + self.mid_memorys.pop(0) # 移除最旧的 + + mid_memory_str = "之前聊天的内容概述是:\n" + for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分 + time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) + mid_memory_str += ( + f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n" + ) + self.mid_memory_info = mid_memory_str + + self.talking_message_str = await build_readable_messages( + messages=self.talking_message, + timestamp_mode="lite", + read_mark=last_obs_time_mark, + ) + self.talking_message_str_truncate = await build_readable_messages( + messages=self.talking_message, + timestamp_mode="normal", + read_mark=last_obs_time_mark, + truncate=True, + ) + + self.person_list = await get_person_id_list(self.talking_message) + + # print(f"self.11111person_list: {self.person_list}") + + logger.trace( + f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}" + ) + + async def has_new_messages_since(self, timestamp: float) -> bool: + """检查指定时间戳之后是否有新消息""" + count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp) + return count > 0 diff --git a/src/heart_flow/hfcloop_observation.py b/src/heart_flow/hfcloop_observation.py new file mode 100644 index 000000000..c2d012667 --- /dev/null +++ b/src/heart_flow/hfcloop_observation.py @@ -0,0 +1,74 @@ +# 定义了来自外部世界的信息 +# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 +from datetime import datetime +from src.common.logger_manager import get_logger +from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleDetail +from typing import List +# Import the new utility function + +logger = get_logger("observation") + + +# 所有观察的基类 +class HFCloopObservation: + def __init__(self, observe_id): + self.observe_info = "" + self.observe_id = observe_id + self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 + self.history_loop: List[CycleDetail] = [] + + def get_observe_info(self): + return self.observe_info + + def add_loop_info(self, loop_info: CycleDetail): + 
logger.debug(f"添加循环信息111111111111111111111111111111111111: {loop_info}") + print(f"添加循环信息111111111111111111111111111111111111: {loop_info}") + print(f"action_taken: {loop_info.action_taken}") + print(f"action_type: {loop_info.action_type}") + print(f"response_info: {loop_info.response_info}") + self.history_loop.append(loop_info) + + async def observe(self): + recent_active_cycles: List[CycleDetail] = [] + for cycle in reversed(self.history_loop): + # 只关心实际执行了动作的循环 + if cycle.action_taken: + recent_active_cycles.append(cycle) + # 最多找最近的3个活动循环 + if len(recent_active_cycles) == 3: + break + + cycle_info_block = "" + consecutive_text_replies = 0 + responses_for_prompt = [] + + # 检查这最近的活动循环中有多少是连续的文本回复 (从最近的开始看) + for cycle in recent_active_cycles: + if cycle.action_type == "reply": + consecutive_text_replies += 1 + # 获取回复内容,如果不存在则返回'[空回复]' + response_text = cycle.response_info.get("response_text", []) + # 使用简单的 join 来格式化回复内容列表 + formatted_response = "[空回复]" if not response_text else " ".join(response_text) + responses_for_prompt.append(formatted_response) + else: + # 一旦遇到非文本回复,连续性中断 + break + + # 根据连续文本回复的数量构建提示信息 + # 注意: responses_for_prompt 列表是从最近到最远排序的 + if consecutive_text_replies >= 3: # 如果最近的三个活动都是文本回复 + cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' + elif consecutive_text_replies == 2: # 如果最近的两个活动是文本回复 + cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' + elif consecutive_text_replies == 1: # 如果最近的一个活动是文本回复 + cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")' + + # 包装提示块,增加可读性,即使没有连续回复也给个标记 + if cycle_info_block: + cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n" + else: + # 如果最近的活动循环不是文本回复,或者没有活动循环 + cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n" + + self.observe_info = cycle_info_block diff --git a/src/heart_flow/info/chat_info.py b/src/heart_flow/info/chat_info.py new file mode 100644 index 000000000..445529318 --- /dev/null +++ b/src/heart_flow/info/chat_info.py @@ -0,0 +1,97 @@ +from typing import Dict, Optional +from dataclasses import dataclass +from .info_base import InfoBase + + +@dataclass +class ChatInfo(InfoBase): + """聊天信息类 + + 用于记录和管理聊天相关的信息,包括聊天ID、名称和类型等。 + 继承自 InfoBase 类,使用字典存储具体数据。 + + Attributes: + type (str): 信息类型标识符,固定为 "chat" + + Data Fields: + chat_id (str): 聊天的唯一标识符 + chat_name (str): 聊天的名称 + chat_type (str): 聊天的类型 + """ + + type: str = "chat" + + def set_chat_id(self, chat_id: str) -> None: + """设置聊天ID + + Args: + chat_id (str): 聊天的唯一标识符 + """ + self.data["chat_id"] = chat_id + + def set_chat_name(self, chat_name: str) -> None: + """设置聊天名称 + + Args: + chat_name (str): 聊天的名称 + """ + self.data["chat_name"] = chat_name + + def set_chat_type(self, chat_type: str) -> None: + """设置聊天类型 + + Args: + chat_type (str): 聊天的类型 + """ + self.data["chat_type"] = chat_type + + def get_chat_id(self) -> Optional[str]: + """获取聊天ID + + Returns: + Optional[str]: 聊天的唯一标识符,如果未设置则返回 None + """ + return self.get_info("chat_id") + + def get_chat_name(self) -> Optional[str]: + """获取聊天名称 + + Returns: + Optional[str]: 聊天的名称,如果未设置则返回 None + """ + return self.get_info("chat_name") + + def get_chat_type(self) -> Optional[str]: + """获取聊天类型 + + Returns: + Optional[str]: 聊天的类型,如果未设置则返回 None + """ + return self.get_info("chat_type") + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, str]: + """获取所有信息数据 + + Returns: + Dict[str, str]: 
包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[str]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[str]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) diff --git a/src/heart_flow/info/cycle_info.py b/src/heart_flow/info/cycle_info.py new file mode 100644 index 000000000..3701aa153 --- /dev/null +++ b/src/heart_flow/info/cycle_info.py @@ -0,0 +1,157 @@ +from typing import Dict, Optional, Any +from dataclasses import dataclass +from .info_base import InfoBase + + +@dataclass +class CycleInfo(InfoBase): + """循环信息类 + + 用于记录和管理心跳循环的相关信息,包括循环ID、时间信息、动作信息等。 + 继承自 InfoBase 类,使用字典存储具体数据。 + + Attributes: + type (str): 信息类型标识符,固定为 "cycle" + + Data Fields: + cycle_id (str): 当前循环的唯一标识符 + start_time (str): 循环开始的时间 + end_time (str): 循环结束的时间 + action (str): 在循环中采取的动作 + action_data (Dict[str, Any]): 动作相关的详细数据 + reason (str): 触发循环的原因 + observe_info (str): 当前的回复信息 + """ + + type: str = "cycle" + + def get_type(self) -> str: + """获取信息类型""" + return self.type + + def get_data(self) -> Dict[str, str]: + """获取信息数据""" + return self.data + + def get_info(self, key: str) -> Optional[str]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + 属性值,如果键不存在则返回 None + """ + return self.data.get(key) + + def set_cycle_id(self, cycle_id: str) -> None: + """设置循环ID + + Args: + cycle_id (str): 循环的唯一标识符 + """ + self.data["cycle_id"] = cycle_id + + def set_start_time(self, start_time: str) -> None: + """设置开始时间 + + Args: + start_time (str): 循环开始的时间,建议使用标准时间格式 + """ + self.data["start_time"] = start_time + + def set_end_time(self, end_time: str) -> None: + """设置结束时间 + + Args: + end_time (str): 循环结束的时间,建议使用标准时间格式 + """ + self.data["end_time"] = end_time + + def set_action(self, action: str) -> None: + """设置采取的动作 + + Args: + action (str): 在循环中执行的动作名称 + """ + self.data["action"] = action + + def set_action_data(self, action_data: Dict[str, Any]) -> None: + """设置动作数据 + + Args: + action_data (Dict[str, Any]): 动作相关的详细数据,将被转换为字符串存储 + """ + self.data["action_data"] = str(action_data) + + def set_reason(self, reason: str) -> None: + """设置原因 + + Args: + reason (str): 触发循环的原因说明 + """ + self.data["reason"] = reason + + def set_observe_info(self, observe_info: str) -> None: + """设置回复信息 + + Args: + observe_info (str): 当前的回复信息 + """ + self.data["observe_info"] = observe_info + + def get_cycle_id(self) -> Optional[str]: + """获取循环ID + + Returns: + Optional[str]: 循环的唯一标识符,如果未设置则返回 None + """ + return self.get_info("cycle_id") + + def get_start_time(self) -> Optional[str]: + """获取开始时间 + + Returns: + Optional[str]: 循环开始的时间,如果未设置则返回 None + """ + return self.get_info("start_time") + + def get_end_time(self) -> Optional[str]: + """获取结束时间 + + Returns: + Optional[str]: 循环结束的时间,如果未设置则返回 None + """ + return self.get_info("end_time") + + def get_action(self) -> Optional[str]: + """获取采取的动作 + + Returns: + Optional[str]: 在循环中执行的动作名称,如果未设置则返回 None + """ + return self.get_info("action") + + def get_action_data(self) -> Optional[str]: + """获取动作数据 + + Returns: + Optional[str]: 动作相关的详细数据(字符串形式),如果未设置则返回 None + """ + return self.get_info("action_data") + + def get_reason(self) -> Optional[str]: + """获取原因 + + Returns: + Optional[str]: 触发循环的原因说明,如果未设置则返回 None + """ + return self.get_info("reason") + + def get_observe_info(self) -> Optional[str]: + """获取回复信息 + + Returns: + Optional[str]: 当前的回复信息,如果未设置则返回 None + """ + return self.get_info("observe_info") diff --git a/src/heart_flow/info/info_base.py b/src/heart_flow/info/info_base.py new file mode 100644 index 000000000..7779d913a --- /dev/null +++ 
b/src/heart_flow/info/info_base.py @@ -0,0 +1,60 @@ +from typing import Dict, Optional, Any, List +from dataclasses import dataclass, field + + +@dataclass +class InfoBase: + """信息基类 + + 这是一个基础信息类,用于存储和管理各种类型的信息数据。 + 所有具体的信息类都应该继承自这个基类。 + + Attributes: + type (str): 信息类型标识符,默认为 "base" + data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典, + 支持存储字符串、字典、列表等嵌套数据结构 + """ + + type: str = "base" + data: Dict[str, Any] = field(default_factory=dict) + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, Any]: + """获取所有信息数据 + + Returns: + Dict[str, Any]: 包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[Any]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[Any]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) + + def get_info_list(self, key: str) -> List[Any]: + """获取特定属性的信息列表 + + Args: + key: 要获取的属性键名 + + Returns: + List[Any]: 属性值列表,如果键不存在则返回空列表 + """ + value = self.data.get(key) + if isinstance(value, list): + return value + return [] diff --git a/src/heart_flow/info/mind_info.py b/src/heart_flow/info/mind_info.py new file mode 100644 index 000000000..3cfde1bbb --- /dev/null +++ b/src/heart_flow/info/mind_info.py @@ -0,0 +1,34 @@ +from typing import Dict, Any +from dataclasses import dataclass, field +from .info_base import InfoBase + + +@dataclass +class MindInfo(InfoBase): + """思维信息类 + + 用于存储和管理当前思维状态的信息。 + + Attributes: + type (str): 信息类型标识符,默认为 "mind" + data (Dict[str, Any]): 包含 current_mind 的数据字典 + """ + + type: str = "mind" + data: Dict[str, Any] = field(default_factory=lambda: {"current_mind": ""}) + + def get_current_mind(self) -> str: + """获取当前思维状态 + + Returns: + str: 当前思维状态 + """ + return self.get_info("current_mind") or "" + + def set_current_mind(self, mind: str) -> None: + """设置当前思维状态 + + Args: + mind: 要设置的思维状态 + """ + self.data["current_mind"] = mind diff --git a/src/heart_flow/info/obs_info.py b/src/heart_flow/info/obs_info.py new file mode 100644 index 000000000..9a3d41f56 --- /dev/null +++ b/src/heart_flow/info/obs_info.py @@ -0,0 +1,107 @@ +from typing import Dict, Optional +from dataclasses import dataclass +from .info_base import InfoBase + + +@dataclass +class ObsInfo(InfoBase): + """OBS信息类 + + 用于记录和管理OBS相关的信息,包括说话消息、截断后的说话消息和聊天类型。 + 继承自 InfoBase 类,使用字典存储具体数据。 + + Attributes: + type (str): 信息类型标识符,固定为 "obs" + + Data Fields: + talking_message (str): 说话消息内容 + talking_message_str_truncate (str): 截断后的说话消息内容 + chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) + """ + + type: str = "obs" + + def set_talking_message(self, message: str) -> None: + """设置说话消息 + + Args: + message (str): 说话消息内容 + """ + self.data["talking_message"] = message + + def set_talking_message_str_truncate(self, message: str) -> None: + """设置截断后的说话消息 + + Args: + message (str): 截断后的说话消息内容 + """ + self.data["talking_message_str_truncate"] = message + + def set_chat_type(self, chat_type: str) -> None: + """设置聊天类型 + + Args: + chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) + """ + if chat_type not in ["private", "group", "other"]: + chat_type = "other" + self.data["chat_type"] = chat_type + + def set_chat_target(self, chat_target: str) -> None: + """设置聊天目标 + + Args: + chat_target (str): 聊天目标,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) + """ + self.data["chat_target"] = chat_target + + def get_talking_message(self) -> Optional[str]: + """获取说话消息 + + Returns: + Optional[str]: 说话消息内容,如果未设置则返回 None + """ + return self.get_info("talking_message") + + def 
get_talking_message_str_truncate(self) -> Optional[str]: + """获取截断后的说话消息 + + Returns: + Optional[str]: 截断后的说话消息内容,如果未设置则返回 None + """ + return self.get_info("talking_message_str_truncate") + + def get_chat_type(self) -> str: + """获取聊天类型 + + Returns: + str: 聊天类型,默认为 "other" + """ + return self.get_info("chat_type") or "other" + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, str]: + """获取所有信息数据 + + Returns: + Dict[str, str]: 包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[str]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[str]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) diff --git a/src/heart_flow/info/structured_info.py b/src/heart_flow/info/structured_info.py new file mode 100644 index 000000000..61269c8f2 --- /dev/null +++ b/src/heart_flow/info/structured_info.py @@ -0,0 +1,69 @@ +from typing import Dict, Optional, Any, List +from dataclasses import dataclass, field + + +@dataclass +class StructuredInfo: + """信息基类 + + 这是一个基础信息类,用于存储和管理各种类型的信息数据。 + 所有具体的信息类都应该继承自这个基类。 + + Attributes: + type (str): 信息类型标识符,默认为 "base" + data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典, + 支持存储字符串、字典、列表等嵌套数据结构 + """ + + type: str = "structured_info" + data: Dict[str, Any] = field(default_factory=dict) + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, Any]: + """获取所有信息数据 + + Returns: + Dict[str, Any]: 包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[Any]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[Any]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) + + def get_info_list(self, key: str) -> List[Any]: + """获取特定属性的信息列表 + + Args: + key: 要获取的属性键名 + + Returns: + List[Any]: 属性值列表,如果键不存在则返回空列表 + """ + value = self.data.get(key) + if isinstance(value, list): + return value + return [] + + def set_info(self, key: str, value: Any) -> None: + """设置特定属性的信息值 + + Args: + key: 要设置的属性键名 + value: 要设置的属性值 + """ + self.data[key] = value diff --git a/src/heart_flow/memory_observation.py b/src/heart_flow/memory_observation.py new file mode 100644 index 000000000..347e7ce05 --- /dev/null +++ b/src/heart_flow/memory_observation.py @@ -0,0 +1,57 @@ +from src.heart_flow.chatting_observation import Observation +from datetime import datetime +from src.common.logger_manager import get_logger +import traceback + +# Import the new utility function +from src.plugins.memory_system.Hippocampus import HippocampusManager +import jieba +from typing import List + +logger = get_logger("memory") + + +class MemoryObservation(Observation): + def __init__(self, observe_id): + super().__init__(observe_id) + self.observe_info: str = "" + self.context: str = "" + self.running_memory: List[dict] = [] + + def get_observe_info(self): + for memory in self.running_memory: + self.observe_info += f"{memory['topic']}:{memory['content']}\n" + return self.observe_info + + async def observe(self): + # ---------- 2. 
获取记忆 ---------- + try: + # 从聊天内容中提取关键词 + chat_words = set(jieba.cut(self.context)) + # 过滤掉停用词和单字词 + keywords = [word for word in chat_words if len(word) > 1] + # 去重并限制数量 + keywords = list(set(keywords))[:5] + + logger.debug(f"取的关键词: {keywords}") + + # 调用记忆系统获取相关记忆 + related_memory = await HippocampusManager.get_instance().get_memory_from_topic( + valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3 + ) + + logger.debug(f"获取到的记忆: {related_memory}") + + if related_memory: + for topic, memory in related_memory: + new_item = {"type": "memory", "id": topic, "content": memory, "ttl": 3} + self.structured_info.append(new_item) + # 将记忆添加到 running_memory + self.running_memory.append( + {"topic": topic, "content": memory, "timestamp": datetime.now().isoformat()} + ) + logger.debug(f"添加新记忆: {topic} - {memory}") + + except Exception as e: + logger.error(f"观察 记忆时出错: {e}") + logger.error(traceback.format_exc()) diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py index 2d819a880..9b43d79aa 100644 --- a/src/heart_flow/observation.py +++ b/src/heart_flow/observation.py @@ -1,24 +1,10 @@ # 定义了来自外部世界的信息 # 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 from datetime import datetime -from src.plugins.models.utils_model import LLMRequest -from src.config.config import global_config from src.common.logger_manager import get_logger -import traceback -from src.plugins.utils.chat_message_builder import ( - get_raw_msg_before_timestamp_with_chat, - build_readable_messages, - get_raw_msg_by_timestamp_with_chat, - num_new_messages_since, - get_person_id_list, -) -from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager -from typing import Optional -import difflib -from src.plugins.chat.message import MessageRecv # 添加 MessageRecv 导入 +from src.plugins.utils.prompt_builder import Prompt # Import the new utility function -from .utils_chat import get_chat_type_and_target_info logger = get_logger("observation") @@ -41,259 +27,10 @@ Prompt( # 所有观察的基类 class Observation: - def __init__(self, observe_type, observe_id): + def __init__(self, observe_id): self.observe_info = "" - self.observe_type = observe_type self.observe_id = observe_id self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 async def observe(self): pass - - -# 聊天观察 -class ChattingObservation(Observation): - def __init__(self, chat_id): - super().__init__("chat", chat_id) - self.chat_id = chat_id - - # --- Initialize attributes (defaults) --- - self.is_group_chat: bool = False - self.chat_target_info: Optional[dict] = None - # --- End Initialization --- - - # --- Other attributes initialized in __init__ --- - self.talking_message = [] - self.talking_message_str = "" - self.talking_message_str_truncate = "" - self.name = global_config.BOT_NICKNAME - self.nick_name = global_config.BOT_ALIAS_NAMES - self.max_now_obs_len = global_config.observation_context_size - self.overlap_len = global_config.compressed_length - self.mid_memorys = [] - self.max_mid_memory_len = global_config.compress_length_limit - self.mid_memory_info = "" - self.person_list = [] - self.llm_summary = LLMRequest( - model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" - ) - - async def initialize(self): - # --- Use utility function to determine chat type and fetch info --- - self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id) - # logger.debug(f"is_group_chat: {self.is_group_chat}") - # logger.debug(f"chat_target_info: {self.chat_target_info}") - # --- 
End using utility function --- - - # Fetch initial messages (existing logic) - initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10) - self.talking_message = initial_messages - self.talking_message_str = await build_readable_messages(self.talking_message) - - # 进行一次观察 返回观察结果observe_info - def get_observe_info(self, ids=None): - if ids: - mid_memory_str = "" - for id in ids: - print(f"id:{id}") - try: - for mid_memory in self.mid_memorys: - if mid_memory["id"] == id: - mid_memory_by_id = mid_memory - msg_str = "" - for msg in mid_memory_by_id["messages"]: - msg_str += f"{msg['detailed_plain_text']}" - # time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60) - # mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n" - mid_memory_str += f"{msg_str}\n" - except Exception as e: - logger.error(f"获取mid_memory_id失败: {e}") - traceback.print_exc() - return self.talking_message_str - - return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str - - else: - return self.talking_message_str - - async def observe(self): - # 自上一次观察的新消息 - new_messages_list = get_raw_msg_by_timestamp_with_chat( - chat_id=self.chat_id, - timestamp_start=self.last_observe_time, - timestamp_end=datetime.now().timestamp(), - limit=self.max_now_obs_len, - limit_mode="latest", - ) - - last_obs_time_mark = self.last_observe_time - if new_messages_list: - self.last_observe_time = new_messages_list[-1]["time"] - self.talking_message.extend(new_messages_list) - - if len(self.talking_message) > self.max_now_obs_len: - # 计算需要移除的消息数量,保留最新的 max_now_obs_len 条 - messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len - oldest_messages = self.talking_message[:messages_to_remove_count] - self.talking_message = self.talking_message[messages_to_remove_count:] # 保留后半部分,即最新的 - - oldest_messages_str = await build_readable_messages( - messages=oldest_messages, timestamp_mode="normal", read_mark=0 - ) - - # --- Build prompt using template --- - prompt = None # Initialize prompt as None - try: - # 构建 Prompt - 根据 is_group_chat 选择模板 - if self.is_group_chat: - prompt_template_name = "chat_summary_group_prompt" - prompt = await global_prompt_manager.format_prompt( - prompt_template_name, chat_logs=oldest_messages_str - ) - else: - # For private chat, add chat_target to the prompt variables - prompt_template_name = "chat_summary_private_prompt" - # Determine the target name for the prompt - chat_target_name = "对方" # Default fallback - if self.chat_target_info: - # Prioritize person_name, then nickname - chat_target_name = ( - self.chat_target_info.get("person_name") - or self.chat_target_info.get("user_nickname") - or chat_target_name - ) - - # Format the private chat prompt - prompt = await global_prompt_manager.format_prompt( - prompt_template_name, - # Assuming the private prompt template uses {chat_target} - chat_target=chat_target_name, - chat_logs=oldest_messages_str, - ) - except Exception as e: - logger.error(f"构建总结 Prompt 失败 for chat {self.chat_id}: {e}") - # prompt remains None - - summary = "没有主题的闲聊" # 默认值 - - if prompt: # Check if prompt was built successfully - try: - summary_result, _, _ = await self.llm_summary.generate_response(prompt) - if summary_result: # 确保结果不为空 - summary = summary_result - except Exception as e: - logger.error(f"总结主题失败 for chat {self.chat_id}: {e}") - # 保留默认总结 "没有主题的闲聊" - else: - logger.warning(f"因 Prompt 构建失败,跳过 LLM 总结 for chat {self.chat_id}") - - mid_memory = { - "id": str(int(datetime.now().timestamp())), - "theme": 
summary, - "messages": oldest_messages, # 存储原始消息对象 - "readable_messages": oldest_messages_str, - # "timestamps": oldest_timestamps, - "chat_id": self.chat_id, - "created_at": datetime.now().timestamp(), - } - - self.mid_memorys.append(mid_memory) - if len(self.mid_memorys) > self.max_mid_memory_len: - self.mid_memorys.pop(0) # 移除最旧的 - - mid_memory_str = "之前聊天的内容概述是:\n" - for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分 - time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) - mid_memory_str += ( - f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n" - ) - self.mid_memory_info = mid_memory_str - - self.talking_message_str = await build_readable_messages( - messages=self.talking_message, - timestamp_mode="lite", - read_mark=last_obs_time_mark, - ) - self.talking_message_str_truncate = await build_readable_messages( - messages=self.talking_message, - timestamp_mode="normal", - read_mark=last_obs_time_mark, - truncate=True, - ) - - self.person_list = await get_person_id_list(self.talking_message) - - # print(f"self.11111person_list: {self.person_list}") - - logger.trace( - f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}" - ) - - async def find_best_matching_message(self, search_str: str, min_similarity: float = 0.6) -> Optional[MessageRecv]: - """ - 在 talking_message 中查找与 search_str 最匹配的消息。 - - Args: - search_str: 要搜索的字符串。 - min_similarity: 要求的最低相似度(0到1之间)。 - - Returns: - 匹配的 MessageRecv 实例,如果找不到则返回 None。 - """ - best_match_score = -1.0 - best_match_dict = None - - if not self.talking_message: - logger.debug(f"Chat {self.chat_id}: talking_message is empty, cannot find match for '{search_str}'") - return None - - for message_dict in self.talking_message: - try: - # 临时创建 MessageRecv 以处理文本 - temp_msg = MessageRecv(message_dict) - await temp_msg.process() # 处理消息以获取 processed_plain_text - current_text = temp_msg.processed_plain_text - - if not current_text: # 跳过没有文本内容的消息 - continue - - # 计算相似度 - matcher = difflib.SequenceMatcher(None, search_str, current_text) - score = matcher.ratio() - - # logger.debug(f"Comparing '{search_str}' with '{current_text}', score: {score}") # 可选:用于调试 - - if score > best_match_score: - best_match_score = score - best_match_dict = message_dict - - except Exception as e: - logger.error(f"Error processing message for matching in chat {self.chat_id}: {e}", exc_info=True) - continue # 继续处理下一条消息 - - if best_match_dict is not None and best_match_score >= min_similarity: - logger.debug(f"Found best match for '{search_str}' with score {best_match_score:.2f}") - try: - final_msg = MessageRecv(best_match_dict) - await final_msg.process() - # 确保 MessageRecv 实例有关联的 chat_stream - if hasattr(self, "chat_stream"): - final_msg.update_chat_stream(self.chat_stream) - else: - logger.warning( - f"ChattingObservation instance for chat {self.chat_id} does not have a chat_stream attribute set." 
- ) - return final_msg - except Exception as e: - logger.error(f"Error creating final MessageRecv for chat {self.chat_id}: {e}", exc_info=True) - return None - else: - logger.debug( - f"No suitable match found for '{search_str}' in chat {self.chat_id} (best score: {best_match_score:.2f}, threshold: {min_similarity})" - ) - return None - - async def has_new_messages_since(self, timestamp: float) -> bool: - """检查指定时间戳之后是否有新消息""" - count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp) - return count > 0 diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py index 5be0d73cd..a002a6fad 100644 --- a/src/heart_flow/sub_heartflow.py +++ b/src/heart_flow/sub_heartflow.py @@ -1,4 +1,5 @@ -from .observation import Observation, ChattingObservation +from .observation import Observation +from .chatting_observation import ChattingObservation import asyncio import time from typing import Optional, List, Dict, Tuple, Callable, Coroutine @@ -10,7 +11,6 @@ from src.plugins.heartFC_chat.heartFC_chat import HeartFChatting from src.plugins.heartFC_chat.normal_chat import NormalChat from src.heart_flow.mai_state_manager import MaiStateInfo from src.heart_flow.chat_state_info import ChatState, ChatStateInfo -from src.heart_flow.sub_mind import SubMind from .utils_chat import get_chat_type_and_target_info from .interest_chatting import InterestChatting @@ -68,11 +68,6 @@ class SubHeartflow: self.observations: List[ChattingObservation] = [] # 观察列表 # self.running_knowledges = [] # 运行中的知识,待完善 - # LLM模型配置,负责进行思考 - self.sub_mind = SubMind( - subheartflow_id=self.subheartflow_id, chat_state=self.chat_state, observations=self.observations - ) - # 日志前缀 - Moved determination to initialize self.log_prefix = str(subheartflow_id) # Initial default prefix @@ -186,7 +181,6 @@ class SubHeartflow: # 创建 HeartFChatting 实例,并传递 从构造函数传入的 回调函数 self.heart_fc_instance = HeartFChatting( chat_id=self.subheartflow_id, - sub_mind=self.sub_mind, observations=self.observations, # 传递所有观察者 on_consecutive_no_reply_callback=self.hfc_no_reply_callback, # <-- Use stored callback ) @@ -288,9 +282,6 @@ class SubHeartflow: logger.info(f"{self.log_prefix} 子心流后台任务已停止。") - def update_current_mind(self, response): - self.sub_mind.update_current_mind(response) - def add_observation(self, observation: Observation): for existing_obs in self.observations: if existing_obs.observe_id == observation.observe_id: @@ -332,7 +323,6 @@ class SubHeartflow: interest_state = await self.get_interest_state() return { "interest_state": interest_state, - "current_mind": self.sub_mind.current_mind, "chat_state": self.chat_state.chat_status.value, "chat_state_changed_time": self.chat_state_changed_time, } diff --git a/src/heart_flow/subheartflow_manager.py b/src/heart_flow/subheartflow_manager.py index c074d29a2..4d5f99983 100644 --- a/src/heart_flow/subheartflow_manager.py +++ b/src/heart_flow/subheartflow_manager.py @@ -14,7 +14,7 @@ from src.plugins.chat.chat_stream import chat_manager # 导入心流相关类 from src.heart_flow.sub_heartflow import SubHeartflow, ChatState from src.heart_flow.mai_state_manager import MaiStateInfo -from .observation import ChattingObservation +from src.heart_flow.chatting_observation import ChattingObservation # 导入LLM请求工具 from src.plugins.models.utils_model import LLMRequest diff --git a/src/heart_flow/tool_user.py b/src/heart_flow/tool_user.py index 25345819d..92533542a 100644 --- a/src/heart_flow/tool_user.py +++ b/src/heart_flow/tool_user.py @@ -2,27 +2,20 @@ from .observation import ChattingObservation 
from src.plugins.models.utils_model import LLMRequest from src.config.config import global_config import time -import traceback from src.common.logger_manager import get_logger from src.individuality.individuality import Individuality -import random from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager from src.do_tool.tool_use import ToolUser -from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls -from src.heart_flow.chat_state_info import ChatStateInfo -from src.plugins.chat.chat_stream import chat_manager -from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo -import difflib +from src.plugins.utils.json_utils import process_llm_tool_calls from src.plugins.person_info.relationship_manager import relationship_manager -from src.plugins.memory_system.Hippocampus import HippocampusManager -import jieba -from src.common.logger_manager import get_logger from src.heart_flow.sub_mind import SubMind + logger = get_logger("tool_use") + def init_prompt(): # ... 原有代码 ... - + # 添加工具执行器提示词 tool_executor_prompt = """ 你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。 @@ -51,6 +44,7 @@ def init_prompt(): """ Prompt(tool_executor_prompt, "tool_executor_prompt") + class ToolExecutor: def __init__(self, subheartflow_id: str): self.subheartflow_id = subheartflow_id @@ -62,18 +56,20 @@ class ToolExecutor: request_type="tool_execution", ) self.structured_info = [] - - async def execute_tools(self, sub_mind: SubMind, chat_target_name="对方", is_group_chat=False, return_details=False, cycle_info=None): + + async def execute_tools( + self, sub_mind: SubMind, chat_target_name="对方", is_group_chat=False, return_details=False, cycle_info=None + ): """ 并行执行工具,返回结构化信息 - + 参数: sub_mind: 子思维对象 chat_target_name: 聊天目标名称,默认为"对方" is_group_chat: 是否为群聊,默认为False return_details: 是否返回详细信息,默认为False cycle_info: 循环信息对象,可用于记录详细执行信息 - + 返回: 如果return_details为False: List[Dict]: 工具执行结果的结构化信息列表 @@ -83,31 +79,31 @@ class ToolExecutor: # 初始化工具 tool_instance = ToolUser() tools = tool_instance._define_tools() - + observation: ChattingObservation = sub_mind.observations[0] if sub_mind.observations else None - + # 获取观察内容 chat_observe_info = observation.get_observe_info() person_list = observation.person_list - + # extra structured info extra_structured_info = sub_mind.structured_info_str - + # 构建关系信息 relation_prompt = "【关系信息】\n" for person in person_list: relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) - + # 获取个性信息 individuality = Individuality.get_instance() prompt_personality = individuality.get_prompt(x_person=2, level=2) - + # 获取心情信息 mood_info = observation.chat_state.mood if hasattr(observation, "chat_state") else "" - + # 获取时间信息 time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - + # 构建专用于工具调用的提示词 prompt = await global_prompt_manager.format_prompt( "tool_executor_prompt", @@ -119,24 +115,22 @@ class ToolExecutor: prompt_personality=prompt_personality, mood_info=mood_info, bot_name=individuality.name, - time_now=time_now + time_now=time_now, ) - + # 如果指定了cycle_info,记录工具执行的prompt if cycle_info: cycle_info.set_tooluse_info(prompt=prompt) - + # 调用LLM,专注于工具使用 logger.info(f"开始执行工具调用{prompt}") - response, _, tool_calls = await self.llm_model.generate_response_tool_async( - prompt=prompt, tools=tools - ) - + response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools) + logger.debug(f"获取到工具原始输出:\n{tool_calls}") # 处理工具调用和结果收集,类似于SubMind中的逻辑 new_structured_items = [] used_tools = [] # 记录使用了哪些工具 - + if 
tool_calls: success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls) if success and valid_tool_calls: @@ -145,12 +139,12 @@ class ToolExecutor: # 记录使用的工具名称 tool_name = tool_call.get("name", "unknown_tool") used_tools.append(tool_name) - + result = await tool_instance._execute_tool_call(tool_call) - + name = result.get("type", "unknown_type") content = result.get("content", "") - + logger.info(f"工具{name},获得信息:{content}") if result: new_item = { @@ -162,14 +156,11 @@ class ToolExecutor: new_structured_items.append(new_item) except Exception as e: logger.error(f"{self.log_prefix}工具执行失败: {e}") - + # 如果指定了cycle_info,记录工具执行结果 if cycle_info: - cycle_info.set_tooluse_info( - tools_used=used_tools, - tool_results=new_structured_items - ) - + cycle_info.set_tooluse_info(tools_used=used_tools, tool_results=new_structured_items) + # 根据return_details决定返回值 if return_details: return new_structured_items, used_tools, prompt @@ -177,4 +168,4 @@ class ToolExecutor: return new_structured_items -init_prompt() \ No newline at end of file +init_prompt() diff --git a/src/heart_flow/working_observation.py b/src/heart_flow/working_observation.py new file mode 100644 index 000000000..27b6ab92d --- /dev/null +++ b/src/heart_flow/working_observation.py @@ -0,0 +1,34 @@ +# 定义了来自外部世界的信息 +# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 +from datetime import datetime +from src.common.logger_manager import get_logger + +# Import the new utility function + +logger = get_logger("observation") + + +# 所有观察的基类 +class WorkingObservation: + def __init__(self, observe_id): + self.observe_info = "" + self.observe_id = observe_id + self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 + self.history_loop = [] + self.structured_info = [] + + def get_observe_info(self): + return self.structured_info + + def add_structured_info(self, structured_info: dict): + self.structured_info.append(structured_info) + + async def observe(self): + observed_structured_infos = [] + for structured_info in self.structured_info: + if structured_info.get("ttl") > 0: + structured_info["ttl"] -= 1 + observed_structured_infos.append(structured_info) + logger.debug(f"观察到结构化信息仍旧在: {structured_info}") + + self.structured_info = observed_structured_infos diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py index b9c152889..578b8edc3 100644 --- a/src/plugins/chat/message.py +++ b/src/plugins/chat/message.py @@ -100,6 +100,7 @@ class MessageRecv(Message): Args: message_dict: MessageCQ序列化后的字典 """ + # print(f"message_dict: {message_dict}") self.message_info = BaseMessageInfo.from_dict(message_dict.get("message_info", {})) self.message_segment = Seg.from_dict(message_dict.get("message_segment", {})) diff --git a/src/plugins/chat/message_sender.py b/src/plugins/chat/message_sender.py index 104a5ea49..408c59a00 100644 --- a/src/plugins/chat/message_sender.py +++ b/src/plugins/chat/message_sender.py @@ -212,7 +212,7 @@ class MessageManager: _ = message.update_thinking_time() # 更新思考时间 thinking_start_time = message.thinking_start_time now_time = time.time() - logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}") + # logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}") thinking_messages_count, thinking_messages_length = count_messages_between( start_time=thinking_start_time, end_time=now_time, stream_id=message.chat_stream.stream_id ) @@ -236,7 +236,7 @@ class MessageManager: await message.process() # 预处理消息内容 - logger.debug(f"{message}") + # logger.debug(f"{message}") # 使用全局 message_sender 实例 
await send_message(message) diff --git a/src/plugins/chat/utils_image.py b/src/plugins/chat/utils_image.py index 5508ad233..455038246 100644 --- a/src/plugins/chat/utils_image.py +++ b/src/plugins/chat/utils_image.py @@ -117,7 +117,7 @@ class ImageManager: cached_description = self._get_description_from_db(image_hash, "emoji") if cached_description: # logger.debug(f"缓存表情包描述: {cached_description}") - return f"[表达了:{cached_description}]" + return f"[表情包,含义看起来是:{cached_description}]" # 调用AI获取描述 if image_format == "gif" or image_format == "GIF": @@ -131,7 +131,7 @@ class ImageManager: cached_description = self._get_description_from_db(image_hash, "emoji") if cached_description: logger.warning(f"虽然生成了描述,但是找到缓存表情包描述: {cached_description}") - return f"[表达了:{cached_description}]" + return f"[表情包,含义看起来是:{cached_description}]" # 根据配置决定是否保存图片 if global_config.save_emoji: diff --git a/src/plugins/heartFC_chat/cycle_analyzer.py b/src/plugins/heartFC_chat/cycle_analyzer.py index a36bd8416..6ae3e8f66 100644 --- a/src/plugins/heartFC_chat/cycle_analyzer.py +++ b/src/plugins/heartFC_chat/cycle_analyzer.py @@ -1,48 +1,47 @@ import os import time -import json -from typing import List, Dict, Any, Optional, Tuple +from typing import List, Dict, Any, Tuple from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo from src.common.logger_manager import get_logger logger = get_logger("cycle_analyzer") + class CycleAnalyzer: """循环信息分析类,提供查询和分析CycleInfo的工具""" - + def __init__(self, base_dir: str = "log_debug"): """ 初始化分析器 - + 参数: base_dir: 存储CycleInfo的基础目录,默认为log_debug """ self.base_dir = base_dir - + def list_streams(self) -> List[str]: """ 获取所有聊天流ID列表 - + 返回: List[str]: 聊天流ID列表 """ try: if not os.path.exists(self.base_dir): return [] - - return [d for d in os.listdir(self.base_dir) - if os.path.isdir(os.path.join(self.base_dir, d))] + + return [d for d in os.listdir(self.base_dir) if os.path.isdir(os.path.join(self.base_dir, d))] except Exception as e: logger.error(f"获取聊天流列表时出错: {e}") return [] - + def get_stream_cycle_count(self, stream_id: str) -> int: """ 获取指定聊天流的循环数量 - + 参数: stream_id: 聊天流ID - + 返回: int: 循环数量 """ @@ -52,16 +51,16 @@ class CycleAnalyzer: except Exception as e: logger.error(f"获取聊天流循环数量时出错: {e}") return 0 - + def get_stream_cycles(self, stream_id: str, start: int = 0, limit: int = -1) -> List[str]: """ 获取指定聊天流的循环文件列表 - + 参数: stream_id: 聊天流ID start: 起始索引,默认为0 limit: 返回的最大数量,默认为-1(全部) - + 返回: List[str]: 循环文件路径列表 """ @@ -70,38 +69,38 @@ class CycleAnalyzer: if limit < 0: return files[start:] else: - return files[start:start+limit] + return files[start : start + limit] except Exception as e: logger.error(f"获取聊天流循环文件列表时出错: {e}") return [] - + def get_cycle_content(self, filepath: str) -> str: """ 获取循环文件的内容 - + 参数: filepath: 文件路径 - + 返回: str: 文件内容 """ try: if not os.path.exists(filepath): return f"文件不存在: {filepath}" - - with open(filepath, 'r', encoding='utf-8') as f: + + with open(filepath, "r", encoding="utf-8") as f: return f.read() except Exception as e: logger.error(f"读取循环文件内容时出错: {e}") return f"读取文件出错: {e}" - + def analyze_stream_cycles(self, stream_id: str) -> Dict[str, Any]: """ 分析指定聊天流的所有循环,生成统计信息 - + 参数: stream_id: 聊天流ID - + 返回: Dict[str, Any]: 统计信息 """ @@ -109,75 +108,75 @@ class CycleAnalyzer: files = CycleInfo.list_cycles(stream_id, self.base_dir) if not files: return {"error": "没有找到循环记录"} - + total_cycles = len(files) action_counts = {"text_reply": 0, "emoji_reply": 0, "no_reply": 0, "unknown": 0} total_duration = 0 tool_usage = {} - + for filepath in files: - with open(filepath, 
'r', encoding='utf-8') as f: + with open(filepath, "r", encoding="utf-8") as f: content = f.read() - + # 解析动作类型 - for line in content.split('\n'): + for line in content.split("\n"): if line.startswith("动作:"): action = line[3:].strip() action_counts[action] = action_counts.get(action, 0) + 1 - + # 解析耗时 elif line.startswith("耗时:"): try: - duration = float(line[3:].strip().split('秒')[0]) + duration = float(line[3:].strip().split("秒")[0]) total_duration += duration except: pass - + # 解析工具使用 elif line.startswith("使用的工具:"): - tools = line[6:].strip().split(', ') + tools = line[6:].strip().split(", ") for tool in tools: tool_usage[tool] = tool_usage.get(tool, 0) + 1 - + avg_duration = total_duration / total_cycles if total_cycles > 0 else 0 - + return { "总循环数": total_cycles, "动作统计": action_counts, "平均耗时": f"{avg_duration:.2f}秒", "总耗时": f"{total_duration:.2f}秒", - "工具使用次数": tool_usage + "工具使用次数": tool_usage, } except Exception as e: logger.error(f"分析聊天流循环时出错: {e}") return {"error": f"分析出错: {e}"} - + def get_latest_cycles(self, count: int = 10) -> List[Tuple[str, str]]: """ 获取所有聊天流中最新的几个循环 - + 参数: count: 获取的数量,默认为10 - + 返回: List[Tuple[str, str]]: 聊天流ID和文件路径的元组列表 """ try: all_cycles = [] streams = self.list_streams() - + for stream_id in streams: files = CycleInfo.list_cycles(stream_id, self.base_dir) for filepath in files: try: # 从文件名中提取时间戳 filename = os.path.basename(filepath) - timestamp_str = filename.split('_', 2)[2].split('.')[0] + timestamp_str = filename.split("_", 2)[2].split(".")[0] timestamp = time.mktime(time.strptime(timestamp_str, "%Y%m%d_%H%M%S")) all_cycles.append((timestamp, stream_id, filepath)) except: continue - + # 按时间戳排序,取最新的count个 all_cycles.sort(reverse=True) return [(item[1], item[2]) for item in all_cycles[:count]] @@ -189,11 +188,11 @@ class CycleAnalyzer: # 使用示例 if __name__ == "__main__": analyzer = CycleAnalyzer() - + # 列出所有聊天流 streams = analyzer.list_streams() print(f"找到 {len(streams)} 个聊天流: {streams}") - + # 分析第一个聊天流的循环 if streams: stream_id = streams[0] @@ -201,15 +200,15 @@ if __name__ == "__main__": print(f"\n聊天流 {stream_id} 的统计信息:") for key, value in stats.items(): print(f" {key}: {value}") - + # 获取最新的循环 cycles = analyzer.get_stream_cycles(stream_id, limit=1) if cycles: - print(f"\n最新循环内容:") + print("\n最新循环内容:") print(analyzer.get_cycle_content(cycles[0])) - + # 获取所有聊天流中最新的3个循环 latest_cycles = analyzer.get_latest_cycles(3) print(f"\n所有聊天流中最新的 {len(latest_cycles)} 个循环:") for stream_id, filepath in latest_cycles: - print(f" 聊天流 {stream_id}: {os.path.basename(filepath)}") \ No newline at end of file + print(f" 聊天流 {stream_id}: {os.path.basename(filepath)}") diff --git a/src/plugins/heartFC_chat/cycle_viewer.py b/src/plugins/heartFC_chat/cycle_viewer.py index fbbd5626e..e99ccf1c6 100644 --- a/src/plugins/heartFC_chat/cycle_viewer.py +++ b/src/plugins/heartFC_chat/cycle_viewer.py @@ -1,119 +1,123 @@ import os -import sys import argparse -from typing import List, Dict, Any from src.plugins.heartFC_chat.cycle_analyzer import CycleAnalyzer + def print_section(title: str, width: int = 80): """打印分隔线和标题""" print("\n" + "=" * width) print(f" {title} ".center(width, "=")) print("=" * width) + def list_streams_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace): """列出所有聊天流""" print_section("所有聊天流") streams = analyzer.list_streams() - + if not streams: print("没有找到任何聊天流记录。") return - + for i, stream_id in enumerate(streams): count = analyzer.get_stream_cycle_count(stream_id) - print(f"[{i+1}] {stream_id} - {count} 个循环") + print(f"[{i + 1}] {stream_id} - {count} 个循环") + def 
analyze_stream_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace): """分析指定聊天流的循环信息""" stream_id = args.stream_id - + print_section(f"聊天流 {stream_id} 分析") stats = analyzer.analyze_stream_cycles(stream_id) - + if "error" in stats: print(f"错误: {stats['error']}") return - + print("基本统计:") print(f" 总循环数: {stats['总循环数']}") print(f" 总耗时: {stats['总耗时']}") print(f" 平均耗时: {stats['平均耗时']}") - + print("\n动作统计:") - for action, count in stats['动作统计'].items(): + for action, count in stats["动作统计"].items(): if count > 0: - percent = (count / stats['总循环数']) * 100 + percent = (count / stats["总循环数"]) * 100 print(f" {action}: {count} ({percent:.1f}%)") - - if stats.get('工具使用次数'): + + if stats.get("工具使用次数"): print("\n工具使用次数:") - for tool, count in stats['工具使用次数'].items(): + for tool, count in stats["工具使用次数"].items(): print(f" {tool}: {count}") + def list_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace): """列出指定聊天流的循环""" stream_id = args.stream_id limit = args.limit if args.limit > 0 else -1 - + print_section(f"聊天流 {stream_id} 的循环列表") cycles = analyzer.get_stream_cycles(stream_id) - + if not cycles: print("没有找到任何循环记录。") return - + if limit > 0: cycles = cycles[-limit:] # 取最新的limit个 print(f"显示最新的 {limit} 个循环 (共 {len(cycles)} 个):") else: print(f"共找到 {len(cycles)} 个循环:") - + for i, filepath in enumerate(cycles): filename = os.path.basename(filepath) - cycle_id = filename.split('_')[1] - timestamp = filename.split('_', 2)[2].split('.')[0] - print(f"[{i+1}] 循环ID: {cycle_id}, 时间: {timestamp}, 文件: {filename}") + cycle_id = filename.split("_")[1] + timestamp = filename.split("_", 2)[2].split(".")[0] + print(f"[{i + 1}] 循环ID: {cycle_id}, 时间: {timestamp}, 文件: {filename}") + def view_cycle_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace): """查看指定循环的详细信息""" stream_id = args.stream_id cycle_index = args.cycle_index - 1 # 转换为0-based索引 - + cycles = analyzer.get_stream_cycles(stream_id) if not cycles: print(f"错误: 聊天流 {stream_id} 没有找到任何循环记录。") return - + if cycle_index < 0 or cycle_index >= len(cycles): print(f"错误: 循环索引 {args.cycle_index} 超出范围 (1-{len(cycles)})。") return - + filepath = cycles[cycle_index] filename = os.path.basename(filepath) - + print_section(f"循环详情: {filename}") content = analyzer.get_cycle_content(filepath) print(content) + def latest_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace): """查看所有聊天流中最新的几个循环""" count = args.count if args.count > 0 else 10 - + print_section(f"最新的 {count} 个循环") latest_cycles = analyzer.get_latest_cycles(count) - + if not latest_cycles: print("没有找到任何循环记录。") return - + for i, (stream_id, filepath) in enumerate(latest_cycles): filename = os.path.basename(filepath) - cycle_id = filename.split('_')[1] - timestamp = filename.split('_', 2)[2].split('.')[0] - print(f"[{i+1}] 聊天流: {stream_id}, 循环ID: {cycle_id}, 时间: {timestamp}") - + cycle_id = filename.split("_")[1] + timestamp = filename.split("_", 2)[2].split(".")[0] + print(f"[{i + 1}] 聊天流: {stream_id}, 循环ID: {cycle_id}, 时间: {timestamp}") + # 可以选择性添加提取基本信息的功能 - with open(filepath, 'r', encoding='utf-8') as f: + with open(filepath, "r", encoding="utf-8") as f: for line in f: if line.startswith("动作:"): action = line.strip() @@ -121,35 +125,36 @@ def latest_cycles_cmd(analyzer: CycleAnalyzer, args: argparse.Namespace): break print() + def main(): parser = argparse.ArgumentParser(description="HeartFC循环信息查看工具") subparsers = parser.add_subparsers(dest="command", help="子命令") - + # 列出所有聊天流 list_streams_parser = subparsers.add_parser("list-streams", help="列出所有聊天流") - + # 分析聊天流 analyze_parser = 
subparsers.add_parser("analyze", help="分析指定聊天流的循环信息") analyze_parser.add_argument("stream_id", help="聊天流ID") - + # 列出聊天流的循环 list_cycles_parser = subparsers.add_parser("list-cycles", help="列出指定聊天流的循环") list_cycles_parser.add_argument("stream_id", help="聊天流ID") list_cycles_parser.add_argument("-l", "--limit", type=int, default=-1, help="显示最新的N个循环") - + # 查看指定循环 view_parser = subparsers.add_parser("view", help="查看指定循环的详细信息") view_parser.add_argument("stream_id", help="聊天流ID") view_parser.add_argument("cycle_index", type=int, help="循环索引(从1开始)") - + # 查看最新循环 latest_parser = subparsers.add_parser("latest", help="查看所有聊天流中最新的几个循环") latest_parser.add_argument("-c", "--count", type=int, default=10, help="显示的数量") - + args = parser.parse_args() - + analyzer = CycleAnalyzer() - + if args.command == "list-streams": list_streams_cmd(analyzer, args) elif args.command == "analyze": @@ -163,5 +168,6 @@ def main(): else: parser.print_help() + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/plugins/heartFC_chat/expressors/default_expressor.py b/src/plugins/heartFC_chat/expressors/default_expressor.py new file mode 100644 index 000000000..3dc39ff41 --- /dev/null +++ b/src/plugins/heartFC_chat/expressors/default_expressor.py @@ -0,0 +1,319 @@ +import time +import traceback +from typing import List, Optional, Dict, Any +from src.plugins.chat.message import MessageRecv, MessageThinking, MessageSending +from src.plugins.chat.message import Seg # Local import needed after move +from src.plugins.chat.message import UserInfo +from src.plugins.chat.chat_stream import chat_manager +from src.common.logger_manager import get_logger +from src.plugins.models.utils_model import LLMRequest +from src.config.config import global_config +from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move +from src.plugins.utils.timer_calculator import Timer # <--- Import Timer +from src.plugins.emoji_system.emoji_manager import emoji_manager +from src.plugins.heartFC_chat.heartflow_prompt_builder import prompt_builder +from src.plugins.heartFC_chat.heartFC_sender import HeartFCSender +from src.plugins.chat.utils import process_llm_response +from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager +from src.plugins.moods.moods import MoodManager +from src.heart_flow.utils_chat import get_chat_type_and_target_info +from src.plugins.chat.chat_stream import ChatStream + +logger = get_logger("expressor") + + +class DefaultExpressor: + def __init__(self, chat_id: str): + self.log_prefix = "expressor" + self.express_model = LLMRequest( + model=global_config.llm_normal, + temperature=global_config.llm_normal["temp"], + max_tokens=256, + request_type="response_heartflow", + ) + self.heart_fc_sender = HeartFCSender() + + self.chat_id = chat_id + self.chat_stream: Optional[ChatStream] = None + self.is_group_chat = True + self.chat_target_info = None + + async def initialize(self): + self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id) + + async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]: + """创建思考消息 (尝试锚定到 anchor_message)""" + if not anchor_message or not anchor_message.chat_stream: + logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。") + return None + + chat = anchor_message.chat_stream + messageinfo = anchor_message.message_info + bot_user_info = UserInfo( + user_id=global_config.BOT_QQ, + user_nickname=global_config.BOT_NICKNAME, + 
platform=messageinfo.platform, + ) + logger.debug(f"创建思考消息:{anchor_message}") + logger.debug(f"创建思考消息chat:{chat}") + logger.debug(f"创建思考消息bot_user_info:{bot_user_info}") + logger.debug(f"创建思考消息messageinfo:{messageinfo}") + + thinking_time_point = round(time.time(), 2) + thinking_id = "mt" + str(thinking_time_point) + thinking_message = MessageThinking( + message_id=thinking_id, + chat_stream=chat, + bot_user_info=bot_user_info, + reply=anchor_message, # 回复的是锚点消息 + thinking_start_time=thinking_time_point, + ) + logger.debug(f"创建思考消息thinking_message:{thinking_message}") + # Access MessageManager directly (using heart_fc_sender) + await self.heart_fc_sender.register_thinking(thinking_message) + return thinking_id + + async def deal_reply( + self, + cycle_timers: dict, + action_data: Dict[str, Any], + reasoning: str, + anchor_message: MessageRecv, + ) -> tuple[bool, str]: + # 创建思考消息 + thinking_id = await self._create_thinking_message(anchor_message) + if not thinking_id: + raise Exception("无法创建思考消息") + + try: + has_sent_something = False + + # 处理文本部分 + text_part = action_data.get("text", []) + if text_part: + with Timer("生成回复", cycle_timers): + # 可以保留原有的文本处理逻辑或进行适当调整 + reply = await self.express( + in_mind_reply=text_part, + anchor_message=anchor_message, + thinking_id=thinking_id, + reason=reasoning, + ) + + if reply: + with Timer("发送文本消息", cycle_timers): + await self._send_response_messages( + anchor_message=anchor_message, + thinking_id=thinking_id, + response_set=reply, + ) + has_sent_something = True + else: + logger.warning(f"{self.log_prefix} 文本回复生成失败") + + # 处理表情部分 + emoji_keyword = action_data.get("emojis", []) + if emoji_keyword: + await self._handle_emoji(anchor_message, [], emoji_keyword) + has_sent_something = True + + if not has_sent_something: + logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容") + + return has_sent_something, thinking_id + + except Exception as e: + logger.error(f"回复失败: {e}") + return False, thinking_id + + # --- 回复器 (Replier) 的定义 --- # + + async def express( + self, + in_mind_reply: str, + reason: str, + anchor_message: MessageRecv, + thinking_id: str, + ) -> Optional[List[str]]: + """ + 回复器 (Replier): 核心逻辑,负责生成回复文本。 + (已整合原 HeartFCGenerator 的功能) + """ + try: + # 1. 获取情绪影响因子并调整模型温度 + arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier() + current_temp = global_config.llm_normal["temp"] * arousal_multiplier + self.express_model.temperature = current_temp # 动态调整温度 + + # 2. 获取信息捕捉器 + info_catcher = info_catcher_manager.get_info_catcher(thinking_id) + + # --- Determine sender_name for private chat --- + sender_name_for_prompt = "某人" # Default for group or if info unavailable + if not self.is_group_chat and self.chat_target_info: + # Prioritize person_name, then nickname + sender_name_for_prompt = ( + self.chat_target_info.get("person_name") + or self.chat_target_info.get("user_nickname") + or sender_name_for_prompt + ) + # --- End determining sender_name --- + + # 3. 构建 Prompt + with Timer("构建Prompt", {}): # 内部计时器,可选保留 + prompt = await prompt_builder.build_prompt( + build_mode="focus", + chat_stream=self.chat_stream, # Pass the stream object + in_mind_reply=in_mind_reply, + reason=reason, + current_mind_info="", + structured_info="", + sender_name=sender_name_for_prompt, # Pass determined name + ) + + # 4. 
调用 LLM 生成回复 + content = None + reasoning_content = None + model_name = "unknown_model" + if not prompt: + logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。") + return None + + try: + with Timer("LLM生成", {}): # 内部计时器,可选保留 + content, reasoning_content, model_name = await self.express_model.generate_response(prompt) + # logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n生成回复: {content}\n") + # 捕捉 LLM 输出信息 + info_catcher.catch_after_llm_generated( + prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name + ) + + except Exception as llm_e: + # 精简报错信息 + logger.error(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成失败: {llm_e}") + return None # LLM 调用失败则无法生成回复 + + # 5. 处理 LLM 响应 + if not content: + logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成了空内容。") + return None + + processed_response = process_llm_response(content) + + if not processed_response: + logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] 处理后的回复为空。") + return None + + return processed_response + + except Exception as e: + logger.error(f"{self.log_prefix}[Replier-{thinking_id}] 回复生成意外失败: {e}") + traceback.print_exc() + return None + + # --- 发送器 (Sender) --- # + + async def _send_response_messages( + self, anchor_message: Optional[MessageRecv], response_set: List[str], thinking_id: str + ) -> Optional[MessageSending]: + """发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender""" + if not anchor_message or not anchor_message.chat_stream: + logger.error(f"{self.log_prefix} 无法发送回复,缺少有效的锚点消息或聊天流。") + return None + + chat = self.chat_stream + chat_id = self.chat_id + stream_name = chat_manager.get_stream_name(chat_id) or chat_id # 获取流名称用于日志 + + # 检查思考过程是否仍在进行,并获取开始时间 + thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id) + + if thinking_start_time is None: + logger.warning(f"[{stream_name}] {thinking_id} 思考过程未找到或已结束,无法发送回复。") + return None + + mark_head = False + first_bot_msg: Optional[MessageSending] = None + reply_message_ids = [] # 记录实际发送的消息ID + bot_user_info = UserInfo( + user_id=global_config.BOT_QQ, + user_nickname=global_config.BOT_NICKNAME, + platform=chat.platform, + ) + + for i, msg_text in enumerate(response_set): + # 为每个消息片段生成唯一ID + part_message_id = f"{thinking_id}_{i}" + message_segment = Seg(type="text", data=msg_text) + bot_message = MessageSending( + message_id=part_message_id, # 使用片段的唯一ID + chat_stream=chat, + bot_user_info=bot_user_info, + sender_info=anchor_message.message_info.user_info, + message_segment=message_segment, + reply=anchor_message, # 回复原始锚点 + is_head=not mark_head, + is_emoji=False, + thinking_start_time=thinking_start_time, # 传递原始思考开始时间 + ) + try: + if not mark_head: + mark_head = True + first_bot_msg = bot_message # 保存第一个成功发送的消息对象 + await self.heart_fc_sender.type_and_send_message(bot_message, typing=False) + else: + await self.heart_fc_sender.type_and_send_message(bot_message, typing=True) + + reply_message_ids.append(part_message_id) # 记录我们生成的ID + + except Exception as e: + logger.error( + f"{self.log_prefix}[Sender-{thinking_id}] 发送回复片段 {i} ({part_message_id}) 时失败: {e}" + ) + # 这里可以选择是继续发送下一个片段还是中止 + + # 在尝试发送完所有片段后,完成原始的 thinking_id 状态 + try: + await self.heart_fc_sender.complete_thinking(chat_id, thinking_id) + except Exception as e: + logger.error(f"{self.log_prefix}[Sender-{thinking_id}] 完成思考状态 {thinking_id} 时出错: {e}") + + return first_bot_msg # 返回第一个成功发送的消息对象 + + async def _handle_emoji(self, anchor_message: Optional[MessageRecv], response_set: 
List[str], send_emoji: str = ""): + """处理表情包 (尝试锚定到 anchor_message),使用 HeartFCSender""" + if not anchor_message or not anchor_message.chat_stream: + logger.error(f"{self.log_prefix} 无法处理表情包,缺少有效的锚点消息或聊天流。") + return + + chat = anchor_message.chat_stream + + emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji) + + if emoji_raw: + emoji_path, description = emoji_raw + + emoji_cq = image_path_to_base64(emoji_path) + thinking_time_point = round(time.time(), 2) # 用于唯一ID + message_segment = Seg(type="emoji", data=emoji_cq) + bot_user_info = UserInfo( + user_id=global_config.BOT_QQ, + user_nickname=global_config.BOT_NICKNAME, + platform=anchor_message.message_info.platform, + ) + bot_message = MessageSending( + message_id="me" + str(thinking_time_point), # 表情消息的唯一ID + chat_stream=chat, + bot_user_info=bot_user_info, + sender_info=anchor_message.message_info.user_info, + message_segment=message_segment, + reply=anchor_message, # 回复原始锚点 + is_head=False, # 表情通常不是头部消息 + is_emoji=True, + # 不需要 thinking_start_time + ) + + try: + await self.heart_fc_sender.send_and_store(bot_message) + except Exception as e: + logger.error(f"{self.log_prefix} 发送表情包 {bot_message.message_info.message_id} 时失败: {e}") diff --git a/src/plugins/heartFC_chat/heartFC_Cycleinfo.py b/src/plugins/heartFC_chat/heartFC_Cycleinfo.py index 2abf315ec..d4dddccee 100644 --- a/src/plugins/heartFC_chat/heartFC_Cycleinfo.py +++ b/src/plugins/heartFC_chat/heartFC_Cycleinfo.py @@ -4,7 +4,7 @@ import json from typing import List, Optional, Dict, Any -class CycleInfo: +class CycleDetail: """循环信息记录类""" def __init__(self, cycle_id: int): @@ -27,21 +27,21 @@ class CycleInfo: "sub_mind_thinking": "", # 子思维思考内容 "in_mind_reply": [], # 子思维思考内容 } - + # 添加SubMind相关信息 self.submind_info: Dict[str, Any] = { "prompt": "", # SubMind输入的prompt "structured_info": "", # 结构化信息 "result": "", # SubMind的思考结果 } - + # 添加ToolUse相关信息 self.tooluse_info: Dict[str, Any] = { "prompt": "", # 工具使用的prompt "tools_used": [], # 使用了哪些工具 "tool_results": [], # 工具获得的信息 } - + # 添加Planner相关信息 self.planner_info: Dict[str, Any] = { "prompt": "", # 规划器的prompt @@ -70,9 +70,12 @@ class CycleInfo: """完成循环,记录结束时间""" self.end_time = time.time() - def set_action_info(self, action_type: str, reasoning: str, action_taken: bool): + def set_action_info( + self, action_type: str, reasoning: str, action_taken: bool, action_data: Optional[Dict[str, Any]] = None + ): """设置动作信息""" self.action_type = action_type + self.action_data = action_data self.reasoning = reasoning self.action_taken = action_taken @@ -99,7 +102,7 @@ class CycleInfo: self.response_info["reply_message_ids"] = reply_message_ids if sub_mind_thinking is not None: self.response_info["sub_mind_thinking"] = sub_mind_thinking - + def set_submind_info( self, prompt: Optional[str] = None, @@ -113,7 +116,7 @@ class CycleInfo: self.submind_info["structured_info"] = structured_info if result is not None: self.submind_info["result"] = result - + def set_tooluse_info( self, prompt: Optional[str] = None, @@ -127,7 +130,7 @@ class CycleInfo: self.tooluse_info["tools_used"] = tools_used if tool_results is not None: self.tooluse_info["tool_results"] = tool_results - + def set_planner_info( self, prompt: Optional[str] = None, @@ -141,17 +144,17 @@ class CycleInfo: self.planner_info["response"] = response if parsed_result is not None: self.planner_info["parsed_result"] = parsed_result - + @staticmethod - def save_to_file(cycle_info: 'CycleInfo', stream_id: str, base_dir: str = "log_debug") -> str: + def save_to_file(cycle_info: "CycleDetail", 
stream_id: str, base_dir: str = "log_debug") -> str: """ 将CycleInfo保存到文件 - + 参数: cycle_info: CycleInfo对象 stream_id: 聊天流ID base_dir: 基础目录,默认为log_debug - + 返回: str: 保存的文件路径 """ @@ -159,17 +162,17 @@ class CycleInfo: # 创建目录结构 stream_dir = os.path.join(base_dir, stream_id) os.makedirs(stream_dir, exist_ok=True) - + # 生成文件名和路径 timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime(cycle_info.start_time)) filename = f"cycle_{cycle_info.cycle_id}_{timestamp}.txt" filepath = os.path.join(stream_dir, filename) - + # 将CycleInfo转换为JSON格式 cycle_data = cycle_info.to_dict() - + # 格式化输出成易读的格式 - with open(filepath, 'w', encoding='utf-8') as f: + with open(filepath, "w", encoding="utf-8") as f: # 写入基本信息 f.write(f"循环ID: {cycle_info.cycle_id}\n") f.write(f"开始时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cycle_info.start_time))}\n") @@ -182,7 +185,7 @@ class CycleInfo: f.write(f"执行状态: {'已执行' if cycle_info.action_taken else '未执行'}\n") f.write(f"思考ID: {cycle_info.thinking_id}\n") f.write(f"是否为重新规划: {'是' if cycle_info.replanned else '否'}\n\n") - + # 写入计时器信息 if cycle_info.timers: f.write("== 计时器信息 ==\n") @@ -190,42 +193,42 @@ class CycleInfo: formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" f.write(f"{name}: {formatted_time}\n") f.write("\n") - + # 写入响应信息 f.write("== 响应信息 ==\n") f.write(f"锚点消息ID: {cycle_info.response_info['anchor_message_id']}\n") - if cycle_info.response_info['response_text']: + if cycle_info.response_info["response_text"]: f.write("回复文本:\n") - for i, text in enumerate(cycle_info.response_info['response_text']): - f.write(f" [{i+1}] {text}\n") - if cycle_info.response_info['emoji_info']: + for i, text in enumerate(cycle_info.response_info["response_text"]): + f.write(f" [{i + 1}] {text}\n") + if cycle_info.response_info["emoji_info"]: f.write(f"表情信息: {cycle_info.response_info['emoji_info']}\n") - if cycle_info.response_info['reply_message_ids']: + if cycle_info.response_info["reply_message_ids"]: f.write(f"回复消息ID: {', '.join(cycle_info.response_info['reply_message_ids'])}\n") f.write("\n") - + # 写入SubMind信息 f.write("== SubMind信息 ==\n") f.write(f"结构化信息:\n{cycle_info.submind_info['structured_info']}\n\n") f.write(f"思考结果:\n{cycle_info.submind_info['result']}\n\n") f.write("SubMind Prompt:\n") f.write(f"{cycle_info.submind_info['prompt']}\n\n") - + # 写入ToolUse信息 f.write("== 工具使用信息 ==\n") - if cycle_info.tooluse_info['tools_used']: + if cycle_info.tooluse_info["tools_used"]: f.write(f"使用的工具: {', '.join(cycle_info.tooluse_info['tools_used'])}\n") else: f.write("未使用工具\n") - - if cycle_info.tooluse_info['tool_results']: + + if cycle_info.tooluse_info["tool_results"]: f.write("工具结果:\n") - for i, result in enumerate(cycle_info.tooluse_info['tool_results']): - f.write(f" [{i+1}] 类型: {result.get('type', '未知')}, 内容: {result.get('content', '')}\n") + for i, result in enumerate(cycle_info.tooluse_info["tool_results"]): + f.write(f" [{i + 1}] 类型: {result.get('type', '未知')}, 内容: {result.get('content', '')}\n") f.write("\n") f.write("工具执行 Prompt:\n") f.write(f"{cycle_info.tooluse_info['prompt']}\n\n") - + # 写入Planner信息 f.write("== Planner信息 ==\n") f.write("Planner Prompt:\n") @@ -234,7 +237,7 @@ class CycleInfo: f.write(f"{cycle_info.planner_info['response']}\n\n") f.write("解析结果:\n") f.write(f"{json.dumps(cycle_info.planner_info['parsed_result'], ensure_ascii=False, indent=2)}\n") - + return filepath except Exception as e: print(f"保存CycleInfo到文件时出错: {e}") @@ -244,10 +247,10 @@ class CycleInfo: def load_from_file(filepath: str) -> Optional[Dict[str, Any]]: """ 
从文件加载CycleInfo信息(只加载JSON格式的数据,不解析文本格式) - + 参数: filepath: 文件路径 - + 返回: Optional[Dict[str, Any]]: 加载的CycleInfo数据,失败则返回None """ @@ -255,39 +258,39 @@ class CycleInfo: if not os.path.exists(filepath): print(f"文件不存在: {filepath}") return None - + # 尝试从文件末尾读取JSON数据 - with open(filepath, 'r', encoding='utf-8') as f: + with open(filepath, "r", encoding="utf-8") as f: lines = f.readlines() - + # 查找"解析结果:"后的JSON数据 for i, line in enumerate(lines): - if "解析结果:" in line and i+1 < len(lines): + if "解析结果:" in line and i + 1 < len(lines): # 尝试解析后面的行 json_data = "" - for j in range(i+1, len(lines)): + for j in range(i + 1, len(lines)): json_data += lines[j] - + try: return json.loads(json_data) except json.JSONDecodeError: continue - + # 如果没有找到JSON数据,则返回None return None except Exception as e: print(f"从文件加载CycleInfo时出错: {e}") return None - + @staticmethod def list_cycles(stream_id: str, base_dir: str = "log_debug") -> List[str]: """ 列出指定stream_id的所有循环文件 - + 参数: stream_id: 聊天流ID base_dir: 基础目录,默认为log_debug - + 返回: List[str]: 文件路径列表 """ @@ -295,9 +298,12 @@ class CycleInfo: stream_dir = os.path.join(base_dir, stream_id) if not os.path.exists(stream_dir): return [] - - files = [os.path.join(stream_dir, f) for f in os.listdir(stream_dir) - if f.startswith("cycle_") and f.endswith(".txt")] + + files = [ + os.path.join(stream_dir, f) + for f in os.listdir(stream_dir) + if f.startswith("cycle_") and f.endswith(".txt") + ] return sorted(files) except Exception as e: print(f"列出循环文件时出错: {e}") diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py index 03a68037c..2e1c7cde3 100644 --- a/src/plugins/heartFC_chat/heartFC_chat.py +++ b/src/plugins/heartFC_chat/heartFC_chat.py @@ -5,30 +5,32 @@ import random # <--- 添加导入 import json # <--- 确保导入 json from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine from collections import deque -from src.plugins.chat.message import MessageRecv, BaseMessageInfo, MessageThinking, MessageSending -from src.plugins.chat.message import Seg # Local import needed after move from src.plugins.chat.chat_stream import ChatStream -from src.plugins.chat.message import UserInfo from src.plugins.chat.chat_stream import chat_manager from src.common.logger_manager import get_logger from src.plugins.models.utils_model import LLMRequest from src.config.config import global_config -from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move -from src.plugins.utils.timer_calculator import Timer # <--- Import Timer -from src.plugins.emoji_system.emoji_manager import emoji_manager -from src.heart_flow.sub_mind import SubMind -from src.heart_flow.observation import Observation -from src.plugins.heartFC_chat.heartflow_prompt_builder import global_prompt_manager, prompt_builder +from src.plugins.utils.timer_calculator import Timer +from src.heart_flow.chatting_observation import Observation +from src.plugins.heartFC_chat.heartflow_prompt_builder import prompt_builder import contextlib -from src.plugins.utils.chat_message_builder import num_new_messages_since -from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo -from .heartFC_sender import HeartFCSender -from src.plugins.chat.utils import process_llm_response -from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager -from src.plugins.moods.moods import MoodManager +from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleDetail +from src.heart_flow.chatting_observation import ChattingObservation from src.heart_flow.utils_chat import 
get_chat_type_and_target_info from rich.traceback import install -from src.heart_flow.tool_user import ToolExecutor +from src.heart_flow.info.info_base import InfoBase +from src.heart_flow.info.obs_info import ObsInfo +from src.heart_flow.info.cycle_info import CycleInfo +from src.heart_flow.info.mind_info import MindInfo +from src.heart_flow.info.structured_info import StructuredInfo +from src.plugins.heartFC_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor +from src.plugins.heartFC_chat.info_processors.mind_processor import MindProcessor +from src.heart_flow.memory_observation import MemoryObservation +from src.heart_flow.hfcloop_observation import HFCloopObservation +from src.heart_flow.working_observation import WorkingObservation +from src.plugins.heartFC_chat.info_processors.tool_processor import ToolProcessor +from src.plugins.heartFC_chat.expressors.default_expressor import DefaultExpressor +from src.plugins.heartFC_chat.hfc_utils import _create_empty_anchor_message install(extra_lines=3) @@ -39,24 +41,11 @@ EMOJI_SEND_PRO = 0.3 # 设置一个概率,比如 30% 才真的发 CONSECUTIVE_NO_REPLY_THRESHOLD = 3 # 连续不回复的阈值 -# 添加并行模式开关常量 -# 并行模式优化说明: -# 1. 并行模式下,SubMind的思考(think)和工具执行(tools)同时进行,而规划(plan)在获取思考结果后串行执行 -# 2. 这种半并行模式中,Planner依赖SubMind的思考结果(current_mind)进行决策,但仍能与工具调用并行处理 -# 3. 优点:处理速度显著提升,同时保持规划器能利用思考内容进行决策 -# 4. 可能的缺点:整体处理时间比完全并行模式略长,但决策质量可能更好 -# 5. 对比原来的全并行模式(think+plan+tools三者同时进行),这种模式更平衡效率和质量 -PARALLEL_MODE_ENABLED = True # 设置为 True 启用半并行模式,False 使用原始串行模式 - - logger = get_logger("hfc") # Logger Name Changed # 默认动作定义 -DEFAULT_ACTIONS = { - "no_reply": "不回复", - "reply": "回复:可以包含文本、表情或两者结合,顺序任意" -} +DEFAULT_ACTIONS = {"no_reply": "不回复", "reply": "回复:可以包含文本、表情或两者结合,顺序任意"} class ActionManager: @@ -189,7 +178,6 @@ class HeartFChatting: def __init__( self, chat_id: str, - sub_mind: SubMind, observations: list[Observation], on_consecutive_no_reply_callback: Callable[[], Coroutine[None, None, None]], ): @@ -198,17 +186,22 @@ class HeartFChatting: 参数: chat_id: 聊天流唯一标识符(如stream_id) - sub_mind: 关联的子思维 observations: 关联的观察列表 on_consecutive_no_reply_callback: 连续不回复达到阈值时调用的异步回调函数 """ # 基础属性 self.stream_id: str = chat_id # 聊天流ID self.chat_stream: Optional[ChatStream] = None # 关联的聊天流 - self.sub_mind: SubMind = sub_mind # 关联的子思维 self.observations: List[Observation] = observations # 关联的观察列表,用于监控聊天流状态 self.on_consecutive_no_reply_callback = on_consecutive_no_reply_callback - self.parallel_mode: bool = PARALLEL_MODE_ENABLED # 并行模式开关 + + self.chatting_info_processor = ChattingInfoProcessor() + self.mind_processor = MindProcessor(subheartflow_id=self.stream_id) + + self.memory_observation = MemoryObservation(observe_id=self.stream_id) + self.hfcloop_observation = HFCloopObservation(observe_id=self.stream_id) + self.tool_processor = ToolProcessor(subheartflow_id=self.stream_id) + self.working_observation = WorkingObservation(observe_id=self.stream_id) # 日志前缀 self.log_prefix: str = str(chat_id) # Initial default, will be updated @@ -217,6 +210,7 @@ class HeartFChatting: self.is_group_chat: bool = False self.chat_target_info: Optional[dict] = None # --- End Initialization --- + self.expressor = DefaultExpressor(chat_id=self.stream_id) # 动作管理器 self.action_manager = ActionManager() @@ -225,16 +219,6 @@ class HeartFChatting: self._initialized = False self._processing_lock = asyncio.Lock() - # --- 移除 gpt_instance, 直接初始化 LLM 模型 --- - # self.gpt_instance = HeartFCGenerator() # <-- 移除 - self.model_normal = LLMRequest( # <-- 新增 LLM 初始化 - model=global_config.llm_normal, - 
temperature=global_config.llm_normal["temp"], - max_tokens=256, - request_type="response_heartflow", - ) - self.heart_fc_sender = HeartFCSender() - # LLM规划器配置 self.planner_llm = LLMRequest( model=global_config.llm_plan, @@ -248,43 +232,45 @@ class HeartFChatting: # 添加循环信息管理相关的属性 self._cycle_counter = 0 - self._cycle_history: Deque[CycleInfo] = deque(maxlen=10) # 保留最近10个循环的信息 - self._current_cycle: Optional[CycleInfo] = None + self._cycle_history: Deque[CycleDetail] = deque(maxlen=10) # 保留最近10个循环的信息 + self._current_cycle: Optional[CycleDetail] = None self._lian_xu_bu_hui_fu_ci_shu: int = 0 # <--- 新增:连续不回复计数器 self._shutting_down: bool = False # <--- 新增:关闭标志位 self._lian_xu_deng_dai_shi_jian: float = 0.0 # <--- 新增:累计等待时间 async def _initialize(self) -> bool: """ - 懒初始化,解析chat_stream, 获取聊天类型和目标信息。 + 执行懒初始化操作 + + 功能: + 1. 获取聊天类型(群聊/私聊)和目标信息 + 2. 获取聊天流对象 + 3. 设置日志前缀 + + 返回: + bool: 初始化是否成功 + + 注意: + - 如果已经初始化过会直接返回True + - 需要获取chat_stream对象才能继续后续操作 """ + # 如果已经初始化过,直接返回成功 if self._initialized: return True - # --- Use utility function to determine chat type and fetch info --- - # Note: get_chat_type_and_target_info handles getting the chat_stream internally - self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.stream_id) - - # Update log prefix based on potential stream name (if needed, or get it from chat_stream if util doesn't return it) - # Assuming get_chat_type_and_target_info focuses only on type/target - # We still need the chat_stream object itself for other operations try: + self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.stream_id) + await self.expressor.initialize() self.chat_stream = await asyncio.to_thread(chat_manager.get_stream, self.stream_id) - if not self.chat_stream: - logger.error( - f"[HFC:{self.stream_id}] 获取ChatStream失败 during _initialize, though util func might have succeeded earlier." 
- ) - return False # Cannot proceed without chat_stream object - # Update log prefix using the fetched stream object + self.expressor.chat_stream = self.chat_stream self.log_prefix = f"[{chat_manager.get_stream_name(self.stream_id) or self.stream_id}]" except Exception as e: - logger.error(f"[HFC:{self.stream_id}] 获取ChatStream时出错 in _initialize: {e}") + logger.error(f"[HFC:{self.stream_id}] 初始化HFC时发生错误: {e}") return False - # --- End using utility function --- - + # 标记初始化完成 self._initialized = True - logger.debug(f"{self.log_prefix} 麦麦感觉到了,可以开始认真水群 ") + logger.debug(f"{self.log_prefix} 初始化完成,准备开始处理消息") return True async def start(self): @@ -353,7 +339,7 @@ class HeartFChatting: # 创建新的循环信息 self._cycle_counter += 1 - self._current_cycle = CycleInfo(self._cycle_counter) + self._current_cycle = CycleDetail(self._cycle_counter) # 初始化周期状态 cycle_timers = {} @@ -386,10 +372,10 @@ class HeartFChatting: # 完成当前循环并保存历史 self._current_cycle.complete_cycle() self._cycle_history.append(self._current_cycle) - + # 保存CycleInfo到文件 try: - filepath = CycleInfo.save_to_file(self._current_cycle, self.stream_id) + filepath = CycleDetail.save_to_file(self._current_cycle, self.stream_id) logger.info(f"{self.log_prefix} 已保存循环信息到文件: {filepath}") except Exception as e: logger.error(f"{self.log_prefix} 保存循环信息到文件时出错: {e}") @@ -436,61 +422,53 @@ class HeartFChatting: if acquired and self._processing_lock.locked(): self._processing_lock.release() - async def _check_new_messages(self, start_time: float) -> bool: - """ - 检查从指定时间点后是否有新消息 - - 参数: - start_time: 开始检查的时间点 - - 返回: - bool: 是否有新消息 - """ - try: - new_msg_count = num_new_messages_since(self.stream_id, start_time) - if new_msg_count > 0: - logger.info(f"{self.log_prefix} 检测到{new_msg_count}条新消息") - return True - return False - except Exception as e: - logger.error(f"{self.log_prefix} 检查新消息时出错: {e}") - return False - async def _think_plan_execute_loop(self, cycle_timers: dict, planner_start_db_time: float) -> tuple[bool, str]: try: + await asyncio.sleep(1) with Timer("观察", cycle_timers): - observation = self.observations[0] - await observation.observe() - + await self.observations[0].observe() + await self.memory_observation.observe() + await self.working_observation.observe() + await self.hfcloop_observation.observe() + observations: List[Observation] = [] + observations.append(self.observations[0]) + observations.append(self.memory_observation) + observations.append(self.working_observation) + observations.append(self.hfcloop_observation) + + for observation in observations: + logger.debug(f"{self.log_prefix} 观察信息: {observation}") + # 记录并行任务开始时间 parallel_start_time = time.time() - logger.debug(f"{self.log_prefix} 开始思考和工具并行任务处理") - + logger.debug(f"{self.log_prefix} 开始信息处理器并行任务") + # 并行执行两个任务:思考和工具执行 - with Timer("思考和工具并行处理", cycle_timers): + with Timer("执行 信息处理器", cycle_timers): # 1. 子思维思考 - 不执行工具调用 - think_task = asyncio.create_task(self._get_submind_thinking_only(cycle_timers)) + think_task = asyncio.create_task(self.mind_processor.process_info(observations=observations)) logger.debug(f"{self.log_prefix} 启动子思维思考任务") - + # 2. 工具执行器 - 专门处理工具调用 - tool_task = asyncio.create_task(self._execute_tools_parallel(self.sub_mind, cycle_timers)) + tool_task = asyncio.create_task(self.tool_processor.process_info(observations=observations)) logger.debug(f"{self.log_prefix} 启动工具执行任务") - + + # 3. 
聊天信息处理器 + chatting_info_task = asyncio.create_task( + self.chatting_info_processor.process_info(observations=observations) + ) + logger.debug(f"{self.log_prefix} 启动聊天信息处理器任务") + # 创建任务完成状态追踪 - tasks = { - "思考任务": think_task, - "工具任务": tool_task - } + tasks = {"思考任务": think_task, "工具任务": tool_task, "聊天信息处理任务": chatting_info_task} pending = set(tasks.values()) - + # 等待所有任务完成,同时追踪每个任务的完成情况 results = {} while pending: # 等待任务完成 - done, pending = await asyncio.wait( - pending, return_when=asyncio.FIRST_COMPLETED, timeout=1.0 - ) - + done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED, timeout=1.0) + # 记录完成的任务 for task in done: for name, t in tasks.items(): @@ -500,50 +478,47 @@ class HeartFChatting: logger.info(f"{self.log_prefix} {name}已完成,耗时: {task_duration:.2f}秒") results[name] = task.result() break - + # 如果仍有未完成任务,记录进行中状态 if pending: current_time = time.time() elapsed = current_time - parallel_start_time pending_names = [name for name, t in tasks.items() if t in pending] - logger.info(f"{self.log_prefix} 并行处理已进行{elapsed:.2f}秒,待完成任务: {', '.join(pending_names)}") + logger.info( + f"{self.log_prefix} 并行处理已进行{elapsed:.2f}秒,待完成任务: {', '.join(pending_names)}" + ) # 所有任务完成,从结果中提取数据 - current_mind = results.get("思考任务") - tool_results = results.get("工具任务") - + mind_processed_infos = [] + tool_processed_infos = [] + chatting_info_processed_infos = [] + mind_processed_infos = results.get("思考任务") + tool_processed_infos = results.get("工具任务") + chatting_info_processed_infos = results.get("聊天信息处理任务") + # 记录总耗时 parallel_end_time = time.time() total_duration = parallel_end_time - parallel_start_time logger.info(f"{self.log_prefix} 思考和工具并行任务全部完成,总耗时: {total_duration:.2f}秒") - - # 处理工具结果 - 将结果更新到SubMind - if tool_results: - self.sub_mind.structured_info.extend(tool_results) - self.sub_mind._update_structured_info_str() - logger.debug(f"{self.log_prefix} 工具结果已更新到SubMind,数量: {len(tool_results)}") - - # 记录子思维思考内容 - if self._current_cycle: - self._current_cycle.set_response_info(sub_mind_thinking=current_mind) - - # 串行执行规划器 - 使用刚获取的思考结果 - logger.debug(f"{self.log_prefix} 开始串行规划任务") - with Timer("串行规划", cycle_timers): - # 调用原始的_planner方法而不是_planner_parallel - # _planner方法会使用current_mind作为输入参数,让规划器能够利用子思维的思考结果 - # 而_planner_parallel设计为不依赖current_mind的结果,两者的主要区别在于prompt构建方式 - planner_result = await self._planner(current_mind, cycle_timers) - + all_plan_info = mind_processed_infos + tool_processed_infos + chatting_info_processed_infos + + logger.debug(f"{self.log_prefix} 所有信息处理器处理后的信息: {all_plan_info}") + # 串行执行规划器 - 使用刚获取的思考结果 + logger.debug(f"{self.log_prefix} 开始 规划器") + with Timer("规划器", cycle_timers): + planner_result = await self._planner(all_plan_info, cycle_timers) + action = planner_result.get("action", "error") action_data = planner_result.get("action_data", {}) # 新增获取动作数据 reasoning = planner_result.get("reasoning", "未提供理由") - + logger.debug(f"{self.log_prefix} 动作和动作信息: {action}, {action_data}, {reasoning}") # 更新循环信息 - self._current_cycle.set_action_info(action, reasoning, True) + self._current_cycle.set_action_info( + action_type=action, reasoning=reasoning, action_taken=True, action_data=action_data + ) # 处理LLM错误 if planner_result.get("llm_error"): @@ -560,9 +535,9 @@ class HeartFChatting: logger.info(f"{self.log_prefix} 麦麦决定'{action_str}', 原因'{reasoning}'") - return await self._handle_action( - action, reasoning, action_data, cycle_timers, planner_start_db_time - ) + self.hfcloop_observation.add_loop_info(self._current_cycle) + + return await self._handle_action(action, 
reasoning, action_data, cycle_timers, planner_start_db_time) except Exception as e: logger.error(f"{self.log_prefix} 并行+串行处理失败: {e}") @@ -607,96 +582,6 @@ class HeartFChatting: self._lian_xu_deng_dai_shi_jian = 0.0 return False, "" - async def _handle_text_reply(self, reasoning: str, emoji_query: str, cycle_timers: dict) -> tuple[bool, str]: - """ - 处理文本回复 - - 工作流程: - 1. 获取锚点消息 - 2. 创建思考消息 - 3. 生成回复 - 4. 发送消息 - - 参数: - reasoning: 回复原因 - emoji_query: 表情查询 - cycle_timers: 计时器字典 - - 返回: - tuple[bool, str]: (是否回复成功, 思考消息ID) - """ - # 重置连续不回复计数器 - self._lian_xu_bu_hui_fu_ci_shu = 0 - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 - - # 获取锚点消息 - anchor_message = await self._get_anchor_message() - if not anchor_message: - raise PlannerError("无法获取锚点消息") - - # 创建思考消息 - thinking_id = await self._create_thinking_message(anchor_message) - if not thinking_id: - raise PlannerError("无法创建思考消息") - - try: - # 生成回复 - with Timer("生成回复", cycle_timers): - reply = await self._replier_work( - anchor_message=anchor_message, - thinking_id=thinking_id, - reason=reasoning, - ) - - if not reply: - raise ReplierError("回复生成失败") - - # 发送消息 - - with Timer("发送消息", cycle_timers): - await self._sender( - thinking_id=thinking_id, - anchor_message=anchor_message, - response_set=reply, - send_emoji=emoji_query, - ) - - return True, thinking_id - - except (ReplierError, SenderError) as e: - logger.error(f"{self.log_prefix} 回复失败: {e}") - return True, thinking_id # 仍然返回thinking_id以便跟踪 - - async def _handle_emoji_reply(self, reasoning: str, emoji_query: str) -> bool: - """ - 处理表情回复 - - 工作流程: - 1. 获取锚点消息 - 2. 发送表情 - - 参数: - reasoning: 回复原因 - emoji_query: 表情查询 - - 返回: - bool: 是否发送成功 - """ - logger.info(f"{self.log_prefix} 决定回复表情({emoji_query}): {reasoning}") - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间(即使不计数也保持一致性) - - try: - anchor = await self._get_anchor_message() - if not anchor: - raise PlannerError("无法获取锚点消息") - - await self._handle_emoji(anchor, [], emoji_query) - return True - - except Exception as e: - logger.error(f"{self.log_prefix} 表情发送失败: {e}") - return False - async def _handle_no_reply(self, reasoning: str, planner_start_db_time: float, cycle_timers: dict) -> bool: """ 处理不回复的情况 @@ -808,395 +693,6 @@ class HeartFChatting: # 无论如何,重新抛出异常,让上层处理 raise - async def _log_cycle_timers(self, cycle_timers: dict, log_prefix: str): - """记录循环周期的计时器结果""" - if cycle_timers: - timer_strings = [] - for name, elapsed in cycle_timers.items(): - formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" - timer_strings.append(f"{name}: {formatted_time}") - - if timer_strings: - # 在记录前检查关闭标志 - if not self._shutting_down: - logger.debug(f"{log_prefix} 该次决策耗时: {'; '.join(timer_strings)}") - - async def _get_submind_thinking_only(self, cycle_timers: dict) -> str: - """获取子思维的纯思考结果,不执行工具调用""" - try: - start_time = time.time() - logger.debug(f"{self.log_prefix} 子思维纯思考任务开始") - - with Timer("纯思考", cycle_timers): - # 修改SubMind.do_thinking_before_reply方法的参数,添加no_tools=True - current_mind, _past_mind, submind_prompt = await self.sub_mind.do_thinking_before_reply( - history_cycle=self._cycle_history, - parallel_mode=False, # 设为False,因为规划器将依赖思考结果 - no_tools=True, # 添加参数指示不执行工具 - return_prompt=True, # 返回prompt - cycle_info=self._current_cycle, # 传递循环信息对象 - ) - - # 记录SubMind的信息到CycleInfo - if self._current_cycle: - self._current_cycle.set_submind_info( - prompt=submind_prompt, - structured_info=self.sub_mind.structured_info_str, - result=current_mind - ) - - end_time = time.time() - duration = end_time - start_time - 
logger.debug(f"{self.log_prefix} 子思维纯思考任务完成,耗时: {duration:.2f}秒") - return current_mind - except Exception as e: - logger.error(f"{self.log_prefix}子心流纯思考失败: {e}") - return "[思考时出错]" - - async def _execute_tools_parallel(self, sub_mind, cycle_timers: dict): - """并行执行工具调用""" - try: - start_time = time.time() - logger.debug(f"{self.log_prefix} 工具执行任务开始") - - # 如果还没有工具执行器实例,创建一个 - if not hasattr(self, 'tool_executor'): - self.tool_executor = ToolExecutor(self.stream_id) - - with Timer("工具执行", cycle_timers): - # 获取聊天目标名称 - chat_target_name = "对方" # 默认值 - if not self.is_group_chat and self.chat_target_info: - chat_target_name = ( - self.chat_target_info.get("person_name") - or self.chat_target_info.get("user_nickname") - or chat_target_name - ) - - # 执行工具并获取结果 - tool_results, tools_used, tool_prompt = await self.tool_executor.execute_tools( - sub_mind, - chat_target_name=chat_target_name, - is_group_chat=self.is_group_chat, - return_details=True, # 返回详细信息 - cycle_info=self._current_cycle, # 传递循环信息对象 - ) - - # 记录工具执行信息到CycleInfo - if self._current_cycle: - self._current_cycle.set_tooluse_info( - prompt=tool_prompt, - tools_used=tools_used, - tool_results=tool_results - ) - - end_time = time.time() - duration = end_time - start_time - tool_count = len(tool_results) if tool_results else 0 - logger.debug(f"{self.log_prefix} 工具执行任务完成,耗时: {duration:.2f}秒,工具结果数量: {tool_count}") - return tool_results - except Exception as e: - logger.error(f"{self.log_prefix}并行工具执行失败: {e}") - logger.error(traceback.format_exc()) - return [] - - async def _planner_parallel(self, cycle_timers: dict) -> Dict[str, Any]: - """ - 并行规划器 (Planner): 不依赖SubMind的思考结果,可与SubMind并行执行以节省时间。 - 返回与_planner相同格式的结果。 - """ - start_time = time.time() - logger.debug(f"{self.log_prefix} 并行规划任务开始") - - actions_to_remove_temporarily = [] - # --- 检查历史动作并决定临时移除动作 (逻辑保持不变) --- - lian_xu_wen_ben_hui_fu = 0 - probability_roll = random.random() - for cycle in reversed(self._cycle_history): - if cycle.action_taken: - if cycle.action_type == "text_reply": - lian_xu_wen_ben_hui_fu += 1 - else: - break - if len(self._cycle_history) > 0 and cycle.cycle_id <= self._cycle_history[0].cycle_id + ( - len(self._cycle_history) - 4 - ): - break - logger.debug(f"{self.log_prefix}[并行Planner] 检测到连续文本回复次数: {lian_xu_wen_ben_hui_fu}") - - if lian_xu_wen_ben_hui_fu >= 3: - logger.info(f"{self.log_prefix}[并行Planner] 连续回复 >= 3 次,强制移除 text_reply 和 emoji_reply") - actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) - elif lian_xu_wen_ben_hui_fu == 2: - if probability_roll < 0.8: - logger.info(f"{self.log_prefix}[并行Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (触发)") - actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) - else: - logger.info( - f"{self.log_prefix}[并行Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (未触发)" - ) - elif lian_xu_wen_ben_hui_fu == 1: - if probability_roll < 0.4: - logger.info(f"{self.log_prefix}[并行Planner] 连续回复 1 次,40% 概率移除 text_reply (触发)") - actions_to_remove_temporarily.append("text_reply") - else: - logger.info(f"{self.log_prefix}[并行Planner] 连续回复 1 次,40% 概率移除 text_reply (未触发)") - # --- 结束检查历史动作 --- - - # 获取观察信息 - observation = self.observations[0] - # if is_re_planned: # 暂时简化,不处理重新规划 - # await observation.observe() - observed_messages = observation.talking_message - observed_messages_str = observation.talking_message_str_truncate - - # --- 使用 LLM 进行决策 (JSON 输出模式) --- # - action = "no_reply" # 默认动作 - reasoning = "规划器初始化默认" - emoji_query = "" - llm_error = False # LLM 请求或解析错误标志 - prompt = "" # 初始化prompt变量 
- llm_content = "" # 初始化LLM响应内容 - - # 获取我们将传递给 prompt 构建器和用于验证的当前可用动作 - current_available_actions = self.action_manager.get_available_actions() - - try: - # --- 应用临时动作移除 --- - if actions_to_remove_temporarily: - self.action_manager.temporarily_remove_actions(actions_to_remove_temporarily) - # 更新 current_available_actions 以反映移除后的状态 - current_available_actions = self.action_manager.get_available_actions() - logger.debug( - f"{self.log_prefix}[并行Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(current_available_actions.keys())}" - ) - - # --- 构建提示词 (与原规划器不同,不依赖 current_mind) --- - prompt = await prompt_builder.build_planner_prompt_parallel( - is_group_chat=self.is_group_chat, - chat_target_info=self.chat_target_info, - cycle_history=self._cycle_history, - observed_messages_str=observed_messages_str, - # 移除 current_mind 参数 - structured_info=self.sub_mind.structured_info_str, - current_available_actions=current_available_actions, - ) - - # --- 调用 LLM (普通文本生成) --- - llm_content = None - try: - with Timer("并行规划LLM调用", cycle_timers): - llm_content, _, _ = await self.planner_llm.generate_response(prompt=prompt) - logger.debug(f"{self.log_prefix}[并行Planner] LLM 原始 JSON 响应 (预期): {llm_content}") - except Exception as req_e: - logger.error(f"{self.log_prefix}[并行Planner] LLM 请求执行失败: {req_e}") - reasoning = f"LLM 请求失败: {req_e}" - llm_error = True - # 直接使用默认动作返回错误结果 - action = "no_reply" # 明确设置为默认值 - emoji_query = "" # 明确设置为空 - - # --- 解析 LLM 返回的 JSON (仅当 LLM 请求未出错时进行) --- - parsed_result = {} # 初始化解析结果 - if not llm_error and llm_content: - try: - # 尝试去除可能的 markdown 代码块标记 - cleaned_content = ( - llm_content.strip().removeprefix("```json").removeprefix("```").removesuffix("```").strip() - ) - if not cleaned_content: - raise json.JSONDecodeError("Cleaned content is empty", cleaned_content, 0) - parsed_json = json.loads(cleaned_content) - parsed_result = parsed_json # 保存解析结果 - - # 提取决策,提供默认值 - extracted_action = parsed_json.get("action", "no_reply") - extracted_reasoning = parsed_json.get("reasoning", "LLM未提供理由") - extracted_emoji_query = parsed_json.get("emoji_query", "") - - # 验证动作是否在当前可用列表中 - if extracted_action not in current_available_actions: - logger.warning( - f"{self.log_prefix}[并行Planner] LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" - ) - action = "no_reply" - reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}" - emoji_query = "" - # 检查 no_reply 是否也恰好被移除了 (极端情况) - if "no_reply" not in current_available_actions: - logger.error( - f"{self.log_prefix}[并行Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。" - ) - action = "error" # 回退到错误状态 - reasoning = "无法执行任何有效动作,包括 no_reply" - llm_error = True # 标记为严重错误 - else: - llm_error = False # 视为逻辑修正而非 LLM 错误 - else: - # 动作有效且可用 - action = extracted_action - reasoning = extracted_reasoning - emoji_query = extracted_emoji_query - llm_error = False # 解析成功 - logger.debug( - f"{self.log_prefix}[并行要做什么]\nPrompt:\n{prompt}\n\n决策结果 (来自JSON): {action}, 理由: {reasoning}, 表情查询: '{emoji_query}'" - ) - - except json.JSONDecodeError as json_e: - logger.warning( - f"{self.log_prefix}[并行Planner] 解析LLM响应JSON失败: {json_e}. LLM原始输出: '{llm_content}'" - ) - reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_reply'." - action = "no_reply" # 解析失败则默认不回复 - emoji_query = "" - llm_error = True # 标记解析错误 - except Exception as parse_e: - logger.error(f"{self.log_prefix}[并行Planner] 处理LLM响应时发生意外错误: {parse_e}") - reasoning = f"处理LLM响应时发生意外错误: {parse_e}. 
将使用默认动作 'no_reply'." - action = "no_reply" - emoji_query = "" - llm_error = True - elif not llm_error and not llm_content: - # LLM 请求成功但返回空内容 - logger.warning(f"{self.log_prefix}[并行Planner] LLM 返回了空内容。") - reasoning = "LLM 返回了空内容,使用默认动作 'no_reply'." - action = "no_reply" - emoji_query = "" - llm_error = True # 标记为空响应错误 - - except Exception as outer_e: - logger.error(f"{self.log_prefix}[并行Planner] Planner 处理过程中发生意外错误: {outer_e}") - logger.error(traceback.format_exc()) - action = "error" # 发生未知错误,标记为 error 动作 - reasoning = f"并行Planner 内部处理错误: {outer_e}" - emoji_query = "" - llm_error = True - finally: - # --- 确保动作恢复 --- - # 检查 self._original_actions_backup 是否有值来判断是否需要恢复 - if self.action_manager._original_actions_backup is not None: - self.action_manager.restore_actions() - logger.debug( - f"{self.log_prefix}[并行Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}" - ) - - # 记录Planner信息到CycleInfo - if self._current_cycle: - result_dict = { - "action": action, - "reasoning": reasoning, - "emoji_query": emoji_query, - "llm_error": llm_error - } - self._current_cycle.set_planner_info( - prompt=prompt, - response=llm_content or "", - parsed_result=parsed_result or result_dict - ) - - # --- 概率性忽略文本回复附带的表情 (逻辑保持不变) --- - if action == "text_reply" and emoji_query: - logger.debug(f"{self.log_prefix}[并行Planner] 大模型建议文字回复带表情: '{emoji_query}'") - if random.random() > EMOJI_SEND_PRO: - logger.info( - f"{self.log_prefix}但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji_query}'" - ) - emoji_query = "" # 清空表情请求 - else: - logger.info(f"{self.log_prefix}好吧,加上表情 '{emoji_query}'") - # --- 结束概率性忽略 --- - - end_time = time.time() - duration = end_time - start_time - logger.debug(f"{self.log_prefix} 并行规划任务完成,耗时: {duration:.2f}秒,决定动作: {action}") - - # 返回结果字典 - return { - "action": action, - "reasoning": reasoning, - "emoji_query": emoji_query, - "observed_messages": observed_messages, - "llm_error": llm_error, # 返回错误状态 - } - - async def _get_anchor_message(self) -> Optional[MessageRecv]: - """ - 重构观察到的最后一条消息作为回复的锚点, - 如果重构失败或观察为空,则创建一个占位符。 - """ - - try: - placeholder_id = f"mid_pf_{int(time.time() * 1000)}" - placeholder_user = UserInfo( - user_id="system_trigger", user_nickname="System Trigger", platform=self.chat_stream.platform - ) - placeholder_msg_info = BaseMessageInfo( - message_id=placeholder_id, - platform=self.chat_stream.platform, - group_info=self.chat_stream.group_info, - user_info=placeholder_user, - time=time.time(), - ) - placeholder_msg_dict = { - "message_info": placeholder_msg_info.to_dict(), - "processed_plain_text": "[System Trigger Context]", - "raw_message": "", - "time": placeholder_msg_info.time, - } - anchor_message = MessageRecv(placeholder_msg_dict) - anchor_message.update_chat_stream(self.chat_stream) - logger.debug(f"{self.log_prefix} 创建占位符锚点消息: ID={anchor_message.message_info.message_id}") - return anchor_message - - except Exception as e: - logger.error(f"{self.log_prefix} Error getting/creating anchor message: {e}") - logger.error(traceback.format_exc()) - return None - - # --- 发送器 (Sender) --- # - async def _sender( - self, - thinking_id: str, - anchor_message: MessageRecv, - response_set: List[str], - send_emoji: str, # Emoji query decided by planner or tools - ): - """ - 发送器 (Sender): 使用 HeartFCSender 实例发送生成的回复。 - 处理相关的操作,如发送表情和更新关系。 - """ - logger.info(f"{self.log_prefix}开始发送回复 (使用 HeartFCSender)") - - first_bot_msg: Optional[MessageSending] = None - try: - # _send_response_messages 现在将使用 self.sender 内部处理注册和发送 - # 它需要负责创建 MessageThinking 和 
MessageSending 对象 - # 并调用 self.sender.register_thinking 和 self.sender.type_and_send_message - first_bot_msg = await self._send_response_messages( - anchor_message=anchor_message, response_set=response_set, thinking_id=thinking_id - ) - - if first_bot_msg: - # --- 处理关联表情(如果指定) --- # - if send_emoji: - logger.info(f"{self.log_prefix}正在发送关联表情: '{send_emoji}'") - # 优先使用 first_bot_msg 作为锚点,否则回退到原始锚点 - emoji_anchor = first_bot_msg - await self._handle_emoji(emoji_anchor, response_set, send_emoji) - else: - # 如果 _send_response_messages 返回 None,表示在发送前就失败或没有消息可发送 - logger.warning( - f"{self.log_prefix}[Sender-{thinking_id}] 未能发送任何回复消息 (_send_response_messages 返回 None)。" - ) - # 这里可能不需要抛出异常,取决于 _send_response_messages 的具体实现 - - except Exception as e: - # 异常现在由 type_and_send_message 内部处理日志,这里只记录发送流程失败 - logger.error(f"{self.log_prefix}[Sender-{thinking_id}] 发送回复过程中遇到错误: {e}") - # 思考状态应已在 type_and_send_message 的 finally 块中清理 - # 可以选择重新抛出或根据业务逻辑处理 - # raise RuntimeError(f"发送回复失败: {e}") from e - async def shutdown(self): """优雅关闭HeartFChatting实例,取消活动循环任务""" logger.info(f"{self.log_prefix} 正在关闭HeartFChatting...") @@ -1225,149 +721,6 @@ class HeartFChatting: logger.info(f"{self.log_prefix} HeartFChatting关闭完成") - async def _build_replan_prompt(self, action: str, reasoning: str) -> str: - """构建 Replanner LLM 的提示词""" - prompt = (await global_prompt_manager.get_prompt_async("replan_prompt")).format( - action=action, - reasoning=reasoning, - ) - - # 在记录循环日志前检查关闭标志 - if not self._shutting_down: - self._current_cycle.complete_cycle() - self._cycle_history.append(self._current_cycle) - - # 记录循环信息和计时器结果 - timer_strings = [] - for name, elapsed in self._current_cycle.timers.items(): - formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" - timer_strings.append(f"{name}: {formatted_time}") - - logger.debug( - f"{self.log_prefix} 第 #{self._current_cycle.cycle_id}次思考完成," - f"耗时: {self._current_cycle.end_time - self._current_cycle.start_time:.2f}秒, " - f"动作: {self._current_cycle.action_type}" - + (f"\n计时器详情: {'; '.join(timer_strings)}" if timer_strings else "") - ) - - return prompt - - async def _send_response_messages( - self, anchor_message: Optional[MessageRecv], response_set: List[str], thinking_id: str - ) -> Optional[MessageSending]: - """发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender""" - if not anchor_message or not anchor_message.chat_stream: - logger.error(f"{self.log_prefix} 无法发送回复,缺少有效的锚点消息或聊天流。") - return None - - chat = anchor_message.chat_stream - chat_id = chat.stream_id - stream_name = chat_manager.get_stream_name(chat_id) or chat_id # 获取流名称用于日志 - - # 检查思考过程是否仍在进行,并获取开始时间 - thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id) - - if thinking_start_time is None: - logger.warning(f"[{stream_name}] {thinking_id} 思考过程未找到或已结束,无法发送回复。") - return None - - # 记录锚点消息ID和回复文本(在发送前记录) - self._current_cycle.set_response_info( - response_text=response_set, anchor_message_id=anchor_message.message_info.message_id - ) - - mark_head = False - first_bot_msg: Optional[MessageSending] = None - reply_message_ids = [] # 记录实际发送的消息ID - bot_user_info = UserInfo( - user_id=global_config.BOT_QQ, - user_nickname=global_config.BOT_NICKNAME, - platform=anchor_message.message_info.platform, - ) - - for i, msg_text in enumerate(response_set): - # 为每个消息片段生成唯一ID - part_message_id = f"{thinking_id}_{i}" - message_segment = Seg(type="text", data=msg_text) - bot_message = MessageSending( - message_id=part_message_id, # 使用片段的唯一ID - chat_stream=chat, - 
bot_user_info=bot_user_info, - sender_info=anchor_message.message_info.user_info, - message_segment=message_segment, - reply=anchor_message, # 回复原始锚点 - is_head=not mark_head, - is_emoji=False, - thinking_start_time=thinking_start_time, # 传递原始思考开始时间 - ) - try: - if not mark_head: - mark_head = True - first_bot_msg = bot_message # 保存第一个成功发送的消息对象 - await self.heart_fc_sender.type_and_send_message(bot_message, typing=False) - else: - await self.heart_fc_sender.type_and_send_message(bot_message, typing=True) - - reply_message_ids.append(part_message_id) # 记录我们生成的ID - - except Exception as e: - logger.error( - f"{self.log_prefix}[Sender-{thinking_id}] 发送回复片段 {i} ({part_message_id}) 时失败: {e}" - ) - # 这里可以选择是继续发送下一个片段还是中止 - - # 在尝试发送完所有片段后,完成原始的 thinking_id 状态 - try: - await self.heart_fc_sender.complete_thinking(chat_id, thinking_id) - except Exception as e: - logger.error(f"{self.log_prefix}[Sender-{thinking_id}] 完成思考状态 {thinking_id} 时出错: {e}") - - self._current_cycle.set_response_info( - response_text=response_set, # 保留原始文本 - anchor_message_id=anchor_message.message_info.message_id, # 保留锚点ID - reply_message_ids=reply_message_ids, # 添加实际发送的ID列表 - ) - - return first_bot_msg # 返回第一个成功发送的消息对象 - - async def _handle_emoji(self, anchor_message: Optional[MessageRecv], response_set: List[str], send_emoji: str = ""): - """处理表情包 (尝试锚定到 anchor_message),使用 HeartFCSender""" - if not anchor_message or not anchor_message.chat_stream: - logger.error(f"{self.log_prefix} 无法处理表情包,缺少有效的锚点消息或聊天流。") - return - - chat = anchor_message.chat_stream - - emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji) - - if emoji_raw: - emoji_path, description = emoji_raw - - emoji_cq = image_path_to_base64(emoji_path) - thinking_time_point = round(time.time(), 2) # 用于唯一ID - message_segment = Seg(type="emoji", data=emoji_cq) - bot_user_info = UserInfo( - user_id=global_config.BOT_QQ, - user_nickname=global_config.BOT_NICKNAME, - platform=anchor_message.message_info.platform, - ) - bot_message = MessageSending( - message_id="me" + str(thinking_time_point), # 表情消息的唯一ID - chat_stream=chat, - bot_user_info=bot_user_info, - sender_info=anchor_message.message_info.user_info, - message_segment=message_segment, - reply=anchor_message, # 回复原始锚点 - is_head=False, # 表情通常不是头部消息 - is_emoji=True, - # 不需要 thinking_start_time - ) - - try: - await self.heart_fc_sender.send_and_store(bot_message) - except Exception as e: - logger.error(f"{self.log_prefix} 发送表情包 {bot_message.message_info.message_id} 时失败: {e}") - def get_cycle_history(self, last_n: Optional[int] = None) -> List[Dict[str, Any]]: """获取循环历史记录 @@ -1388,124 +741,7 @@ class HeartFChatting: return self._cycle_history[-1].to_dict() return None - # --- 回复器 (Replier) 的定义 --- # - async def _replier_work( - self, - in_mind_reply: List[str], - reason: str, - anchor_message: MessageRecv, - thinking_id: str, - ) -> Optional[List[str]]: - """ - 回复器 (Replier): 核心逻辑,负责生成回复文本。 - (已整合原 HeartFCGenerator 的功能) - """ - try: - # 1. 获取情绪影响因子并调整模型温度 - arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier() - current_temp = global_config.llm_normal["temp"] * arousal_multiplier - self.model_normal.temperature = current_temp # 动态调整温度 - - # 2. 
获取信息捕捉器 - info_catcher = info_catcher_manager.get_info_catcher(thinking_id) - - # --- Determine sender_name for private chat --- - sender_name_for_prompt = "某人" # Default for group or if info unavailable - if not self.is_group_chat and self.chat_target_info: - # Prioritize person_name, then nickname - sender_name_for_prompt = ( - self.chat_target_info.get("person_name") - or self.chat_target_info.get("user_nickname") - or sender_name_for_prompt - ) - # --- End determining sender_name --- - - # 3. 构建 Prompt - with Timer("构建Prompt", {}): # 内部计时器,可选保留 - prompt = await prompt_builder.build_prompt( - build_mode="focus", - chat_stream=self.chat_stream, # Pass the stream object - in_mind_reply=in_mind_reply, - # Focus specific args: - reason=reason, - current_mind_info=self.sub_mind.current_mind, - structured_info=self.sub_mind.structured_info_str, - sender_name=sender_name_for_prompt, # Pass determined name - # Normal specific args (not used in focus mode): - # message_txt="", - ) - - # 4. 调用 LLM 生成回复 - content = None - reasoning_content = None - model_name = "unknown_model" - if not prompt: - logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。") - return None - - try: - with Timer("LLM生成", {}): # 内部计时器,可选保留 - content, reasoning_content, model_name = await self.model_normal.generate_response(prompt) - # logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n生成回复: {content}\n") - # 捕捉 LLM 输出信息 - info_catcher.catch_after_llm_generated( - prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name - ) - - except Exception as llm_e: - # 精简报错信息 - logger.error(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成失败: {llm_e}") - return None # LLM 调用失败则无法生成回复 - - # 5. 处理 LLM 响应 - if not content: - logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成了空内容。") - return None - - with Timer("处理响应", {}): # 内部计时器,可选保留 - processed_response = process_llm_response(content) - - if not processed_response: - logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] 处理后的回复为空。") - return None - - return processed_response - - except Exception as e: - # 更通用的错误处理,精简信息 - logger.error(f"{self.log_prefix}[Replier-{thinking_id}] 回复生成意外失败: {e}") - # logger.error(traceback.format_exc()) # 可以取消注释这行以在调试时查看完整堆栈 - return None - - # --- Methods moved from HeartFCController start --- - async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]: - """创建思考消息 (尝试锚定到 anchor_message)""" - if not anchor_message or not anchor_message.chat_stream: - logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。") - return None - - chat = anchor_message.chat_stream - messageinfo = anchor_message.message_info - bot_user_info = UserInfo( - user_id=global_config.BOT_QQ, - user_nickname=global_config.BOT_NICKNAME, - platform=messageinfo.platform, - ) - - thinking_time_point = round(time.time(), 2) - thinking_id = "mt" + str(thinking_time_point) - thinking_message = MessageThinking( - message_id=thinking_id, - chat_stream=chat, - bot_user_info=bot_user_info, - reply=anchor_message, # 回复的是锚点消息 - thinking_start_time=thinking_time_point, - ) - # Access MessageManager directly (using heart_fc_sender) - await self.heart_fc_sender.register_thinking(thinking_message) - return thinking_id - - async def _planner(self, current_mind: str, cycle_timers: dict, is_re_planned: bool = False) -> Dict[str, Any]: + async def _planner(self, all_plan_info: List[InfoBase], cycle_timers: dict) -> Dict[str, Any]: """ 规划器 (Planner): 使用LLM根据上下文决定是否和如何回复。 
重构为:让LLM返回结构化JSON文本,然后在代码中解析。 @@ -1515,7 +751,7 @@ class HeartFChatting: cycle_timers: 计时器字典 is_re_planned: 是否为重新规划 (此重构中暂时简化,不处理 is_re_planned 的特殊逻辑) """ - logger.info(f"{self.log_prefix}开始想要做什么") + logger.info(f"{self.log_prefix}开始 规划") actions_to_remove_temporarily = [] # --- 检查历史动作并决定临时移除动作 (逻辑保持不变) --- @@ -1553,16 +789,29 @@ class HeartFChatting: # --- 结束检查历史动作 --- # 获取观察信息 - observation = self.observations[0] - # if is_re_planned: # 暂时简化,不处理重新规划 - # await observation.observe() - observed_messages = observation.talking_message - observed_messages_str = observation.talking_message_str_truncate + for info in all_plan_info: + if isinstance(info, ObsInfo): + logger.debug(f"{self.log_prefix} 观察信息: {info}") + observed_messages = info.get_talking_message() + observed_messages_str = info.get_talking_message_str_truncate() + chat_type = info.get_chat_type() + if chat_type == "group": + is_group_chat = True + else: + is_group_chat = False + elif isinstance(info, MindInfo): + logger.debug(f"{self.log_prefix} 思维信息: {info}") + current_mind = info.get_current_mind() + elif isinstance(info, CycleInfo): + logger.debug(f"{self.log_prefix} 循环信息: {info}") + cycle_info = info.get_observe_info() + elif isinstance(info, StructuredInfo): + logger.debug(f"{self.log_prefix} 结构化信息: {info}") + structured_info = info.get_data() # --- 使用 LLM 进行决策 (JSON 输出模式) --- # action = "no_reply" # 默认动作 reasoning = "规划器初始化默认" - emoji_query = "" llm_error = False # LLM 请求或解析错误标志 # 获取我们将传递给 prompt 构建器和用于验证的当前可用动作 @@ -1580,22 +829,18 @@ class HeartFChatting: # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- prompt = await prompt_builder.build_planner_prompt( - is_group_chat=self.is_group_chat, # <-- Pass HFC state - chat_target_info=self.chat_target_info, # <-- Pass HFC state - cycle_history=self._cycle_history, # <-- Pass HFC state + is_group_chat=is_group_chat, # <-- Pass HFC state + chat_target_info=None, observed_messages_str=observed_messages_str, # <-- Pass local variable current_mind=current_mind, # <-- Pass argument - structured_info=self.sub_mind.structured_info_str, # <-- Pass SubMind info + structured_info=structured_info, # <-- Pass SubMind info current_available_actions=current_available_actions, # <-- Pass determined actions + cycle_info=cycle_info, # <-- Pass cycle info ) # --- 调用 LLM (普通文本生成) --- llm_content = None try: - # 假设 LLMRequest 有 generate_response 方法返回 (content, reasoning, model_name) - # 我们只需要 content - # !! 注意:这里假设 self.planner_llm 有 generate_response 方法 - # !! 如果你的 LLMRequest 类使用的是其他方法名,请相应修改 llm_content, _, _ = await self.planner_llm.generate_response(prompt=prompt) logger.debug(f"{self.log_prefix}[Planner] LLM 原始 JSON 响应 (预期): {llm_content}") except Exception as req_e: @@ -1604,9 +849,6 @@ class HeartFChatting: llm_error = True # 直接使用默认动作返回错误结果 action = "no_reply" # 明确设置为默认值 - emoji_query = "" # 明确设置为空 - # 不再立即返回,而是继续执行 finally 块以恢复动作 - # return { ... } # --- 解析 LLM 返回的 JSON (仅当 LLM 请求未出错时进行) --- if not llm_error and llm_content: @@ -1628,11 +870,12 @@ class HeartFChatting: if extracted_action == "reply": action_data = { "text": parsed_json.get("text", []), - "emojis": parsed_json.get("emojis", []) + "emojis": parsed_json.get("emojis", []), + "target": parsed_json.get("target", ""), } else: action_data = {} # 其他动作可能不需要额外数据 - + # 验证动作是否在当前可用列表中 # !! 
使用调用 prompt 时实际可用的动作列表进行验证 if extracted_action not in current_available_actions: @@ -1641,7 +884,6 @@ class HeartFChatting: ) action = "no_reply" reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}" - emoji_query = "" # 检查 no_reply 是否也恰好被移除了 (极端情况) if "no_reply" not in current_available_actions: logger.error( @@ -1662,56 +904,40 @@ class HeartFChatting: ) logger.debug(f"{self.log_prefix}动作信息: '{action_data}'") - except json.JSONDecodeError as json_e: + except Exception as json_e: logger.warning( f"{self.log_prefix}[Planner] 解析LLM响应JSON失败: {json_e}. LLM原始输出: '{llm_content}'" ) reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_reply'." action = "no_reply" # 解析失败则默认不回复 - emoji_query = "" llm_error = True # 标记解析错误 - except Exception as parse_e: - logger.error(f"{self.log_prefix}[Planner] 处理LLM响应时发生意外错误: {parse_e}") - reasoning = f"处理LLM响应时发生意外错误: {parse_e}. 将使用默认动作 'no_reply'." - action = "no_reply" - emoji_query = "" - llm_error = True elif not llm_error and not llm_content: # LLM 请求成功但返回空内容 logger.warning(f"{self.log_prefix}[Planner] LLM 返回了空内容。") reasoning = "LLM 返回了空内容,使用默认动作 'no_reply'." action = "no_reply" - emoji_query = "" llm_error = True # 标记为空响应错误 - # 如果 llm_error 在此阶段为 True,意味着请求成功但解析失败或返回空 - # 如果 llm_error 在请求阶段就为 True,则跳过了此解析块 - except Exception as outer_e: logger.error(f"{self.log_prefix}[Planner] Planner 处理过程中发生意外错误: {outer_e}") - logger.error(traceback.format_exc()) + traceback.print_exc() action = "error" # 发生未知错误,标记为 error 动作 reasoning = f"Planner 内部处理错误: {outer_e}" - emoji_query = "" llm_error = True finally: # --- 确保动作恢复 --- - # 检查 self._original_actions_backup 是否有值来判断是否需要恢复 if self.action_manager._original_actions_backup is not None: self.action_manager.restore_actions() logger.debug( f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}" ) - # --- 结束确保动作恢复 --- # --- 概率性忽略文本回复附带的表情 (逻辑保持不变) --- emoji = action_data.get("emojis") if action == "reply" and emoji: logger.debug(f"{self.log_prefix}[Planner] 大模型建议文字回复带表情: '{emoji}'") if random.random() > EMOJI_SEND_PRO: - logger.info( - f"{self.log_prefix}但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji}'" - ) + logger.info(f"{self.log_prefix}但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji}'") action_data["emojis"] = "" # 清空表情请求 else: logger.info(f"{self.log_prefix}好吧,加上表情 '{emoji}'") @@ -1730,7 +956,7 @@ class HeartFChatting: async def _handle_reply(self, reasoning: str, reply_data: dict, cycle_timers: dict) -> tuple[bool, str]: """ 处理统一的回复动作 - 可包含文本和表情,顺序任意 - + reply_data格式: { "text": ["你好啊", "今天天气真不错"], # 文本内容列表(可选) @@ -1740,56 +966,23 @@ class HeartFChatting: # 重置连续不回复计数器 self._lian_xu_bu_hui_fu_ci_shu = 0 self._lian_xu_deng_dai_shi_jian = 0.0 - - # 获取锚点消息 - anchor_message = await self._get_anchor_message() + + # 获取锚定消息 + observations: ChattingObservation = self.observations[0] + anchor_message = observations.serch_message_by_text(reply_data["target"]) + + # 如果没有找到锚点消息,创建一个占位符 if not anchor_message: - raise PlannerError("无法获取锚点消息") - - # 创建思考消息 - thinking_id = await self._create_thinking_message(anchor_message) - if not thinking_id: - raise PlannerError("无法创建思考消息") - - try: - has_sent_something = False - - # 处理文本部分 - text_parts = reply_data.get("text", []) - if text_parts: - with Timer("生成回复", cycle_timers): - # 可以保留原有的文本处理逻辑或进行适当调整 - reply = await self._replier_work( - in_mind_reply = text_parts, - anchor_message=anchor_message, - thinking_id=thinking_id, - reason=reasoning, - ) - - if reply: - 
with Timer("发送文本消息", cycle_timers): - await self._sender( - thinking_id=thinking_id, - anchor_message=anchor_message, - response_set=reply, - send_emoji="" # 不在这里处理表情 - ) - has_sent_something = True - else: - logger.warning(f"{self.log_prefix} 文本回复生成失败") - - # 处理表情部分 - emoji_keywords = reply_data.get("emojis", []) - for emoji in emoji_keywords: - if emoji: - await self._handle_emoji(anchor_message, [], emoji) - has_sent_something = True - - if not has_sent_something: - logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容") - - return has_sent_something, thinking_id - - except (ReplierError, SenderError) as e: - logger.error(f"{self.log_prefix} 回复失败: {e}") - return False, thinking_id + logger.info(f"{self.log_prefix} 未找到锚点消息,创建占位符") + anchor_message = await _create_empty_anchor_message( + self.chat_stream.platform, self.chat_stream.group_info, self.chat_stream + ) + if not anchor_message: + logger.error(f"{self.log_prefix} 创建占位符失败,无法继续处理回复") + return False, "" + else: + anchor_message.update_chat_stream(self.chat_stream) + + return await self.expressor.deal_reply( + cycle_timers=cycle_timers, action_data=reply_data, anchor_message=anchor_message, reasoning=reasoning + ) diff --git a/src/plugins/heartFC_chat/heartFC_sender.py b/src/plugins/heartFC_chat/heartFC_sender.py index b193ae44a..ca8fd1c03 100644 --- a/src/plugins/heartFC_chat/heartFC_sender.py +++ b/src/plugins/heartFC_chat/heartFC_sender.py @@ -99,9 +99,13 @@ class HeartFCSender: _ = message.update_thinking_time() # --- 条件应用 set_reply 逻辑 --- - if message.apply_set_reply_logic and message.is_head and not message.is_private_message(): + if ( + message.is_head + and not message.is_private_message() + and message.reply.processed_plain_text != "[System Trigger Context]" + ): logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...") - message.set_reply() + message.set_reply(message.reply) # --- 结束条件 set_reply --- await message.process() diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/plugins/heartFC_chat/heartflow_prompt_builder.py index b788fe4d2..e2f5de7ad 100644 --- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py +++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py @@ -15,29 +15,51 @@ from ..memory_system.Hippocampus import HippocampusManager from ..schedule.schedule_generator import bot_schedule from ..knowledge.knowledge_lib import qa_manager import traceback -from .heartFC_Cycleinfo import CycleInfo +from .heartFC_Cycleinfo import CycleDetail logger = get_logger("prompt") def init_prompt(): + # Prompt( + # """ + # {info_from_tools} + # {chat_target} + # {chat_talking_prompt} + # 现在你想要在群里发言或者回复。\n + # 你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality}"。 + # 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,你可以参考贴吧,知乎或者微博的回复风格。 + # 看到以上聊天记录,你刚刚在想: + + # {current_mind_info} + # 因为上述想法,你决定发言,原因是:{reason} + # 依照这些内容组织回复:{in_mind_reply},不要原句回复,根据下面的要求,对其进行修改 + # 要求:是尽量简短一些。把握聊天内容,{reply_style2}。不要复读自己说的话。{prompt_ger} + # {reply_style1},说中文,不要刻意突出自身学科背景。 + # {moderation_prompt}。不要浮夸,平淡一些。 + # 注意:回复不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""", + # "heart_flow_prompt", + # ) Prompt( """ -{info_from_tools} + 你可以参考以下的语言习惯: + 当表示惊叹时,使用 我嘞个xxxx + 当表示惊讶无语是,使用 不是? 
+ 当表示无语时,使用 阿这 + 当表示震惊时,使用 卧槽 + {chat_target} -{chat_talking_prompt} 现在你想要在群里发言或者回复。\n 你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality}"。 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,你可以参考贴吧,知乎或者微博的回复风格。 -看到以上聊天记录,你刚刚在想: -{current_mind_info} -因为上述想法,你决定发言,原因是:{reason} -依照这些内容组织回复:{in_mind_reply},不要原句回复,根据下面的要求,对其进行修改 -要求:是尽量简短一些。把握聊天内容,{reply_style2}。不要复读自己说的话。{prompt_ger} -{reply_style1},说中文,不要刻意突出自身学科背景。 -{moderation_prompt}。不要浮夸,平淡一些。 +你想表达:{in_mind_reply} +原因是:{reason} +请根据你想表达的内容,参考上述语言习惯,和下面的要求,给出回复 +回复要求: +尽量简短一些。{reply_style2}。{prompt_ger} +{reply_style1},说中文,不要刻意突出自身学科背景。不要浮夸,平淡一些。 注意:回复不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""", "heart_flow_prompt", ) @@ -71,14 +93,19 @@ def init_prompt(): 2. 回复(reply)适用: - 有实质性内容需要表达 - 有人提到你,但你还没有回应他 - - 可以追加emoji_query表达情绪(emoji_query填写表情包的适用场合,也就是当前场合) - - 不要追加太多表情 + - 在合适的时候添加表情(不要总是添加) + - 如果你要回复特定某人的某句话,或者你想回复较早的消息,请在target中指定那句话的原始文本 -3. 回复要求: +3. 回复target选择: + -如果选择了target,不用特别提到某个人的人名 + - 除非有明确的回复目标,否则不要添加target + +4. 回复要求: -不要太浮夸 -一次只回复一个人 + -一次只回复一个话题 -4. 自我对话处理: +5. 自我对话处理: - 如果是自己发的消息想继续,需自然衔接 - 避免重复或评价自己的发言 - 不要和自己聊天 @@ -95,8 +122,9 @@ def init_prompt(): 如果选择reply,请按以下JSON格式返回: {{ "action": "reply", - "text": ["第一段文本", "第二段文本"], // 可选,如果想发送文本 - "emojis": ["表情关键词1", "表情关键词2"] // 可选,如果想发送表情 + "text": "你想表达的内容", + "emojis": "表情关键词", + "target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)", "reasoning": "你的决策理由", }} @@ -196,7 +224,9 @@ def init_prompt(): ) -async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_stream, sender_name, in_mind_reply) -> str: +async def _build_prompt_focus( + reason, current_mind_info, structured_info, chat_stream, sender_name, in_mind_reply +) -> str: individuality = Individuality.get_instance() prompt_personality = individuality.get_prompt(x_person=0, level=2) @@ -265,19 +295,20 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s prompt = await global_prompt_manager.format_prompt( template_name, - info_from_tools=structured_info_prompt, + # info_from_tools=structured_info_prompt, chat_target=chat_target_1, # Used in group template - chat_talking_prompt=chat_talking_prompt, + # chat_talking_prompt=chat_talking_prompt, bot_name=global_config.BOT_NICKNAME, - prompt_personality=prompt_personality, + # prompt_personality=prompt_personality, + prompt_personality="", chat_target_2=chat_target_2, # Used in group template - current_mind_info=current_mind_info, + # current_mind_info=current_mind_info, reply_style2=reply_style2_chosen, reply_style1=reply_style1_chosen, reason=reason, in_mind_reply=in_mind_reply, prompt_ger=prompt_ger, - moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), + # moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), # sender_name is not used in the group template ) else: # Private chat @@ -766,11 +797,11 @@ class PromptBuilder: self, is_group_chat: bool, # Now passed as argument chat_target_info: Optional[dict], # Now passed as argument - cycle_history: Deque["CycleInfo"], # Now passed as argument (Type hint needs import or string) observed_messages_str: str, current_mind: Optional[str], structured_info: Dict[str, Any], current_available_actions: Dict[str, str], + cycle_info: Optional[str], # replan_prompt: str, # Replan logic still simplified ) -> str: """构建 Planner LLM 的提示词 (获取模板并填充数据)""" @@ -809,35 +840,6 @@ class PromptBuilder: else: current_mind_block = "你的内心想法:\n[没有特别的想法]" - # Cycle info block (using passed cycle_history) - cycle_info_block = "" - 
recent_active_cycles = [] - for cycle in reversed(cycle_history): - if cycle.action_taken: - recent_active_cycles.append(cycle) - if len(recent_active_cycles) == 3: - break - consecutive_text_replies = 0 - responses_for_prompt = [] - for cycle in recent_active_cycles: - if cycle.action_type == "text_reply": - consecutive_text_replies += 1 - response_text = cycle.response_info.get("response_text", []) - formatted_response = "[空回复]" if not response_text else " ".join(response_text) - responses_for_prompt.append(formatted_response) - else: - break - if consecutive_text_replies >= 3: - cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' - elif consecutive_text_replies == 2: - cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' - elif consecutive_text_replies == 1: - cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")' - if cycle_info_block: - cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n" - else: - cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n" - individuality = Individuality.get_instance() prompt_personality = individuality.get_prompt(x_person=2, level=2) @@ -857,7 +859,7 @@ class PromptBuilder: structured_info_block=structured_info_block, chat_content_block=chat_content_block, current_mind_block=current_mind_block, - cycle_info_block=cycle_info_block, + cycle_info_block=cycle_info, action_options_text=action_options_text, # example_action=example_action_key, ) @@ -870,9 +872,9 @@ class PromptBuilder: async def build_planner_prompt_parallel( self, - is_group_chat: bool, + is_group_chat: bool, chat_target_info: Optional[dict], - cycle_history: Deque["CycleInfo"], + cycle_history: Deque["CycleDetail"], observed_messages_str: str, structured_info: str, current_available_actions: Dict[str, str], @@ -931,10 +933,10 @@ class PromptBuilder: recent_active_cycles.append(cycle) if len(recent_active_cycles) == 3: break - + consecutive_text_replies = 0 responses_for_prompt = [] - + for cycle in recent_active_cycles: if cycle.action_type == "text_reply": consecutive_text_replies += 1 @@ -943,14 +945,14 @@ class PromptBuilder: responses_for_prompt.append(formatted_response) else: break - + if consecutive_text_replies >= 3: cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' elif consecutive_text_replies == 2: cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' elif consecutive_text_replies == 1: cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")' - + if cycle_info_block: cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n" else: diff --git a/src/plugins/heartFC_chat/hfc_utils.py b/src/plugins/heartFC_chat/hfc_utils.py new file mode 100644 index 000000000..0e0ae57f7 --- /dev/null +++ b/src/plugins/heartFC_chat/hfc_utils.py @@ -0,0 +1,44 @@ +import time +import traceback +from typing import Optional +from src.plugins.chat.message import MessageRecv, BaseMessageInfo +from src.plugins.chat.chat_stream import ChatStream +from src.plugins.chat.message import UserInfo +from src.common.logger_manager import get_logger + +logger = get_logger(__name__) + + +async def _create_empty_anchor_message( + platform: str, group_info: dict, chat_stream: ChatStream +) -> Optional[MessageRecv]: + """ + 重构观察到的最后一条消息作为回复的锚点, + 如果重构失败或观察为空,则创建一个占位符。 + """ + + try: + placeholder_id = 
f"mid_pf_{int(time.time() * 1000)}" + placeholder_user = UserInfo(user_id="system_trigger", user_nickname="System Trigger", platform=platform) + placeholder_msg_info = BaseMessageInfo( + message_id=placeholder_id, + platform=platform, + group_info=group_info, + user_info=placeholder_user, + time=time.time(), + ) + placeholder_msg_dict = { + "message_info": placeholder_msg_info.to_dict(), + "processed_plain_text": "[System Trigger Context]", + "raw_message": "", + "time": placeholder_msg_info.time, + } + anchor_message = MessageRecv(placeholder_msg_dict) + anchor_message.update_chat_stream(chat_stream) + logger.debug(f"创建占位符锚点消息: ID={anchor_message.message_info.message_id}") + return anchor_message + + except Exception as e: + logger.error(f"Error getting/creating anchor message: {e}") + logger.error(traceback.format_exc()) + return None diff --git a/src/plugins/heartFC_chat/info_processors/base_processor.py b/src/plugins/heartFC_chat/info_processors/base_processor.py new file mode 100644 index 000000000..b1780af27 --- /dev/null +++ b/src/plugins/heartFC_chat/info_processors/base_processor.py @@ -0,0 +1,48 @@ +from abc import ABC, abstractmethod +from typing import List, Any, Optional +from src.heart_flow.info.info_base import InfoBase +from src.heart_flow.chatting_observation import Observation +from src.common.logger_manager import get_logger + +logger = get_logger("base_processor") + + +class BaseProcessor(ABC): + """信息处理器基类 + + 所有具体的信息处理器都应该继承这个基类,并实现process_info方法。 + 支持处理InfoBase和Observation类型的输入。 + """ + + @abstractmethod + def __init__(self): + """初始化处理器""" + pass + + @abstractmethod + async def process_info( + self, infos: List[InfoBase], observations: Optional[List[Observation]] = None, **kwargs: Any + ) -> List[InfoBase]: + """处理信息对象的抽象方法 + + Args: + infos: InfoBase对象列表 + observations: 可选的Observation对象列表 + **kwargs: 其他可选参数 + + Returns: + List[InfoBase]: 处理后的InfoBase实例列表 + """ + pass + + def _create_processed_item(self, info_type: str, info_data: Any) -> dict: + """创建处理后的信息项 + + Args: + info_type: 信息类型 + info_data: 信息数据 + + Returns: + dict: 处理后的信息项 + """ + return {"type": info_type, "id": f"info_{info_type}", "content": info_data, "ttl": 3} diff --git a/src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py b/src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py new file mode 100644 index 000000000..8a3f3b832 --- /dev/null +++ b/src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py @@ -0,0 +1,70 @@ +from typing import List, Optional, Any +from src.heart_flow.info.obs_info import ObsInfo +from src.heart_flow.chatting_observation import Observation +from src.heart_flow.info.info_base import InfoBase +from .base_processor import BaseProcessor +from src.common.logger_manager import get_logger +from src.heart_flow.chatting_observation import ChattingObservation +from src.heart_flow.hfcloop_observation import HFCloopObservation +from src.heart_flow.info.cycle_info import CycleInfo + +logger = get_logger("observation") + + +class ChattingInfoProcessor(BaseProcessor): + """观察处理器 + + 用于处理Observation对象,将其转换为ObsInfo对象。 + """ + + def __init__(self): + """初始化观察处理器""" + super().__init__() + + async def process_info(self, observations: Optional[List[Observation]] = None, **kwargs: Any) -> List[InfoBase]: + """处理Observation对象 + + Args: + infos: InfoBase对象列表 + observations: 可选的Observation对象列表 + **kwargs: 其他可选参数 + + Returns: + List[InfoBase]: 处理后的ObsInfo实例列表 + """ + print(f"observations: {observations}") + processed_infos = [] + + # 处理Observation对象 + if 
observations: + for obs in observations: + print(f"obs: {obs}") + if isinstance(obs, ChattingObservation): + obs_info = ObsInfo() + + # 设置说话消息 + if hasattr(obs, "talking_message_str"): + obs_info.set_talking_message(obs.talking_message_str) + + # 设置截断后的说话消息 + if hasattr(obs, "talking_message_str_truncate"): + obs_info.set_talking_message_str_truncate(obs.talking_message_str_truncate) + + # 设置聊天类型 + is_group_chat = obs.is_group_chat + if is_group_chat: + chat_type = "group" + else: + chat_type = "private" + obs_info.set_chat_target(obs.chat_target_info.get("person_name", "某人")) + obs_info.set_chat_type(chat_type) + + logger.debug(f"聊天信息处理器处理后的信息: {obs_info}") + + processed_infos.append(obs_info) + if isinstance(obs, HFCloopObservation): + obs_info = CycleInfo() + obs_info.set_observe_info(obs.observe_info) + processed_infos.append(obs_info) + + return processed_infos diff --git a/src/heart_flow/sub_mind.py b/src/plugins/heartFC_chat/info_processors/mind_processor.py similarity index 57% rename from src/heart_flow/sub_mind.py rename to src/plugins/heartFC_chat/info_processors/mind_processor.py index 9379b8b0c..227fcd9be 100644 --- a/src/heart_flow/sub_mind.py +++ b/src/plugins/heartFC_chat/info_processors/mind_processor.py @@ -1,4 +1,4 @@ -from .observation import ChattingObservation +from src.heart_flow.chatting_observation import ChattingObservation, Observation from src.plugins.models.utils_model import LLMRequest from src.config.config import global_config import time @@ -6,17 +6,21 @@ import traceback from src.common.logger_manager import get_logger from src.individuality.individuality import Individuality import random -from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager -from src.do_tool.tool_use import ToolUser -from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls -from src.heart_flow.chat_state_info import ChatStateInfo +from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager +from src.plugins.utils.json_utils import safe_json_dumps from src.plugins.chat.chat_stream import chat_manager -from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo import difflib from src.plugins.person_info.relationship_manager import relationship_manager -from src.plugins.memory_system.Hippocampus import HippocampusManager -import jieba - +from .base_processor import BaseProcessor +from src.heart_flow.info.mind_info import MindInfo +from typing import List, Optional +from src.heart_flow.memory_observation import MemoryObservation +from src.heart_flow.hfcloop_observation import HFCloopObservation +from src.plugins.heartFC_chat.info_processors.processor_utils import ( + calculate_similarity, + calculate_replacement_probability, + get_spark, +) logger = get_logger("sub_heartflow") @@ -67,43 +71,9 @@ def init_prompt(): Prompt(private_prompt, "sub_heartflow_prompt_private_before") -def calculate_similarity(text_a: str, text_b: str) -> float: - """ - 计算两个文本字符串的相似度。 - """ - if not text_a or not text_b: - return 0.0 - matcher = difflib.SequenceMatcher(None, text_a, text_b) - return matcher.ratio() - - -def calculate_replacement_probability(similarity: float) -> float: - """ - 根据相似度计算替换的概率。 - 规则: - - 相似度 <= 0.4: 概率 = 0 - - 相似度 >= 0.9: 概率 = 1 - - 相似度 == 0.6: 概率 = 0.7 - - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7) - - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0) - """ - if similarity <= 0.4: - return 0.0 - elif similarity >= 0.9: - return 1.0 - elif 0.4 < similarity <= 0.6: - # p = 3.5 * s - 1.4 - probability = 3.5 * similarity - 1.4 - 
return max(0.0, probability) - else: # 0.6 < similarity < 0.9 - # p = s + 0.1 - probability = similarity + 0.1 - return min(1.0, max(0.0, probability)) - - -class SubMind: - def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: ChattingObservation): - self.last_active_time = None +class MindProcessor(BaseProcessor): + def __init__(self, subheartflow_id: str): + super().__init__() self.subheartflow_id = subheartflow_id self.llm_model = LLMRequest( @@ -113,14 +83,11 @@ class SubMind: request_type="sub_heart_flow", ) - self.chat_state = chat_state - self.observations = observations - self.current_mind = "" self.past_mind = [] self.structured_info = [] self.structured_info_str = "" - + name = chat_manager.get_stream_name(self.subheartflow_id) self.log_prefix = f"[{name}] " self._update_structured_info_str() @@ -153,16 +120,28 @@ class SubMind: self.structured_info_str = "\n".join(lines) logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}") - async def do_thinking_before_reply(self, history_cycle: list[CycleInfo] = None, parallel_mode: bool = True, no_tools: bool = True, return_prompt: bool = False, cycle_info: CycleInfo = None): + async def process_info(self, observations: Optional[List[Observation]] = None, *infos) -> List[dict]: + """处理信息对象 + + Args: + *infos: 可变数量的InfoBase类型的信息对象 + + Returns: + List[dict]: 处理后的结构化信息列表 + """ + current_mind = await self.do_thinking_before_reply(observations) + + mind_info = MindInfo() + mind_info.set_current_mind(current_mind) + + return [mind_info] + + async def do_thinking_before_reply(self, observations: Optional[List[Observation]] = None): """ 在回复前进行思考,生成内心想法并收集工具调用结果 - + 参数: - history_cycle: 历史循环信息 - parallel_mode: 是否在并行模式下执行,默认为True - no_tools: 是否禁用工具调用,默认为True - return_prompt: 是否返回prompt,默认为False - cycle_info: 循环信息对象,可用于记录详细执行信息 + observations: 观察信息 返回: 如果return_prompt为False: @@ -170,8 +149,6 @@ class SubMind: 如果return_prompt为True: tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt """ - # 更新活跃时间 - self.last_active_time = time.time() # ---------- 0. 更新和清理 structured_info ---------- if self.structured_info: @@ -191,68 +168,25 @@ class SubMind: # ---------- 1. 准备基础数据 ---------- # 获取现有想法和情绪状态 previous_mind = self.current_mind if self.current_mind else "" - mood_info = self.chat_state.mood - # 获取观察对象 - observation: ChattingObservation = self.observations[0] if self.observations else None - if not observation or not hasattr(observation, "is_group_chat"): # Ensure it's ChattingObservation or similar - logger.error(f"{self.log_prefix} 无法获取有效的观察对象或缺少聊天类型信息") - self.update_current_mind("(观察出错了...)") - return self.current_mind, self.past_mind - - is_group_chat = observation.is_group_chat - - chat_target_info = observation.chat_target_info - chat_target_name = "对方" # Default for private - if not is_group_chat and chat_target_info: - chat_target_name = ( - chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name - ) - - # 获取观察内容 - chat_observe_info = observation.get_observe_info() - person_list = observation.person_list - - # ---------- 2. 
获取记忆 ---------- - try: - # 从聊天内容中提取关键词 - chat_words = set(jieba.cut(chat_observe_info)) - # 过滤掉停用词和单字词 - keywords = [word for word in chat_words if len(word) > 1] - # 去重并限制数量 - keywords = list(set(keywords))[:5] - - logger.debug(f"{self.log_prefix} 提取的关键词: {keywords}") - # 检查已有记忆,过滤掉已存在的主题 - existing_topics = set() - for item in self.structured_info: - if item["type"] == "memory": - existing_topics.add(item["id"]) - - # 过滤掉已存在的主题 - filtered_keywords = [k for k in keywords if k not in existing_topics] - - if not filtered_keywords: - logger.debug(f"{self.log_prefix} 所有关键词对应的记忆都已存在,跳过记忆提取") - else: - # 调用记忆系统获取相关记忆 - related_memory = await HippocampusManager.get_instance().get_memory_from_topic( - valid_keywords=filtered_keywords, max_memory_num=3, max_memory_length=2, max_depth=3 - ) - - logger.debug(f"{self.log_prefix} 获取到的记忆: {related_memory}") - - if related_memory: - for topic, memory in related_memory: - new_item = {"type": "memory", "id": topic, "content": memory, "ttl": 3} - self.structured_info.append(new_item) - logger.debug(f"{self.log_prefix} 添加新记忆: {topic} - {memory}") - else: - logger.debug(f"{self.log_prefix} 没有找到相关记忆") - - except Exception as e: - logger.error(f"{self.log_prefix} 获取记忆时出错: {e}") - logger.error(traceback.format_exc()) + for observation in observations: + if isinstance(observation, ChattingObservation): + # 获取聊天元信息 + is_group_chat = observation.is_group_chat + chat_target_info = observation.chat_target_info + chat_target_name = "对方" # 私聊默认名称 + if not is_group_chat and chat_target_info: + # 优先使用person_name,其次user_nickname,最后回退到默认值 + chat_target_name = ( + chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name + ) + # 获取聊天内容 + chat_observe_info = observation.get_observe_info() + person_list = observation.person_list + if isinstance(observation, MemoryObservation): + memory_observe_info = observation.get_observe_info() + if isinstance(observation, HFCloopObservation): + hfcloop_observe_info = observation.get_observe_info() # ---------- 3. 准备个性化数据 ---------- # 获取个性化信息 @@ -268,75 +202,12 @@ class SubMind: # 获取当前时间 time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - # ---------- 4. 
构建思考指导部分 ---------- - # 创建本地随机数生成器,基于分钟数作为种子 - local_random = random.Random() - current_minute = int(time.strftime("%M")) - local_random.seed(current_minute) - - # 思考指导选项和权重 - hf_options = [ - ("可以参考之前的想法,在原来想法的基础上继续思考", 0.2), - ("可以参考之前的想法,在原来的想法上尝试新的话题", 0.4), - ("不要太深入", 0.2), - ("进行深入思考", 0.2), - ] - - # 准备循环信息块 (分析最近的活动循环) - recent_active_cycles = [] - for cycle in reversed(history_cycle): - # 只关心实际执行了动作的循环 - if cycle.action_taken: - recent_active_cycles.append(cycle) - # 最多找最近的3个活动循环 - if len(recent_active_cycles) == 3: - break - - cycle_info_block = "" - consecutive_text_replies = 0 - responses_for_prompt = [] - - # 检查这最近的活动循环中有多少是连续的文本回复 (从最近的开始看) - for cycle in recent_active_cycles: - if cycle.action_type == "text_reply": - consecutive_text_replies += 1 - # 获取回复内容,如果不存在则返回'[空回复]' - response_text = cycle.response_info.get("response_text", []) - # 使用简单的 join 来格式化回复内容列表 - formatted_response = "[空回复]" if not response_text else " ".join(response_text) - responses_for_prompt.append(formatted_response) - else: - # 一旦遇到非文本回复,连续性中断 - break - - # 根据连续文本回复的数量构建提示信息 - # 注意: responses_for_prompt 列表是从最近到最远排序的 - if consecutive_text_replies >= 3: # 如果最近的三个活动都是文本回复 - cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' - elif consecutive_text_replies == 2: # 如果最近的两个活动是文本回复 - cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' - elif consecutive_text_replies == 1: # 如果最近的一个活动是文本回复 - cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")' - - # 包装提示块,增加可读性,即使没有连续回复也给个标记 - if cycle_info_block: - cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n" - else: - # 如果最近的活动循环不是文本回复,或者没有活动循环 - cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n" - - # 加权随机选择思考指导 - hf_do_next = local_random.choices( - [option[0] for option in hf_options], weights=[option[1] for option in hf_options], k=1 - )[0] + spark_prompt = get_spark() # ---------- 5. 构建最终提示词 ---------- - # --- 根据聊天类型选择模板 --- - logger.debug(f"is_group_chat: {is_group_chat}") - template_name = "sub_heartflow_prompt_before" if is_group_chat else "sub_heartflow_prompt_private_before" logger.debug(f"{self.log_prefix} 使用{'群聊' if is_group_chat else '私聊'}思考模板") - + prompt = (await global_prompt_manager.get_prompt_async(template_name)).format( extra_info=self.structured_info_str, prompt_personality=prompt_personality, @@ -344,32 +215,22 @@ class SubMind: bot_name=individuality.name, time_now=time_now, chat_observe_info=chat_observe_info, - mood_info=mood_info, - hf_do_next=hf_do_next, - last_mind = previous_mind, - cycle_info_block=cycle_info_block, + mood_info="mood_info", + hf_do_next=spark_prompt, + last_mind=previous_mind, + cycle_info_block=hfcloop_observe_info, chat_target_name=chat_target_name, ) # 在构建完提示词后,生成最终的prompt字符串 final_prompt = prompt - - # ---------- 6. 
调用LLM ---------- - # 如果指定了cycle_info,记录structured_info和prompt - if cycle_info: - cycle_info.set_submind_info( - prompt=final_prompt, - structured_info=self.structured_info_str - ) content = "" # 初始化内容变量 try: # 调用LLM生成响应 - response = await self.llm_model.generate_response_async( - prompt=final_prompt - ) - + response, _ = await self.llm_model.generate_response_async(prompt=final_prompt) + # 直接使用LLM返回的文本响应作为 content content = response if response else "" @@ -380,15 +241,26 @@ class SubMind: content = "思考过程中出现错误" # 记录初步思考结果 - logger.debug(f"{self.log_prefix} 初步心流思考结果: {content}\nprompt: {final_prompt}\n") + logger.debug(f"{self.log_prefix} 思考prompt: \n{final_prompt}\n") # 处理空响应情况 if not content: content = "(不知道该想些什么...)" logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。") - # ---------- 7. 应用概率性去重和修饰 ---------- - new_content = content # 保存 LLM 直接输出的结果 + # ---------- 8. 更新思考状态并返回结果 ---------- + logger.info(f"{self.log_prefix} 思考结果: {content}") + # 更新当前思考内容 + self.update_current_mind(content) + + return content + + def update_current_mind(self, response): + if self.current_mind: # 只有当 current_mind 非空时才添加到 past_mind + self.past_mind.append(self.current_mind) + self.current_mind = response + + def de_similar(self, previous_mind, new_content): try: similarity = calculate_similarity(previous_mind, new_content) replacement_prob = calculate_replacement_probability(similarity) @@ -422,36 +294,44 @@ class SubMind: else: # 相似度较高但非100%,执行标准去重逻辑 logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...") - logger.debug(f"{self.log_prefix} previous_mind类型: {type(previous_mind)}, new_content类型: {type(new_content)}") - + logger.debug( + f"{self.log_prefix} previous_mind类型: {type(previous_mind)}, new_content类型: {type(new_content)}" + ) + matcher = difflib.SequenceMatcher(None, previous_mind, new_content) logger.debug(f"{self.log_prefix} matcher类型: {type(matcher)}") - + deduplicated_parts = [] last_match_end_in_b = 0 - + # 获取并记录所有匹配块 matching_blocks = matcher.get_matching_blocks() logger.debug(f"{self.log_prefix} 匹配块数量: {len(matching_blocks)}") - logger.debug(f"{self.log_prefix} 匹配块示例(前3个): {matching_blocks[:3] if len(matching_blocks) > 3 else matching_blocks}") - + logger.debug( + f"{self.log_prefix} 匹配块示例(前3个): {matching_blocks[:3] if len(matching_blocks) > 3 else matching_blocks}" + ) + # get_matching_blocks()返回形如[(i, j, n), ...]的列表,其中i是a中的索引,j是b中的索引,n是匹配的长度 for idx, match in enumerate(matching_blocks): if not isinstance(match, tuple): logger.error(f"{self.log_prefix} 匹配块 {idx} 不是元组类型,而是 {type(match)}: {match}") continue - + try: _i, j, n = match # 解包元组为三个变量 logger.debug(f"{self.log_prefix} 匹配块 {idx}: i={_i}, j={j}, n={n}") - + if last_match_end_in_b < j: # 确保添加的是字符串,而不是元组 try: non_matching_part = new_content[last_match_end_in_b:j] - logger.debug(f"{self.log_prefix} 添加非匹配部分: '{non_matching_part}', 类型: {type(non_matching_part)}") + logger.debug( + f"{self.log_prefix} 添加非匹配部分: '{non_matching_part}', 类型: {type(non_matching_part)}" + ) if not isinstance(non_matching_part, str): - logger.warning(f"{self.log_prefix} 非匹配部分不是字符串类型: {type(non_matching_part)}") + logger.warning( + f"{self.log_prefix} 非匹配部分不是字符串类型: {type(non_matching_part)}" + ) non_matching_part = str(non_matching_part) deduplicated_parts.append(non_matching_part) except Exception as e: @@ -461,20 +341,20 @@ class SubMind: except Exception as e: logger.error(f"{self.log_prefix} 处理匹配块时出错: {e}") logger.error(traceback.format_exc()) - + logger.debug(f"{self.log_prefix} 去重前部分列表: {deduplicated_parts}") logger.debug(f"{self.log_prefix} 
列表元素类型: {[type(part) for part in deduplicated_parts]}") - + # 确保所有元素都是字符串 deduplicated_parts = [str(part) for part in deduplicated_parts] - + # 防止列表为空 if not deduplicated_parts: logger.warning(f"{self.log_prefix} 去重后列表为空,添加空字符串") deduplicated_parts = [""] - + logger.debug(f"{self.log_prefix} 处理后的部分列表: {deduplicated_parts}") - + try: deduplicated_content = "".join(deduplicated_parts).strip() logger.debug(f"{self.log_prefix} 拼接后的去重内容: '{deduplicated_content}'") @@ -482,7 +362,7 @@ class SubMind: logger.error(f"{self.log_prefix} 拼接去重内容时出错: {e}") logger.error(traceback.format_exc()) deduplicated_content = "" - + if deduplicated_content: # 根据概率决定是否添加词语 prefix_str = "" @@ -511,31 +391,7 @@ class SubMind: # 出错时保留原始 content content = new_content - # ---------- 8. 更新思考状态并返回结果 ---------- - logger.info(f"{self.log_prefix} 最终心流思考结果: {content}") - # 更新当前思考内容 - self.update_current_mind(content) - - # 在原始代码的return语句前,记录结果并根据return_prompt决定返回值 - if cycle_info: - cycle_info.set_submind_info( - result=content - ) - - if return_prompt: - return content, self.past_mind, final_prompt - else: - return content, self.past_mind - - def update_current_mind(self, response): - if self.current_mind: # 只有当 current_mind 非空时才添加到 past_mind - self.past_mind.append(self.current_mind) - # 可以考虑限制 past_mind 的大小,例如: - # max_past_mind_size = 10 - # if len(self.past_mind) > max_past_mind_size: - # self.past_mind.pop(0) # 移除最旧的 - - self.current_mind = response + return content init_prompt() diff --git a/src/plugins/heartFC_chat/info_processors/processor_utils.py b/src/plugins/heartFC_chat/info_processors/processor_utils.py new file mode 100644 index 000000000..77cdc7a6b --- /dev/null +++ b/src/plugins/heartFC_chat/info_processors/processor_utils.py @@ -0,0 +1,56 @@ +import difflib +import random +import time + + +def calculate_similarity(text_a: str, text_b: str) -> float: + """ + 计算两个文本字符串的相似度。 + """ + if not text_a or not text_b: + return 0.0 + matcher = difflib.SequenceMatcher(None, text_a, text_b) + return matcher.ratio() + + +def calculate_replacement_probability(similarity: float) -> float: + """ + 根据相似度计算替换的概率。 + 规则: + - 相似度 <= 0.4: 概率 = 0 + - 相似度 >= 0.9: 概率 = 1 + - 相似度 == 0.6: 概率 = 0.7 + - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7) + - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0) + """ + if similarity <= 0.4: + return 0.0 + elif similarity >= 0.9: + return 1.0 + elif 0.4 < similarity <= 0.6: + # p = 3.5 * s - 1.4 + probability = 3.5 * similarity - 1.4 + return max(0.0, probability) + else: # 0.6 < similarity < 0.9 + # p = s + 0.1 + probability = similarity + 0.1 + return min(1.0, max(0.0, probability)) + + +def get_spark(): + local_random = random.Random() + current_minute = int(time.strftime("%M")) + local_random.seed(current_minute) + + hf_options = [ + ("可以参考之前的想法,在原来想法的基础上继续思考", 0.2), + ("可以参考之前的想法,在原来的想法上尝试新的话题", 0.4), + ("不要太深入", 0.2), + ("进行深入思考", 0.2), + ] + # 加权随机选择思考指导 + hf_do_next = local_random.choices( + [option[0] for option in hf_options], weights=[option[1] for option in hf_options], k=1 + )[0] + + return hf_do_next diff --git a/src/plugins/heartFC_chat/info_processors/tool_processor.py b/src/plugins/heartFC_chat/info_processors/tool_processor.py new file mode 100644 index 000000000..fa2cb20b9 --- /dev/null +++ b/src/plugins/heartFC_chat/info_processors/tool_processor.py @@ -0,0 +1,200 @@ +from src.heart_flow.chatting_observation import ChattingObservation +from src.plugins.models.utils_model import LLMRequest +from src.config.config import global_config +import time +from src.common.logger_manager 
import get_logger +from src.individuality.individuality import Individuality +from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager +from src.do_tool.tool_use import ToolUser +from src.plugins.utils.json_utils import process_llm_tool_calls +from src.plugins.person_info.relationship_manager import relationship_manager +from .base_processor import BaseProcessor +from typing import List, Optional +from src.heart_flow.chatting_observation import Observation +from src.heart_flow.working_observation import WorkingObservation +from src.heart_flow.info.structured_info import StructuredInfo + +logger = get_logger("tool_use") + + +def init_prompt(): + # ... 原有代码 ... + + # 添加工具执行器提示词 + tool_executor_prompt = """ +你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。 + +你要在群聊中扮演以下角色: +{prompt_personality} + +你当前的额外信息: +{extra_info} + +你的心情是:{mood_info} + +{relation_prompt} + +群里正在进行的聊天内容: +{chat_observe_info} + +请仔细分析聊天内容,考虑以下几点: +1. 内容中是否包含需要查询信息的问题 +2. 是否需要执行特定操作 +3. 是否有明确的工具使用指令 +4. 考虑用户与你的关系以及当前的对话氛围 + +如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"。 +尽量只在确实必要时才使用工具。 +""" + Prompt(tool_executor_prompt, "tool_executor_prompt") + + +class ToolProcessor(BaseProcessor): + def __init__(self, subheartflow_id: str): + super().__init__() + self.subheartflow_id = subheartflow_id + self.log_prefix = f"[{subheartflow_id}:ToolExecutor] " + self.llm_model = LLMRequest( + model=global_config.llm_normal, + max_tokens=500, + request_type="tool_execution", + ) + self.structured_info = [] + + async def process_info(self, observations: Optional[List[Observation]] = None, *infos) -> List[dict]: + """处理信息对象 + + Args: + *infos: 可变数量的InfoBase类型的信息对象 + + Returns: + list: 处理后的结构化信息列表 + """ + + if observations: + for observation in observations: + if isinstance(observation, ChattingObservation): + result, used_tools, prompt = await self.execute_tools(observation) + + # 更新WorkingObservation中的结构化信息 + for observation in observations: + if isinstance(observation, WorkingObservation): + for structured_info in result: + logger.debug(f"{self.log_prefix} 更新WorkingObservation中的结构化信息: {structured_info}") + observation.add_structured_info(structured_info) + + working_infos = observation.get_observe_info() + logger.debug(f"{self.log_prefix} 获取更新后WorkingObservation中的结构化信息: {working_infos}") + + structured_info = StructuredInfo() + for working_info in working_infos: + structured_info.set_info(working_info.get("type"), working_info.get("content")) + + return [structured_info] + + async def execute_tools(self, observation: ChattingObservation): + """ + 并行执行工具,返回结构化信息 + + 参数: + sub_mind: 子思维对象 + chat_target_name: 聊天目标名称,默认为"对方" + is_group_chat: 是否为群聊,默认为False + return_details: 是否返回详细信息,默认为False + cycle_info: 循环信息对象,可用于记录详细执行信息 + + 返回: + 如果return_details为False: + List[Dict]: 工具执行结果的结构化信息列表 + 如果return_details为True: + Tuple[List[Dict], List[str], str]: (工具执行结果列表, 使用的工具列表, 工具执行提示词) + """ + tool_instance = ToolUser() + tools = tool_instance._define_tools() + + logger.debug(f"observation: {observation}") + logger.debug(f"observation.chat_target_info: {observation.chat_target_info}") + logger.debug(f"observation.is_group_chat: {observation.is_group_chat}") + logger.debug(f"observation.person_list: {observation.person_list}") + + is_group_chat = observation.is_group_chat + if not is_group_chat: + chat_target_name = ( + observation.chat_target_info.get("person_name") + or observation.chat_target_info.get("user_nickname") + or "对方" + ) + else: + chat_target_name = "群聊" + + chat_observe_info = observation.get_observe_info() + person_list = 
observation.person_list + + # 构建关系信息 + relation_prompt = "【关系信息】\n" + for person in person_list: + relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) + + # 获取个性信息 + individuality = Individuality.get_instance() + prompt_personality = individuality.get_prompt(x_person=2, level=2) + + # 获取心情信息 + mood_info = observation.chat_state.mood if hasattr(observation, "chat_state") else "" + + # 获取时间信息 + time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + + # 构建专用于工具调用的提示词 + prompt = await global_prompt_manager.format_prompt( + "tool_executor_prompt", + extra_info="extra_structured_info", + chat_observe_info=chat_observe_info, + # chat_target_name=chat_target_name, + is_group_chat=is_group_chat, + relation_prompt=relation_prompt, + prompt_personality=prompt_personality, + mood_info=mood_info, + bot_name=individuality.name, + time_now=time_now, + ) + + # 调用LLM,专注于工具使用 + logger.info(f"开始执行工具调用{prompt}") + response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools) + + logger.debug(f"获取到工具原始输出:\n{tool_calls}") + # 处理工具调用和结果收集,类似于SubMind中的逻辑 + new_structured_items = [] + used_tools = [] # 记录使用了哪些工具 + + if tool_calls: + success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls) + if success and valid_tool_calls: + for tool_call in valid_tool_calls: + try: + # 记录使用的工具名称 + tool_name = tool_call.get("name", "unknown_tool") + used_tools.append(tool_name) + + result = await tool_instance._execute_tool_call(tool_call) + + name = result.get("type", "unknown_type") + content = result.get("content", "") + + logger.info(f"工具{name},获得信息:{content}") + if result: + new_item = { + "type": result.get("type", "unknown_type"), + "id": result.get("id", f"tool_exec_{time.time()}"), + "content": result.get("content", ""), + "ttl": 3, + } + new_structured_items.append(new_item) + except Exception as e: + logger.error(f"{self.log_prefix}工具执行失败: {e}") + + return new_structured_items, used_tools, prompt + + +init_prompt() diff --git a/src/plugins/heartFC_chat/normal_chat.py b/src/plugins/heartFC_chat/normal_chat.py index e921f85ce..d874e4427 100644 --- a/src/plugins/heartFC_chat/normal_chat.py +++ b/src/plugins/heartFC_chat/normal_chat.py @@ -352,6 +352,9 @@ class NormalChat: # --- 新增:处理初始高兴趣消息的私有方法 --- async def _process_initial_interest_messages(self): """处理启动时存在于 interest_dict 中的高兴趣消息。""" + if not self.interest_dict: + return # 如果 interest_dict 为 None 或空,直接返回 + items_to_process = list(self.interest_dict.items()) if not items_to_process: return # 没有初始消息,直接返回
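
Reviewer note (illustrative, not part of the patch): the refactor splits the old SubMind flow into Observation objects on one side and info_processors on the other, with InfoBase subclasses (ObsInfo, MindInfo, CycleInfo, StructuredInfo) as the hand-off format that the reworked HeartFChatting._planner(all_plan_info, cycle_timers) consumes. The sketch below shows that contract in minimal form. It assumes a private chat stream and an initialized project runtime (global_config, chat_manager, message storage); the chat_id value and the build_obs_infos helper are hypothetical, and the real orchestration in heartFC_chat.py also feeds HFCloopObservation and WorkingObservation into MindProcessor and ToolProcessor through the same process_info interface.

import asyncio

from src.heart_flow.chatting_observation import ChattingObservation
from src.plugins.heartFC_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor


async def build_obs_infos(chat_id: str):
    # Observation layer: pull recent chat context for this stream.
    observation = ChattingObservation(chat_id)
    await observation.initialize()

    # Processor layer: ChattingInfoProcessor converts the observation into ObsInfo
    # (talking messages, chat type/target). MindProcessor and ToolProcessor expose
    # the same async process_info(observations=...) contract, yielding MindInfo and
    # StructuredInfo respectively.
    processor = ChattingInfoProcessor()
    obs_infos = await processor.process_info(observations=[observation])

    # The combined InfoBase list from all processors is what the refactored _planner
    # iterates over (isinstance checks on ObsInfo / MindInfo / CycleInfo /
    # StructuredInfo) to assemble the planner prompt.
    return obs_infos


if __name__ == "__main__":
    print(asyncio.run(build_obs_infos("example_chat_id")))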