diff --git a/.gitignore b/.gitignore
index 040a445bf..df3ab670f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,6 +18,7 @@ MaiBot-Napcat-Adapter
 nonebot-maibot-adapter/
 *.zip
 run.bat
+log_debug/
 run_none.bat
 run.py
 message_queue_content.txt
@@ -26,8 +27,8 @@ message_queue_window.bat
 message_queue_window.txt
 queue_update.txt
 memory_graph.gml
-/src/do_tool/tool_can_use/auto_create_tool.py
-/src/do_tool/tool_can_use/execute_python_code_tool.py
+/src/tools/tool_can_use/auto_create_tool.py
+/src/tools/tool_can_use/execute_python_code_tool.py
 .env
 .env.*
 .cursor
diff --git a/run_voice.bat b/run_voice.bat
new file mode 100644
index 000000000..d4c8b0c64
--- /dev/null
+++ b/run_voice.bat
@@ -0,0 +1,2 @@
+@echo off
+start "Voice Adapter" cmd /k "call conda activate maicore && cd /d C:\GitHub\maimbot_tts_adapter && echo Running Napcat Adapter... && python maimbot_pipeline.py"
\ No newline at end of file
diff --git a/scripts/import_openie.py b/scripts/import_openie.py
index 16bf1aa72..90579bcef 100644
--- a/scripts/import_openie.py
+++ b/scripts/import_openie.py
@@ -10,13 +10,13 @@
 from time import sleep
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
-from src.plugins.knowledge.src.lpmmconfig import PG_NAMESPACE, global_config
-from src.plugins.knowledge.src.embedding_store import EmbeddingManager
-from src.plugins.knowledge.src.llm_client import LLMClient
-from src.plugins.knowledge.src.open_ie import OpenIE
-from src.plugins.knowledge.src.kg_manager import KGManager
+from src.chat.knowledge.src.lpmmconfig import PG_NAMESPACE, global_config
+from src.chat.knowledge.src.embedding_store import EmbeddingManager
+from src.chat.knowledge.src.llm_client import LLMClient
+from src.chat.knowledge.src.open_ie import OpenIE
+from src.chat.knowledge.src.kg_manager import KGManager
 from src.common.logger import get_module_logger
-from src.plugins.knowledge.src.utils.hash import get_sha256
+from src.chat.knowledge.src.utils.hash import get_sha256
 # 添加项目根目录到 sys.path
diff --git a/scripts/info_extraction.py b/scripts/info_extraction.py
index 44ded983a..29e327300 100644
--- a/scripts/info_extraction.py
+++ b/scripts/info_extraction.py
@@ -13,11 +13,11 @@
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 from rich.progress import Progress  # 替换为 rich 进度条
 from src.common.logger import get_module_logger
-from src.plugins.knowledge.src.lpmmconfig import global_config
-from src.plugins.knowledge.src.ie_process import info_extract_from_str
-from src.plugins.knowledge.src.llm_client import LLMClient
-from src.plugins.knowledge.src.open_ie import OpenIE
-from src.plugins.knowledge.src.raw_processing import load_raw_data
+from src.chat.knowledge.src.lpmmconfig import global_config
+from src.chat.knowledge.src.ie_process import info_extract_from_str
+from src.chat.knowledge.src.llm_client import LLMClient
+from src.chat.knowledge.src.open_ie import OpenIE
+from src.chat.knowledge.src.raw_processing import load_raw_data
 from rich.progress import (
     BarColumn,
     TimeElapsedColumn,
diff --git a/scripts/raw_data_preprocessor.py b/scripts/raw_data_preprocessor.py
index 33fdede9e..5ac3dd67c 100644
--- a/scripts/raw_data_preprocessor.py
+++ b/scripts/raw_data_preprocessor.py
@@ -6,7 +6,7 @@
 import datetime  # 新增导入
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 from src.common.logger_manager import get_logger
-from src.plugins.knowledge.src.lpmmconfig import global_config
+from src.chat.knowledge.src.lpmmconfig import global_config
 logger = 
get_logger("lpmm")
 ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
diff --git a/src/heart_flow/0.6Bing.md b/src/0.6Bing.md
similarity index 83%
rename from src/heart_flow/0.6Bing.md
rename to src/0.6Bing.md
index de5628e78..80a29a84d 100644
--- a/src/heart_flow/0.6Bing.md
+++ b/src/0.6Bing.md
@@ -24,14 +24,6 @@
 - 目标:提升 `HeartFlowChatInstance` (HFC) 回复的多样性、一致性和真实感。
 - 前置:需要重构 Prompt 构建逻辑,可能引入 `PromptBuilder` 并提供标准接口 (认为是必须步骤)。
-- **扩展观察系统 (Observation System)**:
-  - 目前主要依赖 `ChattingObservation` 获取消息。
-  - 计划引入更多 `Observation` 类型,为 `SubHeartflow` 提供更丰富的上下文:
-    - Mai 的全局状态 (`MaiStateInfo`)。
-    - `SubHeartflow` 自身的聊天状态 (`ChatStateInfo`) 和参数配置。
-    - Mai 的系统配置、连接平台信息。
-    - 其他相关聊天或系统的聚合信息。
-  - 目标:让 `SubHeartflow` 基于更全面的信息进行决策。
-
 - **增强工具调用能力 (Enhanced Tool Usage)**:
   - 扩展 `HeartFlowChatInstance` (HFC) 可用的工具集。
@@ -59,13 +51,6 @@
 - 让 LLM 分析提供的文本材料(如小说、背景故事)来提取人格特质和相关信息。
 - **优势**: 替代易出错且标准不一的手动配置,生成更丰富、一致、包含配套资源且易于系统理解和应用的人格包。
-- **优化表情包处理与理解 (Enhanced Emoji Handling and Understanding)**:
-  - **面临挑战**:
-    - **历史记录表示**: 如何在聊天历史中有效表示表情包,供 LLM 理解。
-    - **语义理解**: 如何让 LLM 准确把握表情包的含义、情感和语境。
-    - **场景判断与选择**: 如何让 LLM 判断何时适合使用表情包,并选择最贴切的一个。
-  - **目标**: 提升 Mai 理解和运用表情包的能力,使交互更自然生动。
-  - **说明**: 可能需要较多时间进行数据处理和模型调优,但对改善体验潜力巨大。
 - **探索高级记忆检索机制 (GE 系统概念):**
   - 研究超越简单关键词/近期性检索的记忆模型。
diff --git a/src/MaiBot0.6roadmap.md b/src/MaiBot0.6roadmap.md
deleted file mode 100644
index 54774197e..000000000
--- a/src/MaiBot0.6roadmap.md
+++ /dev/null
@@ -1,16 +0,0 @@
-MaiCore/MaiBot 0.6路线图 draft
-
-0.6.3:解决0.6.x版本核心问题,改进功能
-主要功能加入
-LPMM全面替代旧知识库
-采用新的HFC回复模式,取代旧心流
-合并推理模式和心流模式,根据麦麦自己决策回复模式
-提供新的表情包系统
-
-0.6.4:提升用户体验,交互优化
-加入webui
-提供麦麦 API
-修复prompt建构的各种问题
-修复各种bug
-调整代码文件结构,重构部分落后设计
-
diff --git a/src/heart_flow/README.md b/src/README.md
similarity index 100%
rename from src/heart_flow/README.md
rename to src/README.md
diff --git a/src/api/apiforgui.py b/src/api/apiforgui.py
index a266f8e86..d6f223297 100644
--- a/src/api/apiforgui.py
+++ b/src/api/apiforgui.py
@@ -1,5 +1,5 @@
-from src.heart_flow.heartflow import heartflow
-from src.heart_flow.sub_heartflow import ChatState
+from src.chat.heart_flow.heartflow import heartflow
+from src.chat.heart_flow.sub_heartflow import ChatState
 from src.common.logger_manager import get_logger

 logger = get_logger("api")
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 275938045..0b23fb993 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -34,14 +34,6 @@ class APIBotConfig:
     gender: str  # 性别
     appearance: str  # 外貌特征描述

-    # schedule
-    enable_schedule_gen: bool  # 是否启用日程表
-    enable_schedule_interaction: bool  # 日程表是否影响回复模式
-    prompt_schedule_gen: str  # 日程生成提示词
-    schedule_doing_update_interval: int  # 日程表更新间隔(秒)
-    schedule_temperature: float  # 日程表温度
-    time_zone: str  # 时区
-
     # platforms
     platforms: Dict[str, str]  # 平台信息

@@ -164,7 +156,6 @@ class APIBotConfig:
             "groups",
             "personality",
             "identity",
-            "schedule",
             "platforms",
             "chat",
             "normal_chat",
diff --git a/src/api/main.py b/src/api/main.py
index 48b03b586..5e9322827 100644
--- a/src/api/main.py
+++ b/src/api/main.py
@@ -3,7 +3,7 @@ from strawberry.fastapi import GraphQLRouter
 import os
 import sys
-# from src.heart_flow.heartflow import heartflow
+# from src.chat.heart_flow.heartflow import heartflow
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
 # from src.config.config import BotConfig
 from src.common.logger_manager import get_logger
@@ -15,7 +15,7 @@ from src.api.apiforgui import (
     get_subheartflow_cycle_info,
     get_all_states,
 )
-from src.heart_flow.sub_heartflow 
import ChatState
+from src.chat.heart_flow.sub_heartflow import ChatState
 from src.api.basic_info_api import get_all_basic_info  # 新增导入
 # import uvicorn
diff --git a/src/chat/__init__.py b/src/chat/__init__.py
new file mode 100644
index 000000000..931c30ff3
--- /dev/null
+++ b/src/chat/__init__.py
@@ -0,0 +1,17 @@
+"""
+MaiMBot插件系统
+包含聊天、情绪、记忆、日程等功能模块
+"""
+
+from src.chat.message_receive.chat_stream import chat_manager
+from src.chat.emoji_system.emoji_manager import emoji_manager
+from src.chat.person_info.relationship_manager import relationship_manager
+from src.chat.normal_chat.willing.willing_manager import willing_manager
+
+# 导出主要组件供外部使用
+__all__ = [
+    "chat_manager",
+    "emoji_manager",
+    "relationship_manager",
+    "willing_manager",
+]
diff --git a/src/plugins/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py
similarity index 99%
rename from src/plugins/emoji_system/emoji_manager.py
rename to src/chat/emoji_system/emoji_manager.py
index d105e0b8b..5d800866f 100644
--- a/src/plugins/emoji_system/emoji_manager.py
+++ b/src/chat/emoji_system/emoji_manager.py
@@ -12,7 +12,7 @@
 import re
 from ...common.database import db
 from ...config.config import global_config
-from ..chat.utils_image import image_path_to_base64, image_manager
+from ..utils.utils_image import image_path_to_base64, image_manager
 from ..models.utils_model import LLMRequest
 from src.common.logger_manager import get_logger
 from rich.traceback import install
diff --git a/src/chat/focus_chat/cycle_analyzer.py b/src/chat/focus_chat/cycle_analyzer.py
new file mode 100644
index 000000000..23374ced9
--- /dev/null
+++ b/src/chat/focus_chat/cycle_analyzer.py
@@ -0,0 +1,216 @@
+import os
+import time
+from typing import List, Dict, Any, Tuple
+from src.chat.focus_chat.heartFC_Cycleinfo import CycleInfo
+from src.common.logger_manager import get_logger
+
+logger = get_logger("cycle_analyzer")
+
+
+class CycleAnalyzer:
+    """循环信息分析类,提供查询和分析CycleInfo的工具"""
+
+    def __init__(self, base_dir: str = "log_debug"):
+        """
+        初始化分析器
+
+        参数:
+            base_dir: 存储CycleInfo的基础目录,默认为log_debug
+        """
+        self.base_dir = base_dir
+
+    def list_streams(self) -> List[str]:
+        """
+        获取所有聊天流ID列表
+
+        返回:
+            List[str]: 聊天流ID列表
+        """
+        try:
+            if not os.path.exists(self.base_dir):
+                return []
+
+            return [d for d in os.listdir(self.base_dir) if os.path.isdir(os.path.join(self.base_dir, d))]
+        except Exception as e:
+            logger.error(f"获取聊天流列表时出错: {e}")
+            return []
+
+    def get_stream_cycle_count(self, stream_id: str) -> int:
+        """
+        获取指定聊天流的循环数量
+
+        参数:
+            stream_id: 聊天流ID
+
+        返回:
+            int: 循环数量
+        """
+        try:
+            files = CycleInfo.list_cycles(stream_id, self.base_dir)
+            return len(files)
+        except Exception as e:
+            logger.error(f"获取聊天流循环数量时出错: {e}")
+            return 0
+
+    def get_stream_cycles(self, stream_id: str, start: int = 0, limit: int = -1) -> List[str]:
+        """
+        获取指定聊天流的循环文件列表
+
+        参数:
+            stream_id: 聊天流ID
+            start: 起始索引,默认为0
+            limit: 返回的最大数量,默认为-1(全部)
+
+        返回:
+            List[str]: 循环文件路径列表
+        """
+        try:
+            files = CycleInfo.list_cycles(stream_id, self.base_dir)
+            if limit < 0:
+                return files[start:]
+            else:
+                return files[start : start + limit]
+        except Exception as e:
+            logger.error(f"获取聊天流循环文件列表时出错: {e}")
+            return []
+
+    def get_cycle_content(self, filepath: str) -> str:
+        """
+        获取循环文件的内容
+
+        参数:
+            filepath: 文件路径
+
+        返回:
+            str: 文件内容
+        """
+        try:
+            if not os.path.exists(filepath):
+                return f"文件不存在: {filepath}"
+
+            with open(filepath, "r", encoding="utf-8") as f:
+                return f.read()
+        except Exception as e:
+            logger.error(f"读取循环文件内容时出错: {e}")
+            return f"读取文件出错: {e}"
+
+    def 
analyze_stream_cycles(self, stream_id: str) -> Dict[str, Any]: + """ + 分析指定聊天流的所有循环,生成统计信息 + + 参数: + stream_id: 聊天流ID + + 返回: + Dict[str, Any]: 统计信息 + """ + try: + files = CycleInfo.list_cycles(stream_id, self.base_dir) + if not files: + return {"error": "没有找到循环记录"} + + total_cycles = len(files) + action_counts = {"text_reply": 0, "emoji_reply": 0, "no_reply": 0, "unknown": 0} + total_duration = 0 + tool_usage = {} + + for filepath in files: + with open(filepath, "r", encoding="utf-8") as f: + content = f.read() + + # 解析动作类型 + for line in content.split("\n"): + if line.startswith("动作:"): + action = line[3:].strip() + action_counts[action] = action_counts.get(action, 0) + 1 + + # 解析耗时 + elif line.startswith("耗时:"): + try: + duration = float(line[3:].strip().split("秒")[0]) + total_duration += duration + except Exception as e: + logger.error(f"解析耗时时出错: {e}") + pass + + # 解析工具使用 + elif line.startswith("使用的工具:"): + tools = line[6:].strip().split(", ") + for tool in tools: + tool_usage[tool] = tool_usage.get(tool, 0) + 1 + + avg_duration = total_duration / total_cycles if total_cycles > 0 else 0 + + return { + "总循环数": total_cycles, + "动作统计": action_counts, + "平均耗时": f"{avg_duration:.2f}秒", + "总耗时": f"{total_duration:.2f}秒", + "工具使用次数": tool_usage, + } + except Exception as e: + logger.error(f"分析聊天流循环时出错: {e}") + return {"error": f"分析出错: {e}"} + + def get_latest_cycles(self, count: int = 10) -> List[Tuple[str, str]]: + """ + 获取所有聊天流中最新的几个循环 + + 参数: + count: 获取的数量,默认为10 + + 返回: + List[Tuple[str, str]]: 聊天流ID和文件路径的元组列表 + """ + try: + all_cycles = [] + streams = self.list_streams() + + for stream_id in streams: + files = CycleInfo.list_cycles(stream_id, self.base_dir) + for filepath in files: + try: + # 从文件名中提取时间戳 + filename = os.path.basename(filepath) + timestamp_str = filename.split("_", 2)[2].split(".")[0] + timestamp = time.mktime(time.strptime(timestamp_str, "%Y%m%d_%H%M%S")) + all_cycles.append((timestamp, stream_id, filepath)) + except Exception as e: + logger.error(f"从文件名中提取时间戳时出错: {e}") + continue + + # 按时间戳排序,取最新的count个 + all_cycles.sort(reverse=True) + return [(item[1], item[2]) for item in all_cycles[:count]] + except Exception as e: + logger.error(f"获取最新循环时出错: {e}") + return [] + + +# 使用示例 +if __name__ == "__main__": + analyzer = CycleAnalyzer() + + # 列出所有聊天流 + streams = analyzer.list_streams() + print(f"找到 {len(streams)} 个聊天流: {streams}") + + # 分析第一个聊天流的循环 + if streams: + stream_id = streams[0] + stats = analyzer.analyze_stream_cycles(stream_id) + print(f"\n聊天流 {stream_id} 的统计信息:") + for key, value in stats.items(): + print(f" {key}: {value}") + + # 获取最新的循环 + cycles = analyzer.get_stream_cycles(stream_id, limit=1) + if cycles: + print("\n最新循环内容:") + print(analyzer.get_cycle_content(cycles[0])) + + # 获取所有聊天流中最新的3个循环 + latest_cycles = analyzer.get_latest_cycles(3) + print(f"\n所有聊天流中最新的 {len(latest_cycles)} 个循环:") + for stream_id, filepath in latest_cycles: + print(f" 聊天流 {stream_id}: {os.path.basename(filepath)}") diff --git a/src/chat/focus_chat/expressors/default_expressor.py b/src/chat/focus_chat/expressors/default_expressor.py new file mode 100644 index 000000000..7cb69498c --- /dev/null +++ b/src/chat/focus_chat/expressors/default_expressor.py @@ -0,0 +1,345 @@ +import traceback +from typing import List, Optional, Dict, Any, Tuple +from src.chat.message_receive.message import MessageRecv, MessageThinking, MessageSending +from src.chat.message_receive.message import Seg # Local import needed after move +from src.chat.message_receive.message import UserInfo +from 
src.chat.message_receive.chat_stream import chat_manager +from src.common.logger_manager import get_logger +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.chat.utils.utils_image import image_path_to_base64 # Local import needed after move +from src.chat.utils.timer_calculator import Timer # <--- Import Timer +from src.chat.emoji_system.emoji_manager import emoji_manager +from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder +from src.chat.focus_chat.heartFC_sender import HeartFCSender +from src.chat.utils.utils import process_llm_response +from src.chat.utils.info_catcher import info_catcher_manager +from src.manager.mood_manager import mood_manager +from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.message_receive.chat_stream import ChatStream +from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp + +logger = get_logger("expressor") + + +class DefaultExpressor: + def __init__(self, chat_id: str): + self.log_prefix = "expressor" + self.express_model = LLMRequest( + model=global_config.llm_normal, + temperature=global_config.llm_normal["temp"], + max_tokens=256, + request_type="response_heartflow", + ) + self.heart_fc_sender = HeartFCSender() + + self.chat_id = chat_id + self.chat_stream: Optional[ChatStream] = None + self.is_group_chat = True + self.chat_target_info = None + + async def initialize(self): + self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id) + + async def _create_thinking_message(self, anchor_message: Optional[MessageRecv], thinking_id: str): + """创建思考消息 (尝试锚定到 anchor_message)""" + if not anchor_message or not anchor_message.chat_stream: + logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。") + return None + + chat = anchor_message.chat_stream + messageinfo = anchor_message.message_info + thinking_time_point = parse_thinking_id_to_timestamp(thinking_id) + bot_user_info = UserInfo( + user_id=global_config.BOT_QQ, + user_nickname=global_config.BOT_NICKNAME, + platform=messageinfo.platform, + ) + # logger.debug(f"创建思考消息:{anchor_message}") + # logger.debug(f"创建思考消息chat:{chat}") + # logger.debug(f"创建思考消息bot_user_info:{bot_user_info}") + # logger.debug(f"创建思考消息messageinfo:{messageinfo}") + thinking_message = MessageThinking( + message_id=thinking_id, + chat_stream=chat, + bot_user_info=bot_user_info, + reply=anchor_message, # 回复的是锚点消息 + thinking_start_time=thinking_time_point, + ) + logger.debug(f"创建思考消息thinking_message:{thinking_message}") + + await self.heart_fc_sender.register_thinking(thinking_message) + + async def deal_reply( + self, + cycle_timers: dict, + action_data: Dict[str, Any], + reasoning: str, + anchor_message: MessageRecv, + thinking_id: str, + ) -> tuple[bool, Optional[List[Tuple[str, str]]]]: + # 创建思考消息 + await self._create_thinking_message(anchor_message, thinking_id) + + reply = None # 初始化 reply,防止未定义 + try: + has_sent_something = False + + # 处理文本部分 + text_part = action_data.get("text", []) + if text_part: + with Timer("生成回复", cycle_timers): + # 可以保留原有的文本处理逻辑或进行适当调整 + reply = await self.express( + in_mind_reply=text_part, + anchor_message=anchor_message, + thinking_id=thinking_id, + reason=reasoning, + action_data=action_data, + ) + + with Timer("选择表情", cycle_timers): + emoji_keyword = action_data.get("emojis", []) + emoji_base64 = await self._choose_emoji(emoji_keyword) + if emoji_base64: + reply.append(("emoji", emoji_base64)) + + if reply: + with Timer("发送消息", cycle_timers): + 
await self._send_response_messages( + anchor_message=anchor_message, + thinking_id=thinking_id, + response_set=reply, + ) + has_sent_something = True + else: + logger.warning(f"{self.log_prefix} 文本回复生成失败") + + if not has_sent_something: + logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容") + + return has_sent_something, reply + + except Exception as e: + logger.error(f"回复失败: {e}") + return False, None + + # --- 回复器 (Replier) 的定义 --- # + + async def express( + self, + in_mind_reply: str, + reason: str, + anchor_message: MessageRecv, + thinking_id: str, + action_data: Dict[str, Any], + ) -> Optional[List[str]]: + """ + 回复器 (Replier): 核心逻辑,负责生成回复文本。 + (已整合原 HeartFCGenerator 的功能) + """ + try: + # 1. 获取情绪影响因子并调整模型温度 + arousal_multiplier = mood_manager.get_arousal_multiplier() + current_temp = float(global_config.llm_normal["temp"]) * arousal_multiplier + self.express_model.params["temperature"] = current_temp # 动态调整温度 + + # 2. 获取信息捕捉器 + info_catcher = info_catcher_manager.get_info_catcher(thinking_id) + + # --- Determine sender_name for private chat --- + sender_name_for_prompt = "某人" # Default for group or if info unavailable + if not self.is_group_chat and self.chat_target_info: + # Prioritize person_name, then nickname + sender_name_for_prompt = ( + self.chat_target_info.get("person_name") + or self.chat_target_info.get("user_nickname") + or sender_name_for_prompt + ) + # --- End determining sender_name --- + + target_message = action_data.get("target", "") + + # 3. 构建 Prompt + with Timer("构建Prompt", {}): # 内部计时器,可选保留 + prompt = await prompt_builder.build_prompt( + build_mode="focus", + chat_stream=self.chat_stream, # Pass the stream object + in_mind_reply=in_mind_reply, + reason=reason, + current_mind_info="", + structured_info="", + sender_name=sender_name_for_prompt, # Pass determined name + target_message=target_message, + ) + + # 4. 调用 LLM 生成回复 + content = None + reasoning_content = None + model_name = "unknown_model" + if not prompt: + logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。") + return None + + try: + with Timer("LLM生成", {}): # 内部计时器,可选保留 + # logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n") + content, reasoning_content, model_name = await self.express_model.generate_response(prompt) + + logger.info(f"{self.log_prefix}\nPrompt:\n{prompt}\n---------------------------\n") + + logger.info(f"想要表达:{in_mind_reply}") + logger.info(f"理由:{reason}") + logger.info(f"生成回复: {content}\n") + + info_catcher.catch_after_llm_generated( + prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name + ) + + except Exception as llm_e: + # 精简报错信息 + logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}") + return None # LLM 调用失败则无法生成回复 + + processed_response = process_llm_response(content) + + # 5. 
处理 LLM 响应 + if not content: + logger.warning(f"{self.log_prefix}LLM 生成了空内容。") + return None + if not processed_response: + logger.warning(f"{self.log_prefix}处理后的回复为空。") + return None + + reply_set = [] + for str in processed_response: + reply_seg = ("text", str) + reply_set.append(reply_seg) + + return reply_set + + except Exception as e: + logger.error(f"{self.log_prefix}回复生成意外失败: {e}") + traceback.print_exc() + return None + + # --- 发送器 (Sender) --- # + + async def _send_response_messages( + self, anchor_message: Optional[MessageRecv], response_set: List[Tuple[str, str]], thinking_id: str + ) -> Optional[MessageSending]: + """发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender""" + chat = self.chat_stream + chat_id = self.chat_id + if chat is None: + logger.error(f"{self.log_prefix} 无法发送回复,chat_stream 为空。") + return None + if not anchor_message: + logger.error(f"{self.log_prefix} 无法发送回复,anchor_message 为空。") + return None + + stream_name = chat_manager.get_stream_name(chat_id) or chat_id # 获取流名称用于日志 + + # 检查思考过程是否仍在进行,并获取开始时间 + thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id) + + if thinking_start_time is None: + logger.error(f"[{stream_name}]思考过程未找到或已结束,无法发送回复。") + return None + + mark_head = False + first_bot_msg: Optional[MessageSending] = None + reply_message_ids = [] # 记录实际发送的消息ID + + for i, msg_text in enumerate(response_set): + # 为每个消息片段生成唯一ID + type = msg_text[0] + data = msg_text[1] + + part_message_id = f"{thinking_id}_{i}" + message_segment = Seg(type=type, data=data) + + if type == "emoji": + is_emoji = True + else: + is_emoji = False + reply_to = not mark_head + + bot_message = await self._build_single_sending_message( + anchor_message=anchor_message, + message_id=part_message_id, + message_segment=message_segment, + reply_to=reply_to, + is_emoji=is_emoji, + thinking_id=thinking_id, + ) + + try: + if not mark_head: + mark_head = True + first_bot_msg = bot_message # 保存第一个成功发送的消息对象 + typing = False + else: + typing = True + + if type == "emoji": + typing = False + + await self.heart_fc_sender.send_message(bot_message, has_thinking=True, typing=typing) + + reply_message_ids.append(part_message_id) # 记录我们生成的ID + + except Exception as e: + logger.error(f"{self.log_prefix}发送回复片段 {i} ({part_message_id}) 时失败: {e}") + # 这里可以选择是继续发送下一个片段还是中止 + + # 在尝试发送完所有片段后,完成原始的 thinking_id 状态 + try: + await self.heart_fc_sender.complete_thinking(chat_id, thinking_id) + except Exception as e: + logger.error(f"{self.log_prefix}完成思考状态 {thinking_id} 时出错: {e}") + + return first_bot_msg # 返回第一个成功发送的消息对象 + + async def _choose_emoji(self, send_emoji: str): + """ + 选择表情,根据send_emoji文本选择表情,返回表情base64 + """ + emoji_base64 = "" + emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji) + if emoji_raw: + emoji_path, _description = emoji_raw + emoji_base64 = image_path_to_base64(emoji_path) + return emoji_base64 + + async def _build_single_sending_message( + self, + anchor_message: MessageRecv, + message_id: str, + message_segment: Seg, + reply_to: bool, + is_emoji: bool, + thinking_id: str, + ) -> MessageSending: + """构建单个发送消息""" + + thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(self.chat_id, thinking_id) + bot_user_info = UserInfo( + user_id=global_config.BOT_QQ, + user_nickname=global_config.BOT_NICKNAME, + platform=self.chat_stream.platform, + ) + + bot_message = MessageSending( + message_id=message_id, # 使用片段的唯一ID + chat_stream=self.chat_stream, + bot_user_info=bot_user_info, + sender_info=anchor_message.message_info.user_info, + 
message_segment=message_segment, + reply=anchor_message, # 回复原始锚点 + is_head=reply_to, + is_emoji=is_emoji, + thinking_start_time=thinking_start_time, # 传递原始思考开始时间 + ) + + return bot_message diff --git a/src/chat/focus_chat/expressors/exprssion_learner.py b/src/chat/focus_chat/expressors/exprssion_learner.py new file mode 100644 index 000000000..57908402e --- /dev/null +++ b/src/chat/focus_chat/expressors/exprssion_learner.py @@ -0,0 +1,320 @@ +import time +import random +from typing import List, Dict, Optional, Any, Tuple +from src.common.logger_manager import get_logger +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_readable_messages +from src.chat.focus_chat.heartflow_prompt_builder import Prompt, global_prompt_manager +import os +import json + + +MAX_EXPRESSION_COUNT = 300 + +logger = get_logger("expressor") + + +def init_prompt() -> None: + learn_style_prompt = """ +{chat_str} + +请从上面这段群聊中概括除了人名为"麦麦"之外的人的语言风格,只考虑文字,不要考虑表情包和图片 +不要涉及具体的人名,只考虑语言风格 +语言风格包含特殊内容和情感 +思考有没有特殊的梗,一并总结成语言风格 +总结成如下格式的规律,总结的内容要详细,但具有概括性: +当"xxx"时,可以"xxx", xxx不超过10个字 + +例如: +当"表示十分惊叹"时,使用"我嘞个xxxx" +当"表示讽刺的赞同,不想讲道理"时,使用"对对对" +当"想说明某个观点,但懒得明说",使用"懂的都懂" + +注意不要总结你自己的发言 +现在请你概括 +""" + Prompt(learn_style_prompt, "learn_style_prompt") + + personality_expression_prompt = """ +{personality} + +请从以上人设中总结出这个角色可能的语言风格 +思考回复的特殊内容和情感 +思考有没有特殊的梗,一并总结成语言风格 +总结成如下格式的规律,总结的内容要详细,但具有概括性: +当"xxx"时,可以"xxx", xxx不超过10个字 + +例如: +当"表示十分惊叹"时,使用"我嘞个xxxx" +当"表示讽刺的赞同,不想讲道理"时,使用"对对对" +当"想说明某个观点,但懒得明说",使用"懂的都懂" + +现在请你概括 +""" + Prompt(personality_expression_prompt, "personality_expression_prompt") + + learn_grammar_prompt = """ +{chat_str} + +请从上面这段群聊中概括除了人名为"麦麦"之外的人的语法和句法特点,只考虑纯文字,不要考虑表情包和图片 +不要总结【图片】,【动画表情】,[图片],[动画表情],不总结 表情符号 at @ 回复 和[回复] +不要涉及具体的人名,只考虑语法和句法特点, +语法和句法特点要包括,句子长短(具体字数),有何种语病,如何拆分句子。 +总结成如下格式的规律,总结的内容要简洁,不浮夸: +当"xxx"时,可以"xxx" + +例如: +当"表达观点较复杂"时,使用"省略主语"的句法 +当"不用详细说明的一般表达"时,使用"非常简洁的句子"的句法 +当"需要单纯简单的确认"时,使用"单字或几个字的肯定"的句法 + +注意不要总结你自己的发言 +现在请你概括 +""" + Prompt(learn_grammar_prompt, "learn_grammar_prompt") + + +class ExpressionLearner: + def __init__(self) -> None: + self.express_learn_model: LLMRequest = LLMRequest( + model=global_config.llm_normal, + temperature=0.1, + max_tokens=256, + request_type="response_heartflow", + ) + + async def get_expression_by_chat_id(self, chat_id: str) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]: + """ + 读取/data/expression/learnt/{chat_id}/expressions.json和/data/expression/personality/expressions.json + 返回(learnt_expressions, personality_expressions) + """ + learnt_style_file = os.path.join("data", "expression", "learnt_style", str(chat_id), "expressions.json") + learnt_grammar_file = os.path.join("data", "expression", "learnt_grammar", str(chat_id), "expressions.json") + personality_file = os.path.join("data", "expression", "personality", "expressions.json") + learnt_style_expressions = [] + learnt_grammar_expressions = [] + personality_expressions = [] + if os.path.exists(learnt_style_file): + with open(learnt_style_file, "r", encoding="utf-8") as f: + learnt_style_expressions = json.load(f) + if os.path.exists(learnt_grammar_file): + with open(learnt_grammar_file, "r", encoding="utf-8") as f: + learnt_grammar_expressions = json.load(f) + if os.path.exists(personality_file): + with open(personality_file, "r", encoding="utf-8") as f: + personality_expressions = json.load(f) + return learnt_style_expressions, learnt_grammar_expressions, 
personality_expressions + + def is_similar(self, s1: str, s2: str) -> bool: + """ + 判断两个字符串是否相似(只考虑长度大于5且有80%以上重合,不考虑子串) + """ + if not s1 or not s2: + return False + min_len = min(len(s1), len(s2)) + if min_len < 5: + return False + same = sum(1 for a, b in zip(s1, s2) if a == b) + return same / min_len > 0.8 + + async def learn_and_store_expression(self) -> List[Tuple[str, str, str]]: + """ + 学习并存储表达方式,分别学习语言风格和句法特点 + """ + learnt_style: Optional[List[Tuple[str, str, str]]] = await self.learn_and_store(type="style", num=3) + if not learnt_style: + return [] + + learnt_grammar: Optional[List[Tuple[str, str, str]]] = await self.learn_and_store(type="grammar", num=2) + if not learnt_grammar: + return [] + + return learnt_style, learnt_grammar + + async def learn_and_store(self, type: str, num: int = 10) -> List[Tuple[str, str, str]]: + """ + 选择从当前到最近1小时内的随机num条消息,然后学习这些消息的表达方式 + type: "style" or "grammar" + """ + if type == "style": + type_str = "语言风格" + elif type == "grammar": + type_str = "句法特点" + else: + raise ValueError(f"Invalid type: {type}") + logger.info(f"开始学习{type_str}...") + learnt_expressions: Optional[List[Tuple[str, str, str]]] = await self.learn_expression(type, num) + logger.info(f"学习到{len(learnt_expressions) if learnt_expressions else 0}条{type_str}") + # learnt_expressions: List[(chat_id, situation, style)] + + if not learnt_expressions: + logger.info(f"没有学习到{type_str}") + return [] + + # 按chat_id分组 + chat_dict: Dict[str, List[Dict[str, str]]] = {} + for chat_id, situation, style in learnt_expressions: + if chat_id not in chat_dict: + chat_dict[chat_id] = [] + chat_dict[chat_id].append({"situation": situation, "style": style}) + # 存储到/data/expression/对应chat_id/expressions.json + for chat_id, expr_list in chat_dict.items(): + dir_path = os.path.join("data", "expression", f"learnt_{type}", str(chat_id)) + os.makedirs(dir_path, exist_ok=True) + file_path = os.path.join(dir_path, "expressions.json") + # 若已存在,先读出合并 + if os.path.exists(file_path): + old_data: List[Dict[str, str, str]] = [] + try: + with open(file_path, "r", encoding="utf-8") as f: + old_data = json.load(f) + except Exception: + old_data = [] + else: + old_data = [] + # 超过最大数量时,20%概率移除count=1的项 + if len(old_data) >= MAX_EXPRESSION_COUNT: + new_old_data = [] + for item in old_data: + if item.get("count", 1) == 1 and random.random() < 0.2: + continue # 20%概率移除 + new_old_data.append(item) + old_data = new_old_data + # 合并逻辑 + for new_expr in expr_list: + found = False + for old_expr in old_data: + if self.is_similar(new_expr["situation"], old_expr.get("situation", "")) and self.is_similar( + new_expr["style"], old_expr.get("style", "") + ): + found = True + # 50%概率替换 + if random.random() < 0.5: + old_expr["situation"] = new_expr["situation"] + old_expr["style"] = new_expr["style"] + old_expr["count"] = old_expr.get("count", 1) + 1 + break + if not found: + new_expr["count"] = 1 + old_data.append(new_expr) + with open(file_path, "w", encoding="utf-8") as f: + json.dump(old_data, f, ensure_ascii=False, indent=2) + return learnt_expressions + + async def learn_expression(self, type: str, num: int = 10) -> Optional[List[Tuple[str, str, str]]]: + """选择从当前到最近1小时内的随机num条消息,然后学习这些消息的表达方式 + + Args: + type: "style" or "grammar" + """ + if type == "style": + type_str = "语言风格" + prompt = "learn_style_prompt" + elif type == "grammar": + type_str = "句法特点" + prompt = "learn_grammar_prompt" + else: + raise ValueError(f"Invalid type: {type}") + + current_time = time.time() + random_msg: Optional[List[Dict[str, Any]]] = 
get_raw_msg_by_timestamp_random( + current_time - 3600 * 24, current_time, limit=num + ) + if not random_msg: + return None + # 转化成str + chat_id: str = random_msg[0]["chat_id"] + random_msg_str: str = await build_readable_messages(random_msg, timestamp_mode="normal") + + prompt: str = await global_prompt_manager.format_prompt( + prompt, + chat_str=random_msg_str, + ) + + logger.debug(f"学习{type_str}的prompt: {prompt}") + + try: + response, _ = await self.express_learn_model.generate_response_async(prompt) + except Exception as e: + logger.error(f"学习{type_str}失败: {e}") + return None + + logger.debug(f"学习{type_str}的response: {response}") + + expressions: List[Tuple[str, str, str]] = self.parse_expression_response(response, chat_id) + + return expressions + + def parse_expression_response(self, response: str, chat_id: str) -> List[Tuple[str, str, str]]: + """ + 解析LLM返回的表达风格总结,每一行提取"当"和"使用"之间的内容,存储为(situation, style)元组 + """ + expressions: List[Tuple[str, str, str]] = [] + for line in response.splitlines(): + line = line.strip() + if not line: + continue + # 查找"当"和下一个引号 + idx_when = line.find('当"') + if idx_when == -1: + continue + idx_quote1 = idx_when + 1 + idx_quote2 = line.find('"', idx_quote1 + 1) + if idx_quote2 == -1: + continue + situation = line[idx_quote1 + 1 : idx_quote2] + # 查找"使用" + idx_use = line.find('使用"', idx_quote2) + if idx_use == -1: + continue + idx_quote3 = idx_use + 2 + idx_quote4 = line.find('"', idx_quote3 + 1) + if idx_quote4 == -1: + continue + style = line[idx_quote3 + 1 : idx_quote4] + expressions.append((chat_id, situation, style)) + return expressions + + async def extract_and_store_personality_expressions(self): + """ + 检查data/expression/personality目录,不存在则创建。 + 用peronality变量作为chat_str,调用LLM生成表达风格,解析后count=100,存储到expressions.json。 + """ + dir_path = os.path.join("data", "expression", "personality") + os.makedirs(dir_path, exist_ok=True) + file_path = os.path.join(dir_path, "expressions.json") + + # 构建prompt + prompt = await global_prompt_manager.format_prompt( + "personality_expression_prompt", + personality=global_config.expression_style, + ) + logger.info(f"个性表达方式提取prompt: {prompt}") + + try: + response, _ = await self.express_learn_model.generate_response_async(prompt) + except Exception as e: + logger.error(f"个性表达方式提取失败: {e}") + return + + logger.info(f"个性表达方式提取response: {response}") + # chat_id用personality + expressions = self.parse_expression_response(response, "personality") + # 转为dict并count=100 + result = [] + for _, situation, style in expressions: + result.append({"situation": situation, "style": style, "count": 100}) + # 超过50条时随机删除多余的,只保留50条 + if len(result) > 50: + remove_count = len(result) - 50 + remove_indices = set(random.sample(range(len(result)), remove_count)) + result = [item for idx, item in enumerate(result) if idx not in remove_indices] + with open(file_path, "w", encoding="utf-8") as f: + json.dump(result, f, ensure_ascii=False, indent=2) + logger.info(f"已写入{len(result)}条表达到{file_path}") + + +init_prompt() + +expression_learner = ExpressionLearner() diff --git a/src/chat/focus_chat/heartFC_Cycleinfo.py b/src/chat/focus_chat/heartFC_Cycleinfo.py new file mode 100644 index 000000000..80864e83c --- /dev/null +++ b/src/chat/focus_chat/heartFC_Cycleinfo.py @@ -0,0 +1,307 @@ +import time +import os +import json +from typing import List, Optional, Dict, Any + + +class CycleDetail: + """循环信息记录类""" + + def __init__(self, cycle_id: int): + self.cycle_id = cycle_id + self.start_time = time.time() + self.end_time: Optional[float] = None + 
self.action_taken = False + self.action_type = "unknown" + self.reasoning = "" + self.timers: Dict[str, float] = {} + self.thinking_id = "" + self.replanned = False + + # 添加响应信息相关字段 + self.response_info: Dict[str, Any] = { + "response_text": [], # 回复的文本列表 + "emoji_info": "", # 表情信息 + "anchor_message_id": "", # 锚点消息ID + "reply_message_ids": [], # 回复消息ID列表 + "sub_mind_thinking": "", # 子思维思考内容 + "in_mind_reply": [], # 子思维思考内容 + } + + # 添加SubMind相关信息 + self.submind_info: Dict[str, Any] = { + "prompt": "", # SubMind输入的prompt + "structured_info": "", # 结构化信息 + "result": "", # SubMind的思考结果 + } + + # 添加ToolUse相关信息 + self.tooluse_info: Dict[str, Any] = { + "prompt": "", # 工具使用的prompt + "tools_used": [], # 使用了哪些工具 + "tool_results": [], # 工具获得的信息 + } + + # 添加Planner相关信息 + self.planner_info: Dict[str, Any] = { + "prompt": "", # 规划器的prompt + "response": "", # 规划器的原始回复 + "parsed_result": {}, # 解析后的结果 + } + + def to_dict(self) -> Dict[str, Any]: + """将循环信息转换为字典格式""" + return { + "cycle_id": self.cycle_id, + "start_time": self.start_time, + "end_time": self.end_time, + "action_taken": self.action_taken, + "action_type": self.action_type, + "reasoning": self.reasoning, + "timers": self.timers, + "thinking_id": self.thinking_id, + "response_info": self.response_info, + "submind_info": self.submind_info, + "tooluse_info": self.tooluse_info, + "planner_info": self.planner_info, + } + + def complete_cycle(self): + """完成循环,记录结束时间""" + self.end_time = time.time() + + def set_action_info( + self, action_type: str, reasoning: str, action_taken: bool, action_data: Optional[Dict[str, Any]] = None + ): + """设置动作信息""" + self.action_type = action_type + self.action_data = action_data + self.reasoning = reasoning + self.action_taken = action_taken + + def set_thinking_id(self, thinking_id: str): + """设置思考消息ID""" + self.thinking_id = thinking_id + + def set_response_info( + self, + response_text: Optional[List[str]] = None, + emoji_info: Optional[str] = None, + anchor_message_id: Optional[str] = None, + reply_message_ids: Optional[List[str]] = None, + sub_mind_thinking: Optional[str] = None, + ): + """设置响应信息""" + if response_text is not None: + self.response_info["response_text"] = response_text + if emoji_info is not None: + self.response_info["emoji_info"] = emoji_info + if anchor_message_id is not None: + self.response_info["anchor_message_id"] = anchor_message_id + if reply_message_ids is not None: + self.response_info["reply_message_ids"] = reply_message_ids + if sub_mind_thinking is not None: + self.response_info["sub_mind_thinking"] = sub_mind_thinking + + def set_submind_info( + self, + prompt: Optional[str] = None, + structured_info: Optional[str] = None, + result: Optional[str] = None, + ): + """设置SubMind信息""" + if prompt is not None: + self.submind_info["prompt"] = prompt + if structured_info is not None: + self.submind_info["structured_info"] = structured_info + if result is not None: + self.submind_info["result"] = result + + def set_tooluse_info( + self, + prompt: Optional[str] = None, + tools_used: Optional[List[str]] = None, + tool_results: Optional[List[Dict[str, Any]]] = None, + ): + """设置ToolUse信息""" + if prompt is not None: + self.tooluse_info["prompt"] = prompt + if tools_used is not None: + self.tooluse_info["tools_used"] = tools_used + if tool_results is not None: + self.tooluse_info["tool_results"] = tool_results + + def set_planner_info( + self, + prompt: Optional[str] = None, + response: Optional[str] = None, + parsed_result: Optional[Dict[str, Any]] = None, + ): + """设置Planner信息""" + if prompt 
is not None: + self.planner_info["prompt"] = prompt + if response is not None: + self.planner_info["response"] = response + if parsed_result is not None: + self.planner_info["parsed_result"] = parsed_result + + @staticmethod + def save_to_file(cycle_info: "CycleDetail", stream_id: str, base_dir: str = "log_debug") -> str: + """ + 将CycleInfo保存到文件 + + 参数: + cycle_info: CycleInfo对象 + stream_id: 聊天流ID + base_dir: 基础目录,默认为log_debug + + 返回: + str: 保存的文件路径 + """ + try: + # 创建目录结构 + stream_dir = os.path.join(base_dir, stream_id) + os.makedirs(stream_dir, exist_ok=True) + + # 生成文件名和路径 + timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime(cycle_info.start_time)) + filename = f"cycle_{cycle_info.cycle_id}_{timestamp}.txt" + filepath = os.path.join(stream_dir, filename) + + # 格式化输出成易读的格式 + with open(filepath, "w", encoding="utf-8") as f: + # 写入基本信息 + f.write(f"循环ID: {cycle_info.cycle_id}\n") + f.write(f"开始时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cycle_info.start_time))}\n") + if cycle_info.end_time: + f.write(f"结束时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cycle_info.end_time))}\n") + duration = cycle_info.end_time - cycle_info.start_time + f.write(f"耗时: {duration:.2f}秒\n") + f.write(f"动作: {cycle_info.action_type}\n") + f.write(f"原因: {cycle_info.reasoning}\n") + f.write(f"执行状态: {'已执行' if cycle_info.action_taken else '未执行'}\n") + f.write(f"思考ID: {cycle_info.thinking_id}\n") + f.write(f"是否为重新规划: {'是' if cycle_info.replanned else '否'}\n\n") + + # 写入计时器信息 + if cycle_info.timers: + f.write("== 计时器信息 ==\n") + for name, elapsed in cycle_info.timers.items(): + formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" + f.write(f"{name}: {formatted_time}\n") + f.write("\n") + + # 写入响应信息 + f.write("== 响应信息 ==\n") + f.write(f"锚点消息ID: {cycle_info.response_info['anchor_message_id']}\n") + if cycle_info.response_info["response_text"]: + f.write("回复文本:\n") + for i, text in enumerate(cycle_info.response_info["response_text"]): + f.write(f" [{i + 1}] {text}\n") + if cycle_info.response_info["emoji_info"]: + f.write(f"表情信息: {cycle_info.response_info['emoji_info']}\n") + if cycle_info.response_info["reply_message_ids"]: + f.write(f"回复消息ID: {', '.join(cycle_info.response_info['reply_message_ids'])}\n") + f.write("\n") + + # 写入SubMind信息 + f.write("== SubMind信息 ==\n") + f.write(f"结构化信息:\n{cycle_info.submind_info['structured_info']}\n\n") + f.write(f"思考结果:\n{cycle_info.submind_info['result']}\n\n") + f.write("SubMind Prompt:\n") + f.write(f"{cycle_info.submind_info['prompt']}\n\n") + + # 写入ToolUse信息 + f.write("== 工具使用信息 ==\n") + if cycle_info.tooluse_info["tools_used"]: + f.write(f"使用的工具: {', '.join(cycle_info.tooluse_info['tools_used'])}\n") + else: + f.write("未使用工具\n") + + if cycle_info.tooluse_info["tool_results"]: + f.write("工具结果:\n") + for i, result in enumerate(cycle_info.tooluse_info["tool_results"]): + f.write(f" [{i + 1}] 类型: {result.get('type', '未知')}, 内容: {result.get('content', '')}\n") + f.write("\n") + f.write("工具执行 Prompt:\n") + f.write(f"{cycle_info.tooluse_info['prompt']}\n\n") + + # 写入Planner信息 + f.write("== Planner信息 ==\n") + f.write("Planner Prompt:\n") + f.write(f"{cycle_info.planner_info['prompt']}\n\n") + f.write("原始回复:\n") + f.write(f"{cycle_info.planner_info['response']}\n\n") + f.write("解析结果:\n") + f.write(f"{json.dumps(cycle_info.planner_info['parsed_result'], ensure_ascii=False, indent=2)}\n") + + return filepath + except Exception as e: + print(f"保存CycleInfo到文件时出错: {e}") + return "" + + @staticmethod + def load_from_file(filepath: str) -> 
Optional[Dict[str, Any]]: + """ + 从文件加载CycleInfo信息(只加载JSON格式的数据,不解析文本格式) + + 参数: + filepath: 文件路径 + + 返回: + Optional[Dict[str, Any]]: 加载的CycleInfo数据,失败则返回None + """ + try: + if not os.path.exists(filepath): + print(f"文件不存在: {filepath}") + return None + + # 尝试从文件末尾读取JSON数据 + with open(filepath, "r", encoding="utf-8") as f: + lines = f.readlines() + + # 查找"解析结果:"后的JSON数据 + for i, line in enumerate(lines): + if "解析结果:" in line and i + 1 < len(lines): + # 尝试解析后面的行 + json_data = "" + for j in range(i + 1, len(lines)): + json_data += lines[j] + + try: + return json.loads(json_data) + except json.JSONDecodeError: + continue + + # 如果没有找到JSON数据,则返回None + return None + except Exception as e: + print(f"从文件加载CycleInfo时出错: {e}") + return None + + @staticmethod + def list_cycles(stream_id: str, base_dir: str = "log_debug") -> List[str]: + """ + 列出指定stream_id的所有循环文件 + + 参数: + stream_id: 聊天流ID + base_dir: 基础目录,默认为log_debug + + 返回: + List[str]: 文件路径列表 + """ + try: + stream_dir = os.path.join(base_dir, stream_id) + if not os.path.exists(stream_dir): + return [] + + files = [ + os.path.join(stream_dir, f) + for f in os.listdir(stream_dir) + if f.startswith("cycle_") and f.endswith(".txt") + ] + return sorted(files) + except Exception as e: + print(f"列出循环文件时出错: {e}") + return [] diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py new file mode 100644 index 000000000..001754b0e --- /dev/null +++ b/src/chat/focus_chat/heartFC_chat.py @@ -0,0 +1,981 @@ +import asyncio +import contextlib +import json # <--- 确保导入 json +import random # <--- 添加导入 +import time +import traceback +from collections import deque +from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine +from src.chat.message_receive.chat_stream import ChatStream +from src.chat.message_receive.chat_stream import chat_manager +from rich.traceback import install +from src.common.logger_manager import get_logger +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.chat.utils.timer_calculator import Timer +from src.chat.heart_flow.observation.observation import Observation +from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder +from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.focus_chat.info.info_base import InfoBase +from src.chat.focus_chat.info.obs_info import ObsInfo +from src.chat.focus_chat.info.cycle_info import CycleInfo +from src.chat.focus_chat.info.mind_info import MindInfo +from src.chat.focus_chat.info.structured_info import StructuredInfo +from src.chat.focus_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor +from src.chat.focus_chat.info_processors.mind_processor import MindProcessor +from src.chat.heart_flow.observation.memory_observation import MemoryObservation +from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation +from src.chat.heart_flow.observation.working_observation import WorkingObservation +from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor +from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor +from src.chat.focus_chat.hfc_utils import create_empty_anchor_message, parse_thinking_id_to_timestamp +from src.chat.focus_chat.memory_activator import MemoryActivator + +install(extra_lines=3) + + +WAITING_TIME_THRESHOLD = 300 # 
等待新消息时间阈值,单位秒 + +EMOJI_SEND_PRO = 0.3 # 设置一个概率,比如 30% 才真的发 + +CONSECUTIVE_NO_REPLY_THRESHOLD = 3 # 连续不回复的阈值 + +logger = get_logger("hfc") # Logger Name Changed + + +# 默认动作定义 +DEFAULT_ACTIONS = {"no_reply": "不操作,继续浏览", "reply": "表达想法,可以只包含文本、表情或两者都有"} + + +class ActionManager: + """动作管理器:控制每次决策可以使用的动作""" + + def __init__(self): + # 初始化为新的默认动作集 + self._available_actions: Dict[str, str] = DEFAULT_ACTIONS.copy() + self._original_actions_backup: Optional[Dict[str, str]] = None + + def get_available_actions(self) -> Dict[str, str]: + """获取当前可用的动作集""" + return self._available_actions.copy() # 返回副本以防外部修改 + + def add_action(self, action_name: str, description: str) -> bool: + """ + 添加新的动作 + + 参数: + action_name: 动作名称 + description: 动作描述 + + 返回: + bool: 是否添加成功 + """ + if action_name in self._available_actions: + return False + self._available_actions[action_name] = description + return True + + def remove_action(self, action_name: str) -> bool: + """ + 移除指定动作 + + 参数: + action_name: 动作名称 + + 返回: + bool: 是否移除成功 + """ + if action_name not in self._available_actions: + return False + del self._available_actions[action_name] + return True + + def temporarily_remove_actions(self, actions_to_remove: List[str]): + """ + 临时移除指定的动作,备份原始动作集。 + 如果已经有备份,则不重复备份。 + """ + if self._original_actions_backup is None: + self._original_actions_backup = self._available_actions.copy() + + actions_actually_removed = [] + for action_name in actions_to_remove: + if action_name in self._available_actions: + del self._available_actions[action_name] + actions_actually_removed.append(action_name) + # logger.debug(f"临时移除了动作: {actions_actually_removed}") # 可选日志 + + def restore_actions(self): + """ + 恢复之前备份的原始动作集。 + """ + if self._original_actions_backup is not None: + self._available_actions = self._original_actions_backup.copy() + self._original_actions_backup = None + # logger.debug("恢复了原始动作集") # 可选日志 + + +async def _handle_cycle_delay(action_taken_this_cycle: bool, cycle_start_time: float, log_prefix: str): + """处理循环延迟""" + cycle_duration = time.monotonic() - cycle_start_time + + try: + sleep_duration = 0.0 + if not action_taken_this_cycle and cycle_duration < 1: + sleep_duration = 1 - cycle_duration + elif cycle_duration < 0.2: + sleep_duration = 0.2 + + if sleep_duration > 0: + await asyncio.sleep(sleep_duration) + + except asyncio.CancelledError: + logger.info(f"{log_prefix} Sleep interrupted, loop likely cancelling.") + raise + + +class HeartFChatting: + """ + 管理一个连续的Plan-Replier-Sender循环 + 用于在特定聊天流中生成回复。 + 其生命周期现在由其关联的 SubHeartflow 的 FOCUSED 状态控制。 + """ + + def __init__( + self, + chat_id: str, + observations: list[Observation], + on_consecutive_no_reply_callback: Callable[[], Coroutine[None, None, None]], + ): + """ + HeartFChatting 初始化函数 + + 参数: + chat_id: 聊天流唯一标识符(如stream_id) + observations: 关联的观察列表 + on_consecutive_no_reply_callback: 连续不回复达到阈值时调用的异步回调函数 + """ + # 基础属性 + self.stream_id: str = chat_id # 聊天流ID + self.chat_stream: Optional[ChatStream] = None # 关联的聊天流 + self.observations: List[Observation] = observations # 关联的观察列表,用于监控聊天流状态 + self.on_consecutive_no_reply_callback = on_consecutive_no_reply_callback + + self.chatting_info_processor = ChattingInfoProcessor() + self.mind_processor = MindProcessor(subheartflow_id=self.stream_id) + + self.memory_observation = MemoryObservation(observe_id=self.stream_id) + self.hfcloop_observation = HFCloopObservation(observe_id=self.stream_id) + self.tool_processor = ToolProcessor(subheartflow_id=self.stream_id) + self.working_observation = 
WorkingObservation(observe_id=self.stream_id) + self.memory_activator = MemoryActivator() + + # 日志前缀 + self.log_prefix: str = str(chat_id) # Initial default, will be updated + + # --- Initialize attributes (defaults) --- + self.is_group_chat: bool = False + self.chat_target_info: Optional[dict] = None + # --- End Initialization --- + self.expressor = DefaultExpressor(chat_id=self.stream_id) + + # 动作管理器 + self.action_manager = ActionManager() + + # 初始化状态控制 + self._initialized = False + self._processing_lock = asyncio.Lock() + + # LLM规划器配置 + self.planner_llm = LLMRequest( + model=global_config.llm_plan, + max_tokens=1000, + request_type="action_planning", # 用于动作规划 + ) + + # 循环控制内部状态 + self._loop_active: bool = False # 循环是否正在运行 + self._loop_task: Optional[asyncio.Task] = None # 主循环任务 + + # 添加循环信息管理相关的属性 + self._cycle_counter = 0 + self._cycle_history: Deque[CycleDetail] = deque(maxlen=10) # 保留最近10个循环的信息 + self._current_cycle: Optional[CycleDetail] = None + self.total_no_reply_count: int = 0 # <--- 新增:连续不回复计数器 + self._shutting_down: bool = False # <--- 新增:关闭标志位 + self.total_waiting_time: float = 0.0 # <--- 新增:累计等待时间 + + async def _initialize(self) -> bool: + """ + 执行懒初始化操作 + + 功能: + 1. 获取聊天类型(群聊/私聊)和目标信息 + 2. 获取聊天流对象 + 3. 设置日志前缀 + + 返回: + bool: 初始化是否成功 + + 注意: + - 如果已经初始化过会直接返回True + - 需要获取chat_stream对象才能继续后续操作 + """ + # 如果已经初始化过,直接返回成功 + if self._initialized: + return True + + try: + self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.stream_id) + await self.expressor.initialize() + self.chat_stream = await asyncio.to_thread(chat_manager.get_stream, self.stream_id) + self.expressor.chat_stream = self.chat_stream + self.log_prefix = f"[{chat_manager.get_stream_name(self.stream_id) or self.stream_id}]" + except Exception as e: + logger.error(f"[HFC:{self.stream_id}] 初始化HFC时发生错误: {e}") + return False + + # 标记初始化完成 + self._initialized = True + logger.debug(f"{self.log_prefix} 初始化完成,准备开始处理消息") + return True + + async def start(self): + """ + 启动 HeartFChatting 的主循环。 + 注意:调用此方法前必须确保已经成功初始化。 + """ + logger.info(f"{self.log_prefix} 开始认真水群(HFC)...") + await self._start_loop_if_needed() + + async def _start_loop_if_needed(self): + """检查是否需要启动主循环,如果未激活则启动。""" + # 如果循环已经激活,直接返回 + if self._loop_active: + return + + # 标记为活动状态,防止重复启动 + self._loop_active = True + + # 检查是否已有任务在运行(理论上不应该,因为 _loop_active=False) + if self._loop_task and not self._loop_task.done(): + logger.warning(f"{self.log_prefix} 发现之前的循环任务仍在运行(不符合预期)。取消旧任务。") + self._loop_task.cancel() + try: + # 等待旧任务确实被取消 + await asyncio.wait_for(self._loop_task, timeout=0.5) + except (asyncio.CancelledError, asyncio.TimeoutError): + pass # 忽略取消或超时错误 + self._loop_task = None # 清理旧任务引用 + + logger.debug(f"{self.log_prefix} 启动认真水群(HFC)主循环...") + # 创建新的循环任务 + self._loop_task = asyncio.create_task(self._hfc_loop()) + # 添加完成回调 + self._loop_task.add_done_callback(self._handle_loop_completion) + + def _handle_loop_completion(self, task: asyncio.Task): + """当 _hfc_loop 任务完成时执行的回调。""" + try: + exception = task.exception() + if exception: + logger.error(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天(异常): {exception}") + logger.error(traceback.format_exc()) # Log full traceback for exceptions + else: + # Loop completing normally now means it was cancelled/shutdown externally + logger.info(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天 (外部停止)") + except asyncio.CancelledError: + logger.info(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天(任务取消)") + finally: + self._loop_active = False + self._loop_task = None + if self._processing_lock.locked(): + 
logger.warning(f"{self.log_prefix} HeartFChatting: 处理锁在循环结束时仍被锁定,强制释放。") + self._processing_lock.release() + + async def _hfc_loop(self): + """主循环,持续进行计划并可能回复消息,直到被外部取消。""" + try: + while True: # 主循环 + logger.debug(f"{self.log_prefix} 开始第{self._cycle_counter}次循环") + # --- 在循环开始处检查关闭标志 --- + if self._shutting_down: + logger.info(f"{self.log_prefix} 检测到关闭标志,退出 HFC 循环。") + break + # -------------------------------- + + # 创建新的循环信息 + self._cycle_counter += 1 + self._current_cycle = CycleDetail(self._cycle_counter) + + # 初始化周期状态 + cycle_timers = {} + loop_cycle_start_time = time.monotonic() + + # 执行规划和处理阶段 + async with self._get_cycle_context() as acquired_lock: + if not acquired_lock: + # 如果未能获取锁(理论上不太可能,除非 shutdown 过程中释放了但又被抢了?) + # 或者也可以在这里再次检查 self._shutting_down + if self._shutting_down: + break # 再次检查,确保退出 + logger.warning(f"{self.log_prefix} 未能获取循环处理锁,跳过本次循环。") + await asyncio.sleep(0.1) # 短暂等待避免空转 + continue + + # thinking_id 是思考过程的ID,用于标记每一轮思考 + thinking_id = "tid" + str(round(time.time(), 2)) + + # 主循环:思考->决策->执行 + + action_taken = await self._think_plan_execute_loop(cycle_timers, thinking_id) + + # 更新循环信息 + self._current_cycle.set_thinking_id(thinking_id) + self._current_cycle.timers = cycle_timers + + # 防止循环过快消耗资源 + await _handle_cycle_delay(action_taken, loop_cycle_start_time, self.log_prefix) + + # 完成当前循环并保存历史 + self._current_cycle.complete_cycle() + self._cycle_history.append(self._current_cycle) + + # 保存CycleInfo到文件 + try: + filepath = CycleDetail.save_to_file(self._current_cycle, self.stream_id) + logger.info(f"{self.log_prefix} 已保存循环信息到文件: {filepath}") + except Exception as e: + logger.error(f"{self.log_prefix} 保存循环信息到文件时出错: {e}") + + # 记录循环信息和计时器结果 + timer_strings = [] + for name, elapsed in cycle_timers.items(): + formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" + timer_strings.append(f"{name}: {formatted_time}") + + logger.debug( + f"{self.log_prefix} 第 #{self._current_cycle.cycle_id}次思考完成," + f"耗时: {self._current_cycle.end_time - self._current_cycle.start_time:.2f}秒, " + f"动作: {self._current_cycle.action_type}" + + (f"\n计时器详情: {'; '.join(timer_strings)}" if timer_strings else "") + ) + + except asyncio.CancelledError: + # 设置了关闭标志位后被取消是正常流程 + if not self._shutting_down: + logger.warning(f"{self.log_prefix} HeartFChatting: 麦麦的认真水群(HFC)循环意外被取消") + else: + logger.info(f"{self.log_prefix} HeartFChatting: 麦麦的认真水群(HFC)循环已取消 (正常关闭)") + except Exception as e: + logger.error(f"{self.log_prefix} HeartFChatting: 意外错误: {e}") + logger.error(traceback.format_exc()) + + @contextlib.asynccontextmanager + async def _get_cycle_context(self): + """ + 循环周期的上下文管理器 + + 用于确保资源的正确获取和释放: + 1. 获取处理锁 + 2. 执行操作 + 3. 
释放锁 + """ + acquired = False + try: + await self._processing_lock.acquire() + acquired = True + yield acquired + finally: + if acquired and self._processing_lock.locked(): + self._processing_lock.release() + + async def _think_plan_execute_loop(self, cycle_timers: dict, thinking_id: str) -> tuple[bool, str]: + try: + with Timer("观察", cycle_timers): + await self.observations[0].observe() + await self.memory_observation.observe() + await self.working_observation.observe() + await self.hfcloop_observation.observe() + observations: List[Observation] = [] + observations.append(self.observations[0]) + observations.append(self.memory_observation) + observations.append(self.working_observation) + observations.append(self.hfcloop_observation) + + for observation in observations: + logger.debug(f"{self.log_prefix} 观察信息: {observation}") + + with Timer("回忆", cycle_timers): + running_memorys = await self.memory_activator.activate_memory(observations) + + # 记录并行任务开始时间 + parallel_start_time = time.time() + logger.debug(f"{self.log_prefix} 开始信息处理器并行任务") + + # 并行执行两个任务:思考和工具执行 + with Timer("执行 信息处理器", cycle_timers): + # 1. 子思维思考 - 不执行工具调用 + think_task = asyncio.create_task( + self.mind_processor.process_info(observations=observations, running_memorys=running_memorys) + ) + logger.debug(f"{self.log_prefix} 启动子思维思考任务") + + # 2. 工具执行器 - 专门处理工具调用 + tool_task = asyncio.create_task( + self.tool_processor.process_info(observations=observations, running_memorys=running_memorys) + ) + logger.debug(f"{self.log_prefix} 启动工具执行任务") + + # 3. 聊天信息处理器 + chatting_info_task = asyncio.create_task( + self.chatting_info_processor.process_info( + observations=observations, running_memorys=running_memorys + ) + ) + logger.debug(f"{self.log_prefix} 启动聊天信息处理器任务") + + # 创建任务完成状态追踪 + tasks = {"思考任务": think_task, "工具任务": tool_task, "聊天信息处理任务": chatting_info_task} + pending = set(tasks.values()) + + # 等待所有任务完成,同时追踪每个任务的完成情况 + results: dict[str, list[InfoBase]] = {} + while pending: + # 等待任务完成 + done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED, timeout=1.0) + + # 记录完成的任务 + for task in done: + for name, t in tasks.items(): + if task == t: + task_end_time = time.time() + task_duration = task_end_time - parallel_start_time + logger.info(f"{self.log_prefix} {name}已完成,耗时: {task_duration:.2f}秒") + results[name] = task.result() + break + + # 如果仍有未完成任务,记录进行中状态 + if pending: + current_time = time.time() + elapsed = current_time - parallel_start_time + pending_names = [name for name, t in tasks.items() if t in pending] + logger.info( + f"{self.log_prefix} 并行处理已进行{elapsed:.2f}秒,待完成任务: {', '.join(pending_names)}" + ) + + # 所有任务完成,从结果中提取数据 + mind_processed_infos = results.get("思考任务", []) + tool_processed_infos = results.get("工具任务", []) + chatting_info_processed_infos = results.get("聊天信息处理任务", []) + + # 记录总耗时 + parallel_end_time = time.time() + total_duration = parallel_end_time - parallel_start_time + logger.info(f"{self.log_prefix} 思考和工具并行任务全部完成,总耗时: {total_duration:.2f}秒") + + all_plan_info = mind_processed_infos + tool_processed_infos + chatting_info_processed_infos + + logger.debug(f"{self.log_prefix} 所有信息处理器处理后的信息: {all_plan_info}") + # 串行执行规划器 - 使用刚获取的思考结果 + logger.debug(f"{self.log_prefix} 开始 规划器") + with Timer("规划器", cycle_timers): + planner_result = await self._planner(all_plan_info, cycle_timers) + + action = planner_result.get("action", "error") + action_data = planner_result.get("action_data", {}) # 新增获取动作数据 + reasoning = planner_result.get("reasoning", "未提供理由") + + logger.debug(f"{self.log_prefix} 
动作和动作信息: {action}, {action_data}, {reasoning}")
+
+            # 更新循环信息
+            self._current_cycle.set_action_info(
+                action_type=action,
+                action_data=action_data,
+                reasoning=reasoning,
+                action_taken=True,
+            )
+
+            # 处理LLM错误
+            if planner_result.get("llm_error"):
+                logger.error(f"{self.log_prefix} LLM失败: {reasoning}")
+                return False, ""
+
+            # 记录决策结果日志
+            if action == "reply":
+                action_str = "回复"
+            elif action == "no_reply":
+                action_str = "不回复"
+            else:
+                action_str = "未知动作"
+
+            logger.info(f"{self.log_prefix} 麦麦决定'{action_str}', 原因'{reasoning}'")
+
+            self.hfcloop_observation.add_loop_info(self._current_cycle)
+
+            return await self._handle_action(action, reasoning, action_data, cycle_timers, thinking_id)
+
+        except Exception as e:
+            logger.error(f"{self.log_prefix} 并行+串行处理失败: {e}")
+            logger.error(traceback.format_exc())
+            return False, ""
+
+    async def _handle_action(
+        self,
+        action: str,
+        reasoning: str,
+        action_data: dict,
+        cycle_timers: dict,
+        thinking_id: str,
+    ) -> tuple[bool, str]:
+        """
+        处理规划动作
+
+        参数:
+            action: 动作类型
+            reasoning: 决策理由
+            action_data: 动作数据,包含不同动作需要的参数
+            cycle_timers: 计时器字典
+            thinking_id: 本轮思考的消息ID
+
+        返回:
+            tuple[bool, str]: (是否执行了动作, 思考消息ID)
+        """
+        action_handlers = {
+            "reply": self._handle_reply,
+            "no_reply": self._handle_no_reply,
+        }
+
+        handler = action_handlers.get(action)
+        if not handler:
+            logger.warning(f"{self.log_prefix} 未知动作: {action}, 原因: {reasoning}")
+            return False, ""
+
+        try:
+            if action == "reply":
+                return await handler(reasoning, action_data, cycle_timers, thinking_id)
+            else:  # no_reply
+                return await handler(reasoning, cycle_timers, thinking_id)
+        except Exception as e:
+            logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
+            traceback.print_exc()
+            return False, ""
+
+    async def _handle_no_reply(self, reasoning: str, cycle_timers: dict, thinking_id: str) -> tuple[bool, str]:
+        """
+        处理不回复的情况
+
+        工作流程:
+            1. 等待新消息、超时或关闭信号
+            2. 根据等待结果更新连续不回复计数
+            3. 
如果达到阈值,触发回调 + + 参数: + reasoning: 不回复的原因 + planner_start_db_time: 规划开始时间 + cycle_timers: 计时器字典 + + 返回: + bool: 是否成功处理 + """ + logger.info(f"{self.log_prefix} 决定不回复: {reasoning}") + + observation = self.observations[0] if self.observations else None + + try: + with Timer("等待新消息", cycle_timers): + # 等待新消息、超时或关闭信号,并获取结果 + await self._wait_for_new_message(observation, thinking_id, self.log_prefix) + # 从计时器获取实际等待时间 + current_waiting = cycle_timers.get("等待新消息", 0.0) + + if not self._shutting_down: + self.total_no_reply_count += 1 + self.total_waiting_time += current_waiting # 累加等待时间 + logger.debug( + f"{self.log_prefix} 连续不回复计数增加: {self.total_no_reply_count}/{CONSECUTIVE_NO_REPLY_THRESHOLD}, " + f"本次等待: {current_waiting:.2f}秒, 累计等待: {self.total_waiting_time:.2f}秒" + ) + + # 检查是否同时达到次数和时间阈值 + time_threshold = 0.66 * WAITING_TIME_THRESHOLD * CONSECUTIVE_NO_REPLY_THRESHOLD + if ( + self.total_no_reply_count >= CONSECUTIVE_NO_REPLY_THRESHOLD + and self.total_waiting_time >= time_threshold + ): + logger.info( + f"{self.log_prefix} 连续不回复达到阈值 ({self.total_no_reply_count}次) " + f"且累计等待时间达到 {self.total_waiting_time:.2f}秒 (阈值 {time_threshold}秒)," + f"调用回调请求状态转换" + ) + # 调用回调。注意:这里不重置计数器和时间,依赖回调函数成功改变状态来隐式重置上下文。 + await self.on_consecutive_no_reply_callback() + elif self.total_no_reply_count >= CONSECUTIVE_NO_REPLY_THRESHOLD: + # 仅次数达到阈值,但时间未达到 + logger.debug( + f"{self.log_prefix} 连续不回复次数达到阈值 ({self.total_no_reply_count}次) " + f"但累计等待时间 {self.total_waiting_time:.2f}秒 未达到时间阈值 ({time_threshold}秒),暂不调用回调" + ) + # else: 次数和时间都未达到阈值,不做处理 + + return True, thinking_id + + except asyncio.CancelledError: + logger.info(f"{self.log_prefix} 处理 'no_reply' 时等待被中断 (CancelledError)") + raise + except Exception as e: # 捕获调用管理器或其他地方可能发生的错误 + logger.error(f"{self.log_prefix} 处理 'no_reply' 时发生错误: {e}") + logger.error(traceback.format_exc()) + return False, thinking_id + + async def _wait_for_new_message(self, observation: ChattingObservation, thinking_id: str, log_prefix: str) -> bool: + """ + 等待新消息 或 检测到关闭信号 + + 参数: + observation: 观察实例 + planner_start_db_time: 开始等待的时间 + log_prefix: 日志前缀 + + 返回: + bool: 是否检测到新消息 (如果因关闭信号退出则返回 False) + """ + wait_start_time = time.monotonic() + while True: + # --- 在每次循环开始时检查关闭标志 --- + if self._shutting_down: + logger.info(f"{log_prefix} 等待新消息时检测到关闭信号,中断等待。") + return False # 表示因为关闭而退出 + # ----------------------------------- + + thinking_id_timestamp = parse_thinking_id_to_timestamp(thinking_id) + + # 检查新消息 + if await observation.has_new_messages_since(thinking_id_timestamp): + logger.info(f"{log_prefix} 检测到新消息") + return True + + # 检查超时 (放在检查新消息和关闭之后) + if time.monotonic() - wait_start_time > WAITING_TIME_THRESHOLD: + logger.warning(f"{log_prefix} 等待新消息超时({WAITING_TIME_THRESHOLD}秒)") + return False + + try: + # 短暂休眠,让其他任务有机会运行,并能更快响应取消或关闭 + await asyncio.sleep(0.5) # 缩短休眠时间 + except asyncio.CancelledError: + # 如果在休眠时被取消,再次检查关闭标志 + # 如果是正常关闭,则不需要警告 + if not self._shutting_down: + logger.warning(f"{log_prefix} _wait_for_new_message 的休眠被意外取消") + # 无论如何,重新抛出异常,让上层处理 + raise + + async def shutdown(self): + """优雅关闭HeartFChatting实例,取消活动循环任务""" + logger.info(f"{self.log_prefix} 正在关闭HeartFChatting...") + self._shutting_down = True # <-- 在开始关闭时设置标志位 + + # 取消循环任务 + if self._loop_task and not self._loop_task.done(): + logger.info(f"{self.log_prefix} 正在取消HeartFChatting循环任务") + self._loop_task.cancel() + try: + await asyncio.wait_for(self._loop_task, timeout=1.0) + logger.info(f"{self.log_prefix} HeartFChatting循环任务已取消") + except (asyncio.CancelledError, asyncio.TimeoutError): + pass + except Exception as e: 
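+                # 其他异常仅记录日志,不再向上抛出,保证关闭流程可以继续执行后续清理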
+ logger.error(f"{self.log_prefix} 取消循环任务出错: {e}") + else: + logger.info(f"{self.log_prefix} 没有活动的HeartFChatting循环任务") + + # 清理状态 + self._loop_active = False + self._loop_task = None + if self._processing_lock.locked(): + self._processing_lock.release() + logger.warning(f"{self.log_prefix} 已释放处理锁") + + logger.info(f"{self.log_prefix} HeartFChatting关闭完成") + + def get_cycle_history(self, last_n: Optional[int] = None) -> List[Dict[str, Any]]: + """获取循环历史记录 + + 参数: + last_n: 获取最近n个循环的信息,如果为None则获取所有历史记录 + + 返回: + List[Dict[str, Any]]: 循环历史记录列表 + """ + history = list(self._cycle_history) + if last_n is not None: + history = history[-last_n:] + return [cycle.to_dict() for cycle in history] + + async def _planner(self, all_plan_info: List[InfoBase], cycle_timers: dict) -> Dict[str, Any]: + """ + 规划器 (Planner): 使用LLM根据上下文决定是否和如何回复。 + 重构为:让LLM返回结构化JSON文本,然后在代码中解析。 + + 参数: + current_mind: 子思维的当前思考结果 + cycle_timers: 计时器字典 + is_re_planned: 是否为重新规划 (此重构中暂时简化,不处理 is_re_planned 的特殊逻辑) + """ + logger.info(f"{self.log_prefix}开始 规划") + + actions_to_remove_temporarily = [] + # --- 检查历史动作并决定临时移除动作 (逻辑保持不变) --- + lian_xu_wen_ben_hui_fu = 0 + probability_roll = random.random() + for cycle in reversed(self._cycle_history): + if cycle.action_taken: + if cycle.action_type == "text_reply": + lian_xu_wen_ben_hui_fu += 1 + else: + break + if len(self._cycle_history) > 0 and cycle.cycle_id <= self._cycle_history[0].cycle_id + ( + len(self._cycle_history) - 4 + ): + break + logger.debug(f"{self.log_prefix}[Planner] 检测到连续文本回复次数: {lian_xu_wen_ben_hui_fu}") + + if lian_xu_wen_ben_hui_fu >= 3: + logger.info(f"{self.log_prefix}[Planner] 连续回复 >= 3 次,强制移除 text_reply 和 emoji_reply") + actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) + elif lian_xu_wen_ben_hui_fu == 2: + if probability_roll < 0.8: + logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (触发)") + actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) + else: + logger.info( + f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (未触发)" + ) + elif lian_xu_wen_ben_hui_fu == 1: + if probability_roll < 0.4: + logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (触发)") + actions_to_remove_temporarily.append("text_reply") + else: + logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (未触发)") + # --- 结束检查历史动作 --- + + # 获取观察信息 + for info in all_plan_info: + if isinstance(info, ObsInfo): + logger.debug(f"{self.log_prefix} 观察信息: {info}") + observed_messages = info.get_talking_message() + observed_messages_str = info.get_talking_message_str_truncate() + chat_type = info.get_chat_type() + if chat_type == "group": + is_group_chat = True + else: + is_group_chat = False + elif isinstance(info, MindInfo): + logger.debug(f"{self.log_prefix} 思维信息: {info}") + current_mind = info.get_current_mind() + elif isinstance(info, CycleInfo): + logger.debug(f"{self.log_prefix} 循环信息: {info}") + cycle_info = info.get_observe_info() + elif isinstance(info, StructuredInfo): + logger.debug(f"{self.log_prefix} 结构化信息: {info}") + structured_info = info.get_data() + + # --- 使用 LLM 进行决策 (JSON 输出模式) --- # + action = "no_reply" # 默认动作 + reasoning = "规划器初始化默认" + llm_error = False # LLM 请求或解析错误标志 + + # 获取我们将传递给 prompt 构建器和用于验证的当前可用动作 + current_available_actions = self.action_manager.get_available_actions() + + try: + # --- 应用临时动作移除 --- + if actions_to_remove_temporarily: + self.action_manager.temporarily_remove_actions(actions_to_remove_temporarily) + # 更新 current_available_actions 以反映移除后的状态 + 
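+                # 注意:临时移除仅对本次规划生效,方法末尾的 finally 块会通过 restore_actions() 恢复完整动作集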
current_available_actions = self.action_manager.get_available_actions() + logger.debug( + f"{self.log_prefix}[Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(current_available_actions.keys())}" + ) + + # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- + prompt = await prompt_builder.build_planner_prompt( + is_group_chat=is_group_chat, # <-- Pass HFC state + chat_target_info=None, + observed_messages_str=observed_messages_str, # <-- Pass local variable + current_mind=current_mind, # <-- Pass argument + structured_info=structured_info, # <-- Pass SubMind info + current_available_actions=current_available_actions, # <-- Pass determined actions + cycle_info=cycle_info, # <-- Pass cycle info + ) + + # --- 调用 LLM (普通文本生成) --- + llm_content = None + try: + llm_content, _, _ = await self.planner_llm.generate_response(prompt=prompt) + logger.debug(f"{self.log_prefix}[Planner] LLM 原始 JSON 响应 (预期): {llm_content}") + except Exception as req_e: + logger.error(f"{self.log_prefix}[Planner] LLM 请求执行失败: {req_e}") + reasoning = f"LLM 请求失败: {req_e}" + llm_error = True + # 直接使用默认动作返回错误结果 + action = "no_reply" # 明确设置为默认值 + + # --- 解析 LLM 返回的 JSON (仅当 LLM 请求未出错时进行) --- + if not llm_error and llm_content: + try: + # 尝试去除可能的 markdown 代码块标记 + cleaned_content = ( + llm_content.strip().removeprefix("```json").removeprefix("```").removesuffix("```").strip() + ) + if not cleaned_content: + raise json.JSONDecodeError("Cleaned content is empty", cleaned_content, 0) + parsed_json = json.loads(cleaned_content) + + # 提取决策,提供默认值 + extracted_action = parsed_json.get("action", "no_reply") + extracted_reasoning = parsed_json.get("reasoning", "LLM未提供理由") + # extracted_emoji_query = parsed_json.get("emoji_query", "") + + # 新的reply格式 + if extracted_action == "reply": + action_data = { + "text": parsed_json.get("text", []), + "emojis": parsed_json.get("emojis", []), + "target": parsed_json.get("target", ""), + } + else: + action_data = {} # 其他动作可能不需要额外数据 + + # 验证动作是否在当前可用列表中 + # !! 使用调用 prompt 时实际可用的动作列表进行验证 + if extracted_action not in current_available_actions: + logger.warning( + f"{self.log_prefix}[Planner] LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" + ) + action = "no_reply" + reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}" + # 检查 no_reply 是否也恰好被移除了 (极端情况) + if "no_reply" not in current_available_actions: + logger.error( + f"{self.log_prefix}[Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。" + ) + action = "error" # 回退到错误状态 + reasoning = "无法执行任何有效动作,包括 no_reply" + llm_error = True # 标记为严重错误 + else: + llm_error = False # 视为逻辑修正而非 LLM 错误 + else: + # 动作有效且可用 + action = extracted_action + reasoning = extracted_reasoning + llm_error = False # 解析成功 + logger.debug( + f"{self.log_prefix}[要做什么]\nPrompt:\n{prompt}\n\n决策结果 (来自JSON): {action}, 理由: {reasoning}" + ) + logger.debug(f"{self.log_prefix}动作信息: '{action_data}'") + + except Exception as json_e: + logger.warning( + f"{self.log_prefix}[Planner] 解析LLM响应JSON失败: {json_e}. LLM原始输出: '{llm_content}'" + ) + reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_reply'." + action = "no_reply" # 解析失败则默认不回复 + llm_error = True # 标记解析错误 + elif not llm_error and not llm_content: + # LLM 请求成功但返回空内容 + logger.warning(f"{self.log_prefix}[Planner] LLM 返回了空内容。") + reasoning = "LLM 返回了空内容,使用默认动作 'no_reply'." 
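+                # 空响应与解析失败采用同样的回退:使用默认动作 no_reply 并标记 llm_error 供上层判断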
+ action = "no_reply" + llm_error = True # 标记为空响应错误 + + except Exception as outer_e: + logger.error(f"{self.log_prefix}[Planner] Planner 处理过程中发生意外错误: {outer_e}") + traceback.print_exc() + action = "error" # 发生未知错误,标记为 error 动作 + reasoning = f"Planner 内部处理错误: {outer_e}" + llm_error = True + finally: + # --- 确保动作恢复 --- + if self.action_manager._original_actions_backup is not None: + self.action_manager.restore_actions() + logger.debug( + f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}" + ) + + # --- 概率性忽略文本回复附带的表情 (逻辑保持不变) --- + emoji = action_data.get("emojis") + if action == "reply" and emoji: + logger.debug(f"{self.log_prefix}[Planner] 大模型建议文字回复带表情: '{emoji}'") + if random.random() > EMOJI_SEND_PRO: + logger.info(f"{self.log_prefix}但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji}'") + action_data["emojis"] = "" # 清空表情请求 + else: + logger.info(f"{self.log_prefix}好吧,加上表情 '{emoji}'") + # --- 结束概率性忽略 --- + + # 返回结果字典 + return { + "action": action, + "action_data": action_data, + "reasoning": reasoning, + "current_mind": current_mind, + "observed_messages": observed_messages, + "llm_error": llm_error, # 返回错误状态 + } + + async def _handle_reply( + self, reasoning: str, reply_data: dict, cycle_timers: dict, thinking_id: str + ) -> tuple[bool, str]: + """ + 处理统一的回复动作 - 可包含文本和表情,顺序任意 + + reply_data格式: + { + "text": "你好啊" # 文本内容列表(可选) + "target": "锚定消息", # 锚定消息的文本内容 + "emojis": "微笑" # 表情关键词列表(可选) + } + """ + # 重置连续不回复计数器 + self.total_no_reply_count = 0 + self.total_waiting_time = 0.0 + + # 从聊天观察获取锚定消息 + observations: ChattingObservation = self.observations[0] + anchor_message = observations.serch_message_by_text(reply_data["target"]) + + # 如果没有找到锚点消息,创建一个占位符 + if not anchor_message: + logger.info(f"{self.log_prefix} 未找到锚点消息,创建占位符") + anchor_message = await create_empty_anchor_message( + self.chat_stream.platform, self.chat_stream.group_info, self.chat_stream + ) + else: + anchor_message.update_chat_stream(self.chat_stream) + + success, reply_set = await self.expressor.deal_reply( + cycle_timers=cycle_timers, + action_data=reply_data, + anchor_message=anchor_message, + reasoning=reasoning, + thinking_id=thinking_id, + ) + + reply_text = "" + for reply in reply_set: + type = reply[0] + data = reply[1] + if type == "text": + reply_text += data + elif type == "emoji": + reply_text += data + + self._current_cycle.set_response_info( + response_text=reply_text, + ) + + return success, reply_text diff --git a/src/plugins/heartFC_chat/heartFC_sender.py b/src/chat/focus_chat/heartFC_sender.py similarity index 53% rename from src/plugins/heartFC_chat/heartFC_sender.py rename to src/chat/focus_chat/heartFC_sender.py index b193ae44a..846ad1fe8 100644 --- a/src/plugins/heartFC_chat/heartFC_sender.py +++ b/src/chat/focus_chat/heartFC_sender.py @@ -1,15 +1,13 @@ -# src/plugins/heartFC_chat/heartFC_sender.py -import asyncio # 重新导入 asyncio +import asyncio from typing import Dict, Optional # 重新导入类型 -from ..chat.message import MessageSending, MessageThinking # 只保留 MessageSending 和 MessageThinking - -# from ..message import global_api -from src.plugins.message.api import global_api -from ..storage.storage import MessageStorage -from ..chat.utils import truncate_message +from src.chat.message_receive.message import MessageSending, MessageThinking +from src.common.message.api import global_api +from src.chat.message_receive.storage import MessageStorage +from src.chat.utils.utils import truncate_message from src.common.logger_manager import get_logger -from 
src.plugins.chat.utils import calculate_typing_time +from src.chat.utils.utils import calculate_typing_time from rich.traceback import install +import traceback install(extra_lines=3) @@ -19,17 +17,16 @@ logger = get_logger("sender") async def send_message(message: MessageSending) -> None: """合并后的消息发送函数,包含WS发送和日志记录""" - message_preview = truncate_message(message.processed_plain_text) + message_preview = truncate_message(message.processed_plain_text, max_length=40) try: # 直接调用API发送消息 await global_api.send_message(message) - logger.success(f"发送消息 '{message_preview}' 成功") + logger.success(f"已将消息 '{message_preview}' 发往平台'{message.message_info.platform}'") except Exception as e: - logger.error(f"发送消息 '{message_preview}' 失败: {str(e)}") - if not message.message_info.platform: - raise ValueError(f"未找到平台:{message.message_info.platform} 的url配置,请检查配置文件") from e + logger.error(f"发送消息 '{message_preview}' 发往平台'{message.message_info.platform}' 失败: {str(e)}") + traceback.print_exc() raise e # 重新抛出其他异常 @@ -69,21 +66,24 @@ class HeartFCSender: del self.thinking_messages[chat_id] logger.debug(f"[{chat_id}] Removed empty thinking message container.") - def is_thinking(self, chat_id: str, message_id: str) -> bool: - """检查指定的消息 ID 是否当前正处于思考状态。""" - return chat_id in self.thinking_messages and message_id in self.thinking_messages[chat_id] - async def get_thinking_start_time(self, chat_id: str, message_id: str) -> Optional[float]: """获取已注册思考消息的开始时间。""" async with self._thinking_lock: thinking_message = self.thinking_messages.get(chat_id, {}).get(message_id) return thinking_message.thinking_start_time if thinking_message else None - async def type_and_send_message(self, message: MessageSending, typing=False): + async def send_message(self, message: MessageSending, has_thinking=False, typing=False): """ - 立即处理、发送并存储单个 MessageSending 消息。 - 调用此方法前,应先调用 register_thinking 注册对应的思考消息。 - 此方法执行后会调用 complete_thinking 清理思考状态。 + 处理、发送并存储一条消息。 + + 参数: + message: MessageSending 对象,待发送的消息。 + has_thinking: 是否管理思考状态,表情包无思考状态(如需调用 register_thinking/complete_thinking)。 + typing: 是否模拟打字等待(根据 has_thinking 控制等待时长)。 + + 用法: + - has_thinking=True 时,自动处理思考消息的时间和清理。 + - typing=True 时,发送前会有打字等待。 """ if not message.chat_stream: logger.error("消息缺少 chat_stream,无法发送") @@ -96,23 +96,29 @@ class HeartFCSender: message_id = message.message_info.message_id try: - _ = message.update_thinking_time() + if has_thinking: + _ = message.update_thinking_time() - # --- 条件应用 set_reply 逻辑 --- - if message.apply_set_reply_logic and message.is_head and not message.is_private_message(): - logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...") - message.set_reply() - # --- 结束条件 set_reply --- + # --- 条件应用 set_reply 逻辑 --- + if ( + message.is_head + and not message.is_private_message() + and message.reply.processed_plain_text != "[System Trigger Context]" + ): + logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...") await message.process() if typing: - typing_time = calculate_typing_time( - input_string=message.processed_plain_text, - thinking_start_time=message.thinking_start_time, - is_emoji=message.is_emoji, - ) - await asyncio.sleep(typing_time) + if has_thinking: + typing_time = calculate_typing_time( + input_string=message.processed_plain_text, + thinking_start_time=message.thinking_start_time, + is_emoji=message.is_emoji, + ) + await asyncio.sleep(typing_time) + else: + await asyncio.sleep(0.5) await send_message(message) await self.storage.store_message(message, message.chat_stream) @@ -122,30 
+128,3 @@ class HeartFCSender: raise e finally: await self.complete_thinking(chat_id, message_id) - - async def send_and_store(self, message: MessageSending): - """处理、发送并存储单个消息,不涉及思考状态管理。""" - if not message.chat_stream: - logger.error(f"[{message.message_info.platform or 'UnknownPlatform'}] 消息缺少 chat_stream,无法发送") - return - if not message.message_info or not message.message_info.message_id: - logger.error( - f"[{message.chat_stream.stream_id if message.chat_stream else 'UnknownStream'}] 消息缺少 message_info 或 message_id,无法发送" - ) - return - - chat_id = message.chat_stream.stream_id - message_id = message.message_info.message_id # 获取消息ID用于日志 - - try: - await message.process() - - await asyncio.sleep(0.5) - - await send_message(message) # 使用现有的发送方法 - await self.storage.store_message(message, message.chat_stream) # 使用现有的存储方法 - - except Exception as e: - logger.error(f"[{chat_id}] 处理或存储消息 {message_id} 时出错: {e}") - # 重新抛出异常,让调用者知道失败了 - raise e diff --git a/src/plugins/heartFC_chat/heartflow_processor.py b/src/chat/focus_chat/heartflow_processor.py similarity index 85% rename from src/plugins/heartFC_chat/heartflow_processor.py rename to src/chat/focus_chat/heartflow_processor.py index 5bd63b14a..bbfa4ce46 100644 --- a/src/plugins/heartFC_chat/heartflow_processor.py +++ b/src/chat/focus_chat/heartflow_processor.py @@ -2,16 +2,17 @@ import time import traceback from ..memory_system.Hippocampus import HippocampusManager from ...config.config import global_config -from ..chat.message import MessageRecv -from ..storage.storage import MessageStorage -from ..chat.utils import is_mentioned_bot_in_message +from ..message_receive.message import MessageRecv +from ..message_receive.storage import MessageStorage +from ..utils.utils import is_mentioned_bot_in_message from maim_message import Seg -from src.heart_flow.heartflow import heartflow +from src.chat.heart_flow.heartflow import heartflow from src.common.logger_manager import get_logger -from ..chat.chat_stream import chat_manager -from ..chat.message_buffer import message_buffer +from ..message_receive.chat_stream import chat_manager + +# from ..message_receive.message_buffer import message_buffer from ..utils.timer_calculator import Timer -from src.plugins.person_info.relationship_manager import relationship_manager +from src.chat.person_info.relationship_manager import relationship_manager from typing import Optional, Tuple, Dict, Any logger = get_logger("chat") @@ -169,7 +170,7 @@ class HeartFCProcessor: messageinfo = message.message_info # 2. 消息缓冲与流程序化 - await message_buffer.start_caching_messages(message) + # await message_buffer.start_caching_messages(message) chat = await chat_manager.get_or_create_stream( platform=messageinfo.platform, @@ -188,16 +189,16 @@ class HeartFCProcessor: return # 4. 缓冲检查 - buffer_result = await message_buffer.query_buffer_result(message) - if not buffer_result: - msg_type = _get_message_type(message) - type_messages = { - "text": f"触发缓冲,消息:{message.processed_plain_text}", - "image": "触发缓冲,表情包/图片等待中", - "seglist": "触发缓冲,消息列表等待中", - } - logger.debug(type_messages.get(msg_type, "触发未知类型缓冲")) - return + # buffer_result = await message_buffer.query_buffer_result(message) + # if not buffer_result: + # msg_type = _get_message_type(message) + # type_messages = { + # "text": f"触发缓冲,消息:{message.processed_plain_text}", + # "image": "触发缓冲,表情包/图片等待中", + # "seglist": "触发缓冲,消息列表等待中", + # } + # logger.debug(type_messages.get(msg_type, "触发未知类型缓冲")) + # return # 5. 
消息存储 await self.storage.store_message(message, chat) @@ -210,12 +211,12 @@ class HeartFCProcessor: # 7. 日志记录 mes_name = chat.group_info.group_name if chat.group_info else "私聊" - current_time = time.strftime("%H点%M分%S秒", time.localtime(message.message_info.time)) + current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time)) logger.info( f"[{current_time}][{mes_name}]" f"{userinfo.user_nickname}:" f"{message.processed_plain_text}" - f"[兴趣度: {interested_rate:.2f}]" + f"[激活: {interested_rate:.1f}]" ) # 8. 关系处理 diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/chat/focus_chat/heartflow_prompt_builder.py similarity index 79% rename from src/plugins/heartFC_chat/heartflow_prompt_builder.py rename to src/chat/focus_chat/heartflow_prompt_builder.py index c4a137a78..9d39ae83d 100644 --- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py +++ b/src/chat/focus_chat/heartflow_prompt_builder.py @@ -1,21 +1,21 @@ -import random -from ...config.config import global_config +from src.config.config import global_config from src.common.logger_manager import get_logger -from ...individuality.individuality import Individuality -from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager -from src.plugins.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat -from src.plugins.person_info.relationship_manager import relationship_manager -from src.plugins.chat.utils import get_embedding +from src.individuality.individuality import Individuality +from src.chat.utils.prompt_builder import Prompt, global_prompt_manager +from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat +from src.chat.person_info.relationship_manager import relationship_manager +from src.chat.utils.utils import get_embedding import time -from typing import Union, Optional, Deque, Dict, Any -from ...common.database import db -from ..chat.utils import get_recent_group_speaker +from typing import Union, Optional, Dict, Any +from src.common.database import db +from src.chat.utils.utils import get_recent_group_speaker from src.manager.mood_manager import mood_manager -from ..memory_system.Hippocampus import HippocampusManager -from ..schedule.schedule_generator import bot_schedule -from ..knowledge.knowledge_lib import qa_manager +from src.chat.memory_system.Hippocampus import HippocampusManager +from src.chat.knowledge.knowledge_lib import qa_manager +from src.chat.focus_chat.expressors.exprssion_learner import expression_learner import traceback -from .heartFC_Cycleinfo import CycleInfo +import random + logger = get_logger("prompt") @@ -23,20 +23,23 @@ logger = get_logger("prompt") def init_prompt(): Prompt( """ -{info_from_tools} +你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中: +{style_habbits} + +你现在正在群里聊天,以下是群里正在进行的聊天内容: +{chat_info} + +以上是聊天内容,你需要了解聊天记录中的内容 + {chat_target} -{chat_talking_prompt} -现在你想要在群里发言或者回复。\n -你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality}"。 -你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,你可以参考贴吧,知乎或者微博的回复风格。 -看到以上聊天记录,你刚刚在想: - -{current_mind_info} -因为上述想法,你决定发言,原因是:{reason} - -回复尽量简短一些。请注意把握聊天内容,{reply_style2}。请一次只回复一个话题,不要同时回复多个人。{prompt_ger} -{reply_style1},说中文,不要刻意突出自身学科背景,注意只输出回复内容。 -{moderation_prompt}。注意:回复不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""", +你的名字是{bot_name},{prompt_personality},在这聊天中,"{target_message}"引起了你的注意,对这句话,你想表达:{in_mind_reply},原因是:{reason}。你现在要思考怎么回复 +你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。 +请你根据情景使用以下句法: +{grammar_habbits} 
+回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,你可以完全重组回复,保留最基本的表达含义就好,但注意回复要简短,但重组后保持语意通顺。 +回复不要浮夸,不要用夸张修辞,平淡一些。不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。 +现在,你说: +""", "heart_flow_prompt", ) @@ -54,59 +57,58 @@ def init_prompt(): """你的名字是{bot_name},{prompt_personality},{chat_context_description}。需要基于以下信息决定如何参与对话: {structured_info_block} {chat_content_block} -{current_mind_block} +{mind_info_prompt} {cycle_info_block} 请综合分析聊天内容和你看到的新消息,参考内心想法,并根据以下原则和可用动作做出决策。 【回复原则】 -1. 不回复(no_reply)适用: - - 话题无关/无聊/不感兴趣 - - 最后一条消息是你自己发的且无人回应你 - - 讨论你不懂的专业话题 - - 你发送了太多消息,且无人回复 +1. 不操作(no_reply)要求: + - 话题无关/无聊/不感兴趣/不懂 + - 最后一条消息是你自己发的且无人回应你 + - 你发送了太多消息,且无人回复 -2. 文字回复(text_reply)适用: - - 有实质性内容需要表达 - - 有人提到你,但你还没有回应他 - - 可以追加emoji_query表达情绪(emoji_query填写表情包的适用场合,也就是当前场合) - - 不要追加太多表情 - -3. 纯表情回复(emoji_reply)适用: - - 适合用表情回应的场景 - - 需提供明确的emoji_query - -4. 自我对话处理: - - 如果是自己发的消息想继续,需自然衔接 - - 避免重复或评价自己的发言 - - 不要和自己聊天 - -决策任务 -{action_options_text} +2. 回复(reply)要求: + - 有实质性内容需要表达 + - 有人提到你,但你还没有回应他 + - 在合适的时候添加表情(不要总是添加) + - 如果你要回复特定某人的某句话,或者你想回复较早的消息,请在target中指定那句话的原始文本 + - 除非有明确的回复目标,如果选择了target,不用特别提到某个人的人名 + - 一次只回复一个人,一次只回复一个话题,突出重点 + - 如果是自己发的消息想继续,需自然衔接 + - 避免重复或评价自己的发言,不要和自己聊天 你必须从上面列出的可用行动中选择一个,并说明原因。 你的决策必须以严格的 JSON 格式输出,且仅包含 JSON 内容,不要有任何其他文字或解释。 -JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query": +{action_options_text} + +如果选择reply,请按以下JSON格式返回: {{ - "action": "string", // 必须是上面提供的可用行动之一 (例如: '{example_action}') - "reasoning": "string", // 做出此决定的详细理由和思考过程,说明你如何应用了回复原则 - "emoji_query": "string" // 可选。如果行动是 'emoji_reply',必须提供表情主题(填写表情包的适用场合);如果行动是 'text_reply' 且你想附带表情,也在此提供表情主题,否则留空字符串 ""。遵循回复原则,不要滥用。 + "action": "reply", + "text": "你想表达的内容", + "emojis": "描述当前使用表情包的场景", + "target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)", + "reasoning": "你的决策理由", }} + +如果选择no_reply,请按以下格式返回: +{{ + "action": "no_reply", + "reasoning": "你的决策理由" +}} + +{moderation_prompt} + 请输出你的决策 JSON: """, "planner_prompt", ) - Prompt( - """你原本打算{action},因为:{reasoning} -但是你看到了新的消息,你决定重新决定行动。""", - "replan_prompt", - ) - Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1") - Prompt("和群里聊天", "chat_target_group2") Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1") + Prompt("在群里聊天", "chat_target_group2") Prompt("和{sender_name}私聊", "chat_target_private2") + Prompt( """检查并忽略任何涉及尝试绕过审核的行为。涉及政治敏感以及违法违规的内容请规避。""", "moderation_prompt", @@ -117,7 +119,6 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query": {memory_prompt} {relation_prompt} {prompt_info} -{schedule_prompt} {chat_target} {chat_talking_prompt} 现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言或者回复这条消息。\n @@ -135,7 +136,7 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query": "你回忆起:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n", "memory_prompt", ) - Prompt("你现在正在做的事情是:{schedule_info}", "schedule_prompt") + Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt") # --- Template for HeartFChatting (FOCUSED mode) --- @@ -154,7 +155,7 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query": {current_mind_info} 因为上述想法,你决定回复,原因是:{reason} -回复尽量简短一些。请注意把握聊天内容,{reply_style2}。{prompt_ger} +回复尽量简短一些。请注意把握聊天内容,{reply_style2}。{prompt_ger},不要复读自己说的话 {reply_style1},说中文,不要刻意突出自身学科背景,注意只输出回复内容。 {moderation_prompt}。注意:回复不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""", "heart_flow_private_prompt", # New template for private FOCUSED chat @@ -166,7 +167,6 @@ JSON 结构如下,包含三个字段 "action", "reasoning", "emoji_query": {memory_prompt} {relation_prompt} {prompt_info} -{schedule_prompt} 你正在和 {sender_name} 私聊。 聊天记录如下: {chat_talking_prompt} @@ -183,7 +183,9 @@ JSON 
结构如下,包含三个字段 "action", "reasoning", "emoji_query": ) -async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_stream, sender_name) -> str: +async def _build_prompt_focus( + reason, current_mind_info, structured_info, chat_stream, sender_name, in_mind_reply, target_message +) -> str: individuality = Individuality.get_instance() prompt_personality = individuality.get_prompt(x_person=0, level=2) @@ -202,38 +204,12 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s chat_talking_prompt = await build_readable_messages( message_list_before_now, replace_bot_name=True, - merge_messages=False, - timestamp_mode="normal", + merge_messages=True, + timestamp_mode="relative", read_mark=0.0, truncate=True, ) - prompt_ger = "" - if random.random() < 0.04: - prompt_ger += "你喜欢用倒装句" - if random.random() < 0.02: - prompt_ger += "你喜欢用反问句" - - reply_styles1 = [ - ("给出日常且口语化的回复,平淡一些", 0.4), - ("给出非常简短的回复", 0.4), - ("给出缺失主语的回复,简短", 0.15), - ("给出带有语病的回复,朴实平淡", 0.05), - ] - reply_style1_chosen = random.choices( - [style[0] for style in reply_styles1], weights=[style[1] for style in reply_styles1], k=1 - )[0] - - reply_styles2 = [ - ("不要回复的太有条理,可以有个性", 0.6), - ("不要回复的太有条理,可以复读", 0.15), - ("回复的认真一些", 0.2), - ("可以回复单个表情符号", 0.05), - ] - reply_style2_chosen = random.choices( - [style[0] for style in reply_styles2], weights=[style[1] for style in reply_styles2], k=1 - )[0] - if structured_info: structured_info_prompt = await global_prompt_manager.format_prompt( "info_from_tools", structured_info=structured_info @@ -241,6 +217,38 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s else: structured_info_prompt = "" + # 从/data/expression/对应chat_id/expressions.json中读取表达方式 + ( + learnt_style_expressions, + learnt_grammar_expressions, + personality_expressions, + ) = await expression_learner.get_expression_by_chat_id(chat_stream.stream_id) + + style_habbits = [] + grammar_habbits = [] + # 1. learnt_expressions加权随机选3条 + if learnt_style_expressions: + weights = [expr["count"] for expr in learnt_style_expressions] + selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3) + for expr in selected_learnt: + if isinstance(expr, dict) and "situation" in expr and "style" in expr: + style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}") + # 2. learnt_grammar_expressions加权随机选3条 + if learnt_grammar_expressions: + weights = [expr["count"] for expr in learnt_grammar_expressions] + selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3) + for expr in selected_learnt: + if isinstance(expr, dict) and "situation" in expr and "style" in expr: + grammar_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}") + # 3. 
personality_expressions随机选1条 + if personality_expressions: + expr = random.choice(personality_expressions) + if isinstance(expr, dict) and "situation" in expr and "style" in expr: + style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}") + + style_habbits_str = "\n".join(style_habbits) + grammar_habbits_str = "\n".join(grammar_habbits) + logger.debug("开始构建 focus prompt") # --- Choose template based on chat type --- @@ -248,22 +256,23 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s template_name = "heart_flow_prompt" # Group specific formatting variables (already fetched or default) chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1") - chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2") + # chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2") prompt = await global_prompt_manager.format_prompt( template_name, - info_from_tools=structured_info_prompt, + # info_from_tools=structured_info_prompt, + style_habbits=style_habbits_str, + grammar_habbits=grammar_habbits_str, chat_target=chat_target_1, # Used in group template - chat_talking_prompt=chat_talking_prompt, + # chat_talking_prompt=chat_talking_prompt, + chat_info=chat_talking_prompt, bot_name=global_config.BOT_NICKNAME, - prompt_personality=prompt_personality, - chat_target_2=chat_target_2, # Used in group template - current_mind_info=current_mind_info, - reply_style2=reply_style2_chosen, - reply_style1=reply_style1_chosen, + # prompt_personality=prompt_personality, + prompt_personality="", reason=reason, - prompt_ger=prompt_ger, - moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), + in_mind_reply=in_mind_reply, + target_message=target_message, + # moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), # sender_name is not used in the group template ) else: # Private chat @@ -277,10 +286,7 @@ async def _build_prompt_focus(reason, current_mind_info, structured_info, chat_s prompt_personality=prompt_personality, # chat_target and chat_target_2 are not used in private template current_mind_info=current_mind_info, - reply_style2=reply_style2_chosen, - reply_style1=reply_style1_chosen, reason=reason, - prompt_ger=prompt_ger, moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), ) # --- End choosing template --- @@ -303,9 +309,11 @@ class PromptBuilder: structured_info=None, message_txt=None, sender_name="某人", + in_mind_reply=None, + target_message=None, ) -> Optional[str]: if build_mode == "normal": - return await self._build_prompt_normal(chat_stream, message_txt, sender_name) + return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name) elif build_mode == "focus": return await _build_prompt_focus( @@ -314,6 +322,8 @@ class PromptBuilder: structured_info, chat_stream, sender_name, + in_mind_reply, + target_message, ) return None @@ -425,13 +435,6 @@ class PromptBuilder: end_time = time.time() logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}秒") - if global_config.ENABLE_SCHEDULE_GEN: - schedule_prompt = await global_prompt_manager.format_prompt( - "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False) - ) - else: - schedule_prompt = "" - logger.debug("开始构建 normal prompt") # --- Choose template and format based on chat type --- @@ -447,7 +450,6 @@ class PromptBuilder: sender_name=effective_sender_name, memory_prompt=memory_prompt, 
prompt_info=prompt_info, - schedule_prompt=schedule_prompt, chat_target=chat_target_1, chat_target_2=chat_target_2, chat_talking_prompt=chat_talking_prompt, @@ -472,7 +474,6 @@ class PromptBuilder: sender_name=effective_sender_name, memory_prompt=memory_prompt, prompt_info=prompt_info, - schedule_prompt=schedule_prompt, chat_talking_prompt=chat_talking_prompt, message_txt=message_txt, bot_name=global_config.BOT_NICKNAME, @@ -749,11 +750,11 @@ class PromptBuilder: self, is_group_chat: bool, # Now passed as argument chat_target_info: Optional[dict], # Now passed as argument - cycle_history: Deque["CycleInfo"], # Now passed as argument (Type hint needs import or string) observed_messages_str: str, current_mind: Optional[str], structured_info: Dict[str, Any], current_available_actions: Dict[str, str], + cycle_info: Optional[str], # replan_prompt: str, # Replan logic still simplified ) -> str: """构建 Planner LLM 的提示词 (获取模板并填充数据)""" @@ -786,40 +787,11 @@ class PromptBuilder: chat_content_block = "当前没有观察到新的聊天内容。\\n" # Current mind block - current_mind_block = "" + mind_info_prompt = "" if current_mind: - current_mind_block = f"你的内心想法:\n{current_mind}" + mind_info_prompt = f"对聊天的规划:{current_mind}" else: - current_mind_block = "你的内心想法:\n[没有特别的想法]" - - # Cycle info block (using passed cycle_history) - cycle_info_block = "" - recent_active_cycles = [] - for cycle in reversed(cycle_history): - if cycle.action_taken: - recent_active_cycles.append(cycle) - if len(recent_active_cycles) == 3: - break - consecutive_text_replies = 0 - responses_for_prompt = [] - for cycle in recent_active_cycles: - if cycle.action_type == "text_reply": - consecutive_text_replies += 1 - response_text = cycle.response_info.get("response_text", []) - formatted_response = "[空回复]" if not response_text else " ".join(response_text) - responses_for_prompt.append(formatted_response) - else: - break - if consecutive_text_replies >= 3: - cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' - elif consecutive_text_replies == 2: - cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' - elif consecutive_text_replies == 1: - cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")' - if cycle_info_block: - cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n" - else: - cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n" + mind_info_prompt = "你刚参与聊天" individuality = Individuality.get_instance() prompt_personality = individuality.get_prompt(x_person=2, level=2) @@ -829,7 +801,6 @@ class PromptBuilder: for name in action_keys: desc = current_available_actions[name] action_options_text += f"- '{name}': {desc}\n" - example_action_key = action_keys[0] if action_keys else "no_reply" planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt") @@ -839,10 +810,10 @@ class PromptBuilder: chat_context_description=chat_context_description, structured_info_block=structured_info_block, chat_content_block=chat_content_block, - current_mind_block=current_mind_block, - cycle_info_block=cycle_info_block, + mind_info_prompt=mind_info_prompt, + cycle_info_block=cycle_info, action_options_text=action_options_text, - example_action=example_action_key, + moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), ) return prompt @@ -852,5 +823,39 @@ class PromptBuilder: return "[构建 Planner Prompt 时出错]" +def weighted_sample_no_replacement(items, weights, k) -> 
list: + """ + 加权且不放回地随机抽取k个元素。 + + 参数: + items: 待抽取的元素列表 + weights: 每个元素对应的权重(与items等长,且为正数) + k: 需要抽取的元素个数 + 返回: + selected: 按权重加权且不重复抽取的k个元素组成的列表 + + 如果 items 中的元素不足 k 个,就只会返回所有可用的元素 + + 实现思路: + 每次从当前池中按权重加权随机选出一个元素,选中后将其从池中移除,重复k次。 + 这样保证了: + 1. count越大被选中概率越高 + 2. 不会重复选中同一个元素 + """ + selected = [] + pool = list(zip(items, weights)) + for _ in range(min(k, len(pool))): + total = sum(w for _, w in pool) + r = random.uniform(0, total) + upto = 0 + for idx, (item, weight) in enumerate(pool): + upto += weight + if upto >= r: + selected.append(item) + pool.pop(idx) + break + return selected + + init_prompt() prompt_builder = PromptBuilder() diff --git a/src/chat/focus_chat/hfc_utils.py b/src/chat/focus_chat/hfc_utils.py new file mode 100644 index 000000000..36907c4c0 --- /dev/null +++ b/src/chat/focus_chat/hfc_utils.py @@ -0,0 +1,68 @@ +import time +from typing import Optional +from src.chat.message_receive.message import MessageRecv, BaseMessageInfo +from src.chat.message_receive.chat_stream import ChatStream +from src.chat.message_receive.message import UserInfo +from src.common.logger_manager import get_logger +import json + +logger = get_logger(__name__) + + +async def create_empty_anchor_message( + platform: str, group_info: dict, chat_stream: ChatStream +) -> Optional[MessageRecv]: + """ + 重构观察到的最后一条消息作为回复的锚点, + 如果重构失败或观察为空,则创建一个占位符。 + """ + + placeholder_id = f"mid_pf_{int(time.time() * 1000)}" + placeholder_user = UserInfo(user_id="system_trigger", user_nickname="System Trigger", platform=platform) + placeholder_msg_info = BaseMessageInfo( + message_id=placeholder_id, + platform=platform, + group_info=group_info, + user_info=placeholder_user, + time=time.time(), + ) + placeholder_msg_dict = { + "message_info": placeholder_msg_info.to_dict(), + "processed_plain_text": "[System Trigger Context]", + "raw_message": "", + "time": placeholder_msg_info.time, + } + anchor_message = MessageRecv(placeholder_msg_dict) + anchor_message.update_chat_stream(chat_stream) + + return anchor_message + + +def parse_thinking_id_to_timestamp(thinking_id: str) -> float: + """ + 将形如 'tid' 的 thinking_id 解析回 float 时间戳 + 例如: 'tid1718251234.56' -> 1718251234.56 + """ + if not thinking_id.startswith("tid"): + raise ValueError("thinking_id 格式不正确") + ts_str = thinking_id[3:] + return float(ts_str) + + +def get_keywords_from_json(json_str: str) -> list[str]: + # 提取JSON内容 + start = json_str.find("{") + end = json_str.rfind("}") + 1 + if start == -1 or end == 0: + logger.error("未找到有效的JSON内容") + return [] + + json_content = json_str[start:end] + + # 解析JSON + try: + json_data = json.loads(json_content) + return json_data.get("keywords", []) + except json.JSONDecodeError as e: + logger.error(f"JSON解析失败: {e}") + return [] diff --git a/src/chat/focus_chat/info/chat_info.py b/src/chat/focus_chat/info/chat_info.py new file mode 100644 index 000000000..445529318 --- /dev/null +++ b/src/chat/focus_chat/info/chat_info.py @@ -0,0 +1,97 @@ +from typing import Dict, Optional +from dataclasses import dataclass +from .info_base import InfoBase + + +@dataclass +class ChatInfo(InfoBase): + """聊天信息类 + + 用于记录和管理聊天相关的信息,包括聊天ID、名称和类型等。 + 继承自 InfoBase 类,使用字典存储具体数据。 + + Attributes: + type (str): 信息类型标识符,固定为 "chat" + + Data Fields: + chat_id (str): 聊天的唯一标识符 + chat_name (str): 聊天的名称 + chat_type (str): 聊天的类型 + """ + + type: str = "chat" + + def set_chat_id(self, chat_id: str) -> None: + """设置聊天ID + + Args: + chat_id (str): 聊天的唯一标识符 + """ + self.data["chat_id"] = chat_id + + def set_chat_name(self, chat_name: str) -> None: + """设置聊天名称 + + 
Args: + chat_name (str): 聊天的名称 + """ + self.data["chat_name"] = chat_name + + def set_chat_type(self, chat_type: str) -> None: + """设置聊天类型 + + Args: + chat_type (str): 聊天的类型 + """ + self.data["chat_type"] = chat_type + + def get_chat_id(self) -> Optional[str]: + """获取聊天ID + + Returns: + Optional[str]: 聊天的唯一标识符,如果未设置则返回 None + """ + return self.get_info("chat_id") + + def get_chat_name(self) -> Optional[str]: + """获取聊天名称 + + Returns: + Optional[str]: 聊天的名称,如果未设置则返回 None + """ + return self.get_info("chat_name") + + def get_chat_type(self) -> Optional[str]: + """获取聊天类型 + + Returns: + Optional[str]: 聊天的类型,如果未设置则返回 None + """ + return self.get_info("chat_type") + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, str]: + """获取所有信息数据 + + Returns: + Dict[str, str]: 包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[str]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[str]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) diff --git a/src/chat/focus_chat/info/cycle_info.py b/src/chat/focus_chat/info/cycle_info.py new file mode 100644 index 000000000..3701aa153 --- /dev/null +++ b/src/chat/focus_chat/info/cycle_info.py @@ -0,0 +1,157 @@ +from typing import Dict, Optional, Any +from dataclasses import dataclass +from .info_base import InfoBase + + +@dataclass +class CycleInfo(InfoBase): + """循环信息类 + + 用于记录和管理心跳循环的相关信息,包括循环ID、时间信息、动作信息等。 + 继承自 InfoBase 类,使用字典存储具体数据。 + + Attributes: + type (str): 信息类型标识符,固定为 "cycle" + + Data Fields: + cycle_id (str): 当前循环的唯一标识符 + start_time (str): 循环开始的时间 + end_time (str): 循环结束的时间 + action (str): 在循环中采取的动作 + action_data (Dict[str, Any]): 动作相关的详细数据 + reason (str): 触发循环的原因 + observe_info (str): 当前的回复信息 + """ + + type: str = "cycle" + + def get_type(self) -> str: + """获取信息类型""" + return self.type + + def get_data(self) -> Dict[str, str]: + """获取信息数据""" + return self.data + + def get_info(self, key: str) -> Optional[str]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + 属性值,如果键不存在则返回 None + """ + return self.data.get(key) + + def set_cycle_id(self, cycle_id: str) -> None: + """设置循环ID + + Args: + cycle_id (str): 循环的唯一标识符 + """ + self.data["cycle_id"] = cycle_id + + def set_start_time(self, start_time: str) -> None: + """设置开始时间 + + Args: + start_time (str): 循环开始的时间,建议使用标准时间格式 + """ + self.data["start_time"] = start_time + + def set_end_time(self, end_time: str) -> None: + """设置结束时间 + + Args: + end_time (str): 循环结束的时间,建议使用标准时间格式 + """ + self.data["end_time"] = end_time + + def set_action(self, action: str) -> None: + """设置采取的动作 + + Args: + action (str): 在循环中执行的动作名称 + """ + self.data["action"] = action + + def set_action_data(self, action_data: Dict[str, Any]) -> None: + """设置动作数据 + + Args: + action_data (Dict[str, Any]): 动作相关的详细数据,将被转换为字符串存储 + """ + self.data["action_data"] = str(action_data) + + def set_reason(self, reason: str) -> None: + """设置原因 + + Args: + reason (str): 触发循环的原因说明 + """ + self.data["reason"] = reason + + def set_observe_info(self, observe_info: str) -> None: + """设置回复信息 + + Args: + observe_info (str): 当前的回复信息 + """ + self.data["observe_info"] = observe_info + + def get_cycle_id(self) -> Optional[str]: + """获取循环ID + + Returns: + Optional[str]: 循环的唯一标识符,如果未设置则返回 None + """ + return self.get_info("cycle_id") + + def get_start_time(self) -> Optional[str]: + """获取开始时间 + + Returns: + Optional[str]: 循环开始的时间,如果未设置则返回 None + """ + return self.get_info("start_time") + + def get_end_time(self) -> Optional[str]: + """获取结束时间 
+ + Returns: + Optional[str]: 循环结束的时间,如果未设置则返回 None + """ + return self.get_info("end_time") + + def get_action(self) -> Optional[str]: + """获取采取的动作 + + Returns: + Optional[str]: 在循环中执行的动作名称,如果未设置则返回 None + """ + return self.get_info("action") + + def get_action_data(self) -> Optional[str]: + """获取动作数据 + + Returns: + Optional[str]: 动作相关的详细数据(字符串形式),如果未设置则返回 None + """ + return self.get_info("action_data") + + def get_reason(self) -> Optional[str]: + """获取原因 + + Returns: + Optional[str]: 触发循环的原因说明,如果未设置则返回 None + """ + return self.get_info("reason") + + def get_observe_info(self) -> Optional[str]: + """获取回复信息 + + Returns: + Optional[str]: 当前的回复信息,如果未设置则返回 None + """ + return self.get_info("observe_info") diff --git a/src/chat/focus_chat/info/info_base.py b/src/chat/focus_chat/info/info_base.py new file mode 100644 index 000000000..7779d913a --- /dev/null +++ b/src/chat/focus_chat/info/info_base.py @@ -0,0 +1,60 @@ +from typing import Dict, Optional, Any, List +from dataclasses import dataclass, field + + +@dataclass +class InfoBase: + """信息基类 + + 这是一个基础信息类,用于存储和管理各种类型的信息数据。 + 所有具体的信息类都应该继承自这个基类。 + + Attributes: + type (str): 信息类型标识符,默认为 "base" + data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典, + 支持存储字符串、字典、列表等嵌套数据结构 + """ + + type: str = "base" + data: Dict[str, Any] = field(default_factory=dict) + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, Any]: + """获取所有信息数据 + + Returns: + Dict[str, Any]: 包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[Any]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[Any]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) + + def get_info_list(self, key: str) -> List[Any]: + """获取特定属性的信息列表 + + Args: + key: 要获取的属性键名 + + Returns: + List[Any]: 属性值列表,如果键不存在则返回空列表 + """ + value = self.data.get(key) + if isinstance(value, list): + return value + return [] diff --git a/src/chat/focus_chat/info/mind_info.py b/src/chat/focus_chat/info/mind_info.py new file mode 100644 index 000000000..3cfde1bbb --- /dev/null +++ b/src/chat/focus_chat/info/mind_info.py @@ -0,0 +1,34 @@ +from typing import Dict, Any +from dataclasses import dataclass, field +from .info_base import InfoBase + + +@dataclass +class MindInfo(InfoBase): + """思维信息类 + + 用于存储和管理当前思维状态的信息。 + + Attributes: + type (str): 信息类型标识符,默认为 "mind" + data (Dict[str, Any]): 包含 current_mind 的数据字典 + """ + + type: str = "mind" + data: Dict[str, Any] = field(default_factory=lambda: {"current_mind": ""}) + + def get_current_mind(self) -> str: + """获取当前思维状态 + + Returns: + str: 当前思维状态 + """ + return self.get_info("current_mind") or "" + + def set_current_mind(self, mind: str) -> None: + """设置当前思维状态 + + Args: + mind: 要设置的思维状态 + """ + self.data["current_mind"] = mind diff --git a/src/chat/focus_chat/info/obs_info.py b/src/chat/focus_chat/info/obs_info.py new file mode 100644 index 000000000..05dcf98c8 --- /dev/null +++ b/src/chat/focus_chat/info/obs_info.py @@ -0,0 +1,115 @@ +from typing import Dict, Optional +from dataclasses import dataclass +from .info_base import InfoBase + + +@dataclass +class ObsInfo(InfoBase): + """OBS信息类 + + 用于记录和管理OBS相关的信息,包括说话消息、截断后的说话消息和聊天类型。 + 继承自 InfoBase 类,使用字典存储具体数据。 + + Attributes: + type (str): 信息类型标识符,固定为 "obs" + + Data Fields: + talking_message (str): 说话消息内容 + talking_message_str_truncate (str): 截断后的说话消息内容 + chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) + """ + + type: str = "obs" + + def set_talking_message(self, message: str) -> None: 
+ """设置说话消息 + + Args: + message (str): 说话消息内容 + """ + self.data["talking_message"] = message + + def set_talking_message_str_truncate(self, message: str) -> None: + """设置截断后的说话消息 + + Args: + message (str): 截断后的说话消息内容 + """ + self.data["talking_message_str_truncate"] = message + + def set_previous_chat_info(self, message: str) -> None: + """设置之前聊天信息 + + Args: + message (str): 之前聊天信息内容 + """ + self.data["previous_chat_info"] = message + + def set_chat_type(self, chat_type: str) -> None: + """设置聊天类型 + + Args: + chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) + """ + if chat_type not in ["private", "group", "other"]: + chat_type = "other" + self.data["chat_type"] = chat_type + + def set_chat_target(self, chat_target: str) -> None: + """设置聊天目标 + + Args: + chat_target (str): 聊天目标,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) + """ + self.data["chat_target"] = chat_target + + def get_talking_message(self) -> Optional[str]: + """获取说话消息 + + Returns: + Optional[str]: 说话消息内容,如果未设置则返回 None + """ + return self.get_info("talking_message") + + def get_talking_message_str_truncate(self) -> Optional[str]: + """获取截断后的说话消息 + + Returns: + Optional[str]: 截断后的说话消息内容,如果未设置则返回 None + """ + return self.get_info("talking_message_str_truncate") + + def get_chat_type(self) -> str: + """获取聊天类型 + + Returns: + str: 聊天类型,默认为 "other" + """ + return self.get_info("chat_type") or "other" + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, str]: + """获取所有信息数据 + + Returns: + Dict[str, str]: 包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[str]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[str]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) diff --git a/src/chat/focus_chat/info/structured_info.py b/src/chat/focus_chat/info/structured_info.py new file mode 100644 index 000000000..61269c8f2 --- /dev/null +++ b/src/chat/focus_chat/info/structured_info.py @@ -0,0 +1,69 @@ +from typing import Dict, Optional, Any, List +from dataclasses import dataclass, field + + +@dataclass +class StructuredInfo: + """信息基类 + + 这是一个基础信息类,用于存储和管理各种类型的信息数据。 + 所有具体的信息类都应该继承自这个基类。 + + Attributes: + type (str): 信息类型标识符,默认为 "base" + data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典, + 支持存储字符串、字典、列表等嵌套数据结构 + """ + + type: str = "structured_info" + data: Dict[str, Any] = field(default_factory=dict) + + def get_type(self) -> str: + """获取信息类型 + + Returns: + str: 当前信息对象的类型标识符 + """ + return self.type + + def get_data(self) -> Dict[str, Any]: + """获取所有信息数据 + + Returns: + Dict[str, Any]: 包含所有信息数据的字典 + """ + return self.data + + def get_info(self, key: str) -> Optional[Any]: + """获取特定属性的信息 + + Args: + key: 要获取的属性键名 + + Returns: + Optional[Any]: 属性值,如果键不存在则返回 None + """ + return self.data.get(key) + + def get_info_list(self, key: str) -> List[Any]: + """获取特定属性的信息列表 + + Args: + key: 要获取的属性键名 + + Returns: + List[Any]: 属性值列表,如果键不存在则返回空列表 + """ + value = self.data.get(key) + if isinstance(value, list): + return value + return [] + + def set_info(self, key: str, value: Any) -> None: + """设置特定属性的信息值 + + Args: + key: 要设置的属性键名 + value: 要设置的属性值 + """ + self.data[key] = value diff --git a/src/chat/focus_chat/info_processors/base_processor.py b/src/chat/focus_chat/info_processors/base_processor.py new file mode 100644 index 000000000..e11ec9596 --- /dev/null +++ b/src/chat/focus_chat/info_processors/base_processor.py @@ -0,0 +1,52 @@ +from abc import ABC, abstractmethod +from typing import List, Any, Optional, 
Dict +from src.chat.focus_chat.info.info_base import InfoBase +from src.chat.heart_flow.observation.observation import Observation +from src.common.logger_manager import get_logger + +logger = get_logger("base_processor") + + +class BaseProcessor(ABC): + """信息处理器基类 + + 所有具体的信息处理器都应该继承这个基类,并实现process_info方法。 + 支持处理InfoBase和Observation类型的输入。 + """ + + @abstractmethod + def __init__(self): + """初始化处理器""" + pass + + @abstractmethod + async def process_info( + self, + infos: List[InfoBase], + observations: Optional[List[Observation]] = None, + running_memorys: Optional[List[Dict]] = None, + **kwargs: Any, + ) -> List[InfoBase]: + """处理信息对象的抽象方法 + + Args: + infos: InfoBase对象列表 + observations: 可选的Observation对象列表 + **kwargs: 其他可选参数 + + Returns: + List[InfoBase]: 处理后的InfoBase实例列表 + """ + pass + + def _create_processed_item(self, info_type: str, info_data: Any) -> dict: + """创建处理后的信息项 + + Args: + info_type: 信息类型 + info_data: 信息数据 + + Returns: + dict: 处理后的信息项 + """ + return {"type": info_type, "id": f"info_{info_type}", "content": info_data, "ttl": 3} diff --git a/src/chat/focus_chat/info_processors/chattinginfo_processor.py b/src/chat/focus_chat/info_processors/chattinginfo_processor.py new file mode 100644 index 000000000..bc722b90e --- /dev/null +++ b/src/chat/focus_chat/info_processors/chattinginfo_processor.py @@ -0,0 +1,123 @@ +from typing import List, Optional, Any +from src.chat.focus_chat.info.obs_info import ObsInfo +from src.chat.heart_flow.observation.observation import Observation +from src.chat.focus_chat.info.info_base import InfoBase +from .base_processor import BaseProcessor +from src.common.logger_manager import get_logger +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation +from src.chat.focus_chat.info.cycle_info import CycleInfo +from datetime import datetime +from typing import Dict +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config + +logger = get_logger("observation") + + +class ChattingInfoProcessor(BaseProcessor): + """观察处理器 + + 用于处理Observation对象,将其转换为ObsInfo对象。 + """ + + def __init__(self): + """初始化观察处理器""" + self.llm_summary = LLMRequest( + model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" + ) + super().__init__() + + async def process_info( + self, + observations: Optional[List[Observation]] = None, + running_memorys: Optional[List[Dict]] = None, + **kwargs: Any, + ) -> List[InfoBase]: + """处理Observation对象 + + Args: + infos: InfoBase对象列表 + observations: 可选的Observation对象列表 + **kwargs: 其他可选参数 + + Returns: + List[InfoBase]: 处理后的ObsInfo实例列表 + """ + # print(f"observations: {observations}") + processed_infos = [] + + # 处理Observation对象 + if observations: + for obs in observations: + # print(f"obs: {obs}") + if isinstance(obs, ChattingObservation): + obs_info = ObsInfo() + + await self.chat_compress(obs) + + # 设置说话消息 + if hasattr(obs, "talking_message_str"): + obs_info.set_talking_message(obs.talking_message_str) + + # 设置截断后的说话消息 + if hasattr(obs, "talking_message_str_truncate"): + obs_info.set_talking_message_str_truncate(obs.talking_message_str_truncate) + + if hasattr(obs, "mid_memory_info"): + obs_info.set_previous_chat_info(obs.mid_memory_info) + + # 设置聊天类型 + is_group_chat = obs.is_group_chat + if is_group_chat: + chat_type = "group" + else: + chat_type = "private" + obs_info.set_chat_target(obs.chat_target_info.get("person_name", "某人")) + 
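+                        # 私聊场景下记录对方名称作为聊天目标,聊天类型随后统一写入 obs_info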
obs_info.set_chat_type(chat_type) + + # logger.debug(f"聊天信息处理器处理后的信息: {obs_info}") + + processed_infos.append(obs_info) + if isinstance(obs, HFCloopObservation): + obs_info = CycleInfo() + obs_info.set_observe_info(obs.observe_info) + processed_infos.append(obs_info) + + return processed_infos + + async def chat_compress(self, obs: ChattingObservation): + if obs.compressor_prompt: + try: + summary_result, _, _ = await self.llm_summary.generate_response(obs.compressor_prompt) + summary = "没有主题的闲聊" # 默认值 + if summary_result: # 确保结果不为空 + summary = summary_result + except Exception as e: + logger.error(f"总结主题失败 for chat {obs.chat_id}: {e}") + + mid_memory = { + "id": str(int(datetime.now().timestamp())), + "theme": summary, + "messages": obs.oldest_messages, # 存储原始消息对象 + "readable_messages": obs.oldest_messages_str, + # "timestamps": oldest_timestamps, + "chat_id": obs.chat_id, + "created_at": datetime.now().timestamp(), + } + + obs.mid_memorys.append(mid_memory) + if len(obs.mid_memorys) > obs.max_mid_memory_len: + obs.mid_memorys.pop(0) # 移除最旧的 + + mid_memory_str = "之前聊天的内容概述是:\n" + for mid_memory_item in obs.mid_memorys: # 重命名循环变量以示区分 + time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) + mid_memory_str += ( + f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n" + ) + obs.mid_memory_info = mid_memory_str + + obs.compressor_prompt = "" + obs.oldest_messages = [] + obs.oldest_messages_str = "" diff --git a/src/chat/focus_chat/info_processors/mind_processor.py b/src/chat/focus_chat/info_processors/mind_processor.py new file mode 100644 index 000000000..ec32ea7e9 --- /dev/null +++ b/src/chat/focus_chat/info_processors/mind_processor.py @@ -0,0 +1,410 @@ +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.heart_flow.observation.observation import Observation +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +import time +import traceback +from src.common.logger_manager import get_logger +from src.individuality.individuality import Individuality +import random +from src.chat.utils.prompt_builder import Prompt, global_prompt_manager +from src.chat.utils.json_utils import safe_json_dumps +from src.chat.message_receive.chat_stream import chat_manager +import difflib +from src.chat.person_info.relationship_manager import relationship_manager +from .base_processor import BaseProcessor +from src.chat.focus_chat.info.mind_info import MindInfo +from typing import List, Optional +from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation +from src.chat.focus_chat.info_processors.processor_utils import ( + calculate_similarity, + calculate_replacement_probability, + get_spark, +) +from typing import Dict +from src.chat.focus_chat.info.info_base import InfoBase + +logger = get_logger("sub_heartflow") + + +def init_prompt(): + # --- Group Chat Prompt --- + group_prompt = """ +{memory_str} +{extra_info} +{relation_prompt} +你的名字是{bot_name} +{mood_info} +{cycle_info_block} +现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容: +{chat_observe_info} + +以下是你之前对聊天的观察和规划,你的名字是{bot_name}: +{last_mind} + +现在请你继续输出观察和规划,输出要求: +1. 先关注未读新消息的内容和近期回复历史 +2. 根据新信息,修改和删除之前的观察和规划 +3. 根据聊天内容继续输出观察和规划,{hf_do_next} +4. 注意群聊的时间线索,话题由谁发起,进展状况如何,思考聊天的时间线。 +6. 
语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出思考内容就好""" + Prompt(group_prompt, "sub_heartflow_prompt_before") + + # --- Private Chat Prompt --- + private_prompt = """ +{memory_str} +{extra_info} +{relation_prompt} +你的名字是{bot_name},{prompt_personality},你现在{mood_info} +{cycle_info_block} +现在是{time_now},你正在上网,和 {chat_target_name} 私聊,以下是你们的聊天内容: +{chat_observe_info} +以下是你之前对聊天的观察和规划: +{last_mind} +请仔细阅读聊天内容,想想你和 {chat_target_name} 的关系,回顾你们刚刚的交流,你刚刚发言和对方的反应,思考聊天的主题。 +请思考你要不要回复以及如何回复对方。 +思考并输出你的内心想法 +输出要求: +1. 根据聊天内容生成你的想法,{hf_do_next} +2. 不要分点、不要使用表情符号 +3. 避免多余符号(冒号、引号、括号等) +4. 语言简洁自然,不要浮夸 +5. 如果你刚发言,对方没有回复你,请谨慎回复""" + Prompt(private_prompt, "sub_heartflow_prompt_private_before") + + +class MindProcessor(BaseProcessor): + def __init__(self, subheartflow_id: str): + super().__init__() + self.subheartflow_id = subheartflow_id + + self.llm_model = LLMRequest( + model=global_config.llm_sub_heartflow, + temperature=global_config.llm_sub_heartflow["temp"], + max_tokens=800, + request_type="sub_heart_flow", + ) + + self.current_mind = "" + self.past_mind = [] + self.structured_info = [] + self.structured_info_str = "" + + name = chat_manager.get_stream_name(self.subheartflow_id) + self.log_prefix = f"[{name}] " + self._update_structured_info_str() + + def _update_structured_info_str(self): + """根据 structured_info 更新 structured_info_str""" + if not self.structured_info: + self.structured_info_str = "" + return + + lines = ["【信息】"] + for item in self.structured_info: + # 简化展示,突出内容和类型,包含TTL供调试 + type_str = item.get("type", "未知类型") + content_str = item.get("content", "") + + if type_str == "info": + lines.append(f"刚刚: {content_str}") + elif type_str == "memory": + lines.append(f"{content_str}") + elif type_str == "comparison_result": + lines.append(f"数字大小比较结果: {content_str}") + elif type_str == "time_info": + lines.append(f"{content_str}") + elif type_str == "lpmm_knowledge": + lines.append(f"你知道:{content_str}") + else: + lines.append(f"{type_str}的信息: {content_str}") + + self.structured_info_str = "\n".join(lines) + logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}") + + async def process_info( + self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos + ) -> List[InfoBase]: + """处理信息对象 + + Args: + *infos: 可变数量的InfoBase类型的信息对象 + + Returns: + List[InfoBase]: 处理后的结构化信息列表 + """ + current_mind = await self.do_thinking_before_reply(observations, running_memorys) + + mind_info = MindInfo() + mind_info.set_current_mind(current_mind) + + return [mind_info] + + async def do_thinking_before_reply( + self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None + ): + """ + 在回复前进行思考,生成内心想法并收集工具调用结果 + + 参数: + observations: 观察信息 + + 返回: + 如果return_prompt为False: + tuple: (current_mind, past_mind) 当前想法和过去的想法列表 + 如果return_prompt为True: + tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt + """ + + # ---------- 0. 
更新和清理 structured_info ---------- + if self.structured_info: + updated_info = [] + for item in self.structured_info: + item["ttl"] -= 1 + if item["ttl"] > 0: + updated_info.append(item) + else: + logger.debug(f"{self.log_prefix} 移除过期的 structured_info 项: {item['id']}") + self.structured_info = updated_info + self._update_structured_info_str() + logger.debug( + f"{self.log_prefix} 当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}" + ) + + memory_str = "" + if running_memorys: + memory_str = "以下是当前在聊天中,你回忆起的记忆:\n" + for running_memory in running_memorys: + memory_str += f"{running_memory['topic']}: {running_memory['content']}\n" + + # ---------- 1. 准备基础数据 ---------- + # 获取现有想法和情绪状态 + previous_mind = self.current_mind if self.current_mind else "" + + if observations is None: + observations = [] + for observation in observations: + if isinstance(observation, ChattingObservation): + # 获取聊天元信息 + is_group_chat = observation.is_group_chat + chat_target_info = observation.chat_target_info + chat_target_name = "对方" # 私聊默认名称 + if not is_group_chat and chat_target_info: + # 优先使用person_name,其次user_nickname,最后回退到默认值 + chat_target_name = ( + chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name + ) + # 获取聊天内容 + chat_observe_info = observation.get_observe_info() + person_list = observation.person_list + if isinstance(observation, HFCloopObservation): + hfcloop_observe_info = observation.get_observe_info() + + # ---------- 3. 准备个性化数据 ---------- + # 获取个性化信息 + individuality = Individuality.get_instance() + + relation_prompt = "" + for person in person_list: + relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) + + # 构建个性部分 + # prompt_personality = individuality.get_prompt(x_person=2, level=2) + + # 获取当前时间 + time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + + spark_prompt = get_spark() + + # ---------- 5. 构建最终提示词 ---------- + template_name = "sub_heartflow_prompt_before" if is_group_chat else "sub_heartflow_prompt_private_before" + logger.debug(f"{self.log_prefix} 使用{'群聊' if is_group_chat else '私聊'}思考模板") + + prompt = (await global_prompt_manager.get_prompt_async(template_name)).format( + memory_str=memory_str, + extra_info=self.structured_info_str, + # prompt_personality=prompt_personality, + relation_prompt=relation_prompt, + bot_name=individuality.name, + time_now=time_now, + chat_observe_info=chat_observe_info, + mood_info="mood_info", + hf_do_next=spark_prompt, + last_mind=previous_mind, + cycle_info_block=hfcloop_observe_info, + chat_target_name=chat_target_name, + ) + + # 在构建完提示词后,生成最终的prompt字符串 + final_prompt = prompt + + content = "" # 初始化内容变量 + + try: + # 调用LLM生成响应 + response, _ = await self.llm_model.generate_response_async(prompt=final_prompt) + + # 直接使用LLM返回的文本响应作为 content + content = response if response else "" + + except Exception as e: + # 处理总体异常 + logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}") + logger.error(traceback.format_exc()) + content = "思考过程中出现错误" + + # 记录初步思考结果 + logger.debug(f"{self.log_prefix} 思考prompt: \n{final_prompt}\n") + + # 处理空响应情况 + if not content: + content = "(不知道该想些什么...)" + logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。") + + # ---------- 8. 
更新思考状态并返回结果 ---------- + logger.info(f"{self.log_prefix} 思考结果: {content}") + # 更新当前思考内容 + self.update_current_mind(content) + + return content + + def update_current_mind(self, response): + if self.current_mind: # 只有当 current_mind 非空时才添加到 past_mind + self.past_mind.append(self.current_mind) + self.current_mind = response + + def de_similar(self, previous_mind, new_content): + try: + similarity = calculate_similarity(previous_mind, new_content) + replacement_prob = calculate_replacement_probability(similarity) + logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}") + + # 定义词语列表 (移到判断之前) + yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"] + zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"] + cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"] + zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao + + if random.random() < replacement_prob: + # 相似度非常高时,尝试去重或特殊处理 + if similarity == 1.0: + logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...") + # 随机截取大约一半内容 + if len(new_content) > 1: # 避免内容过短无法截取 + split_point = max( + 1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4) + ) + truncated_content = new_content[:split_point] + else: + truncated_content = new_content # 如果只有一个字符或者为空,就不截取了 + + # 添加语气词和转折/承接词 + yu_qi_ci = random.choice(yu_qi_ci_liebiao) + zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao) + content = f"{yu_qi_ci}{zhuan_jie_ci},{truncated_content}" + logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}") + + else: + # 相似度较高但非100%,执行标准去重逻辑 + logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...") + logger.debug( + f"{self.log_prefix} previous_mind类型: {type(previous_mind)}, new_content类型: {type(new_content)}" + ) + + matcher = difflib.SequenceMatcher(None, previous_mind, new_content) + logger.debug(f"{self.log_prefix} matcher类型: {type(matcher)}") + + deduplicated_parts = [] + last_match_end_in_b = 0 + + # 获取并记录所有匹配块 + matching_blocks = matcher.get_matching_blocks() + logger.debug(f"{self.log_prefix} 匹配块数量: {len(matching_blocks)}") + logger.debug( + f"{self.log_prefix} 匹配块示例(前3个): {matching_blocks[:3] if len(matching_blocks) > 3 else matching_blocks}" + ) + + # get_matching_blocks()返回形如[(i, j, n), ...]的列表,其中i是a中的索引,j是b中的索引,n是匹配的长度 + for idx, match in enumerate(matching_blocks): + if not isinstance(match, tuple): + logger.error(f"{self.log_prefix} 匹配块 {idx} 不是元组类型,而是 {type(match)}: {match}") + continue + + try: + _i, j, n = match # 解包元组为三个变量 + logger.debug(f"{self.log_prefix} 匹配块 {idx}: i={_i}, j={j}, n={n}") + + if last_match_end_in_b < j: + # 确保添加的是字符串,而不是元组 + try: + non_matching_part = new_content[last_match_end_in_b:j] + logger.debug( + f"{self.log_prefix} 添加非匹配部分: '{non_matching_part}', 类型: {type(non_matching_part)}" + ) + if not isinstance(non_matching_part, str): + logger.warning( + f"{self.log_prefix} 非匹配部分不是字符串类型: {type(non_matching_part)}" + ) + non_matching_part = str(non_matching_part) + deduplicated_parts.append(non_matching_part) + except Exception as e: + logger.error(f"{self.log_prefix} 处理非匹配部分时出错: {e}") + logger.error(traceback.format_exc()) + last_match_end_in_b = j + n + except Exception as e: + logger.error(f"{self.log_prefix} 处理匹配块时出错: {e}") + logger.error(traceback.format_exc()) + + logger.debug(f"{self.log_prefix} 去重前部分列表: {deduplicated_parts}") + logger.debug(f"{self.log_prefix} 列表元素类型: {[type(part) for part in deduplicated_parts]}") + + # 确保所有元素都是字符串 + deduplicated_parts = [str(part) for part in deduplicated_parts] + + # 防止列表为空 + if not deduplicated_parts: + 
logger.warning(f"{self.log_prefix} 去重后列表为空,添加空字符串") + deduplicated_parts = [""] + + logger.debug(f"{self.log_prefix} 处理后的部分列表: {deduplicated_parts}") + + try: + deduplicated_content = "".join(deduplicated_parts).strip() + logger.debug(f"{self.log_prefix} 拼接后的去重内容: '{deduplicated_content}'") + except Exception as e: + logger.error(f"{self.log_prefix} 拼接去重内容时出错: {e}") + logger.error(traceback.format_exc()) + deduplicated_content = "" + + if deduplicated_content: + # 根据概率决定是否添加词语 + prefix_str = "" + if random.random() < 0.3: # 30% 概率添加语气词 + prefix_str += random.choice(yu_qi_ci_liebiao) + if random.random() < 0.7: # 70% 概率添加转折/承接词 + prefix_str += random.choice(zhuan_jie_ci_liebiao) + + # 组合最终结果 + if prefix_str: + content = f"{prefix_str},{deduplicated_content}" # 更新 content + logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}") + else: + content = deduplicated_content # 更新 content + logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}") + else: + logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}") + content = new_content # 保留原始 content + else: + logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})") + # content 保持 new_content 不变 + + except Exception as e: + logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}") + logger.error(traceback.format_exc()) + # 出错时保留原始 content + content = new_content + + return content + + +init_prompt() diff --git a/src/chat/focus_chat/info_processors/processor_utils.py b/src/chat/focus_chat/info_processors/processor_utils.py new file mode 100644 index 000000000..77cdc7a6b --- /dev/null +++ b/src/chat/focus_chat/info_processors/processor_utils.py @@ -0,0 +1,56 @@ +import difflib +import random +import time + + +def calculate_similarity(text_a: str, text_b: str) -> float: + """ + 计算两个文本字符串的相似度。 + """ + if not text_a or not text_b: + return 0.0 + matcher = difflib.SequenceMatcher(None, text_a, text_b) + return matcher.ratio() + + +def calculate_replacement_probability(similarity: float) -> float: + """ + 根据相似度计算替换的概率。 + 规则: + - 相似度 <= 0.4: 概率 = 0 + - 相似度 >= 0.9: 概率 = 1 + - 相似度 == 0.6: 概率 = 0.7 + - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7) + - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0) + """ + if similarity <= 0.4: + return 0.0 + elif similarity >= 0.9: + return 1.0 + elif 0.4 < similarity <= 0.6: + # p = 3.5 * s - 1.4 + probability = 3.5 * similarity - 1.4 + return max(0.0, probability) + else: # 0.6 < similarity < 0.9 + # p = s + 0.1 + probability = similarity + 0.1 + return min(1.0, max(0.0, probability)) + + +def get_spark(): + local_random = random.Random() + current_minute = int(time.strftime("%M")) + local_random.seed(current_minute) + + hf_options = [ + ("可以参考之前的想法,在原来想法的基础上继续思考", 0.2), + ("可以参考之前的想法,在原来的想法上尝试新的话题", 0.4), + ("不要太深入", 0.2), + ("进行深入思考", 0.2), + ] + # 加权随机选择思考指导 + hf_do_next = local_random.choices( + [option[0] for option in hf_options], weights=[option[1] for option in hf_options], k=1 + )[0] + + return hf_do_next diff --git a/src/chat/focus_chat/info_processors/tool_processor.py b/src/chat/focus_chat/info_processors/tool_processor.py new file mode 100644 index 000000000..79dba4cd3 --- /dev/null +++ b/src/chat/focus_chat/info_processors/tool_processor.py @@ -0,0 +1,193 @@ +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +import time +from src.common.logger_manager import get_logger +from src.individuality.individuality import Individuality +from 
src.chat.utils.prompt_builder import Prompt, global_prompt_manager +from src.tools.tool_use import ToolUser +from src.chat.utils.json_utils import process_llm_tool_calls +from src.chat.person_info.relationship_manager import relationship_manager +from .base_processor import BaseProcessor +from typing import List, Optional, Dict +from src.chat.heart_flow.observation.observation import Observation +from src.chat.heart_flow.observation.working_observation import WorkingObservation +from src.chat.focus_chat.info.structured_info import StructuredInfo + +logger = get_logger("tool_use") + + +def init_prompt(): + # ... 原有代码 ... + + # 添加工具执行器提示词 + tool_executor_prompt = """ +你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。 + +你要在群聊中扮演以下角色: +{prompt_personality} + +你当前的额外信息: +{memory_str} + +群里正在进行的聊天内容: +{chat_observe_info} + +请仔细分析聊天内容,考虑以下几点: +1. 内容中是否包含需要查询信息的问题 +2. 是否需要执行特定操作 +3. 是否有明确的工具使用指令 +4. 考虑用户与你的关系以及当前的对话氛围 + +如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"。 +""" + Prompt(tool_executor_prompt, "tool_executor_prompt") + + +class ToolProcessor(BaseProcessor): + def __init__(self, subheartflow_id: str): + super().__init__() + self.subheartflow_id = subheartflow_id + self.log_prefix = f"[{subheartflow_id}:ToolExecutor] " + self.llm_model = LLMRequest( + model=global_config.llm_tool_use, + max_tokens=500, + request_type="tool_execution", + ) + self.structured_info = [] + + async def process_info( + self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos + ) -> List[dict]: + """处理信息对象 + + Args: + *infos: 可变数量的InfoBase类型的信息对象 + + Returns: + list: 处理后的结构化信息列表 + """ + + if observations: + for observation in observations: + if isinstance(observation, ChattingObservation): + result, used_tools, prompt = await self.execute_tools(observation, running_memorys) + + # 更新WorkingObservation中的结构化信息 + for observation in observations: + if isinstance(observation, WorkingObservation): + for structured_info in result: + logger.debug(f"{self.log_prefix} 更新WorkingObservation中的结构化信息: {structured_info}") + observation.add_structured_info(structured_info) + + working_infos = observation.get_observe_info() + logger.debug(f"{self.log_prefix} 获取更新后WorkingObservation中的结构化信息: {working_infos}") + + structured_info = StructuredInfo() + for working_info in working_infos: + structured_info.set_info(working_info.get("type"), working_info.get("content")) + + return [structured_info] + + async def execute_tools(self, observation: ChattingObservation, running_memorys: Optional[List[Dict]] = None): + """ + 并行执行工具,返回结构化信息 + + 参数: + sub_mind: 子思维对象 + chat_target_name: 聊天目标名称,默认为"对方" + is_group_chat: 是否为群聊,默认为False + return_details: 是否返回详细信息,默认为False + cycle_info: 循环信息对象,可用于记录详细执行信息 + + 返回: + 如果return_details为False: + List[Dict]: 工具执行结果的结构化信息列表 + 如果return_details为True: + Tuple[List[Dict], List[str], str]: (工具执行结果列表, 使用的工具列表, 工具执行提示词) + """ + tool_instance = ToolUser() + tools = tool_instance._define_tools() + + # logger.debug(f"observation: {observation}") + # logger.debug(f"observation.chat_target_info: {observation.chat_target_info}") + # logger.debug(f"observation.is_group_chat: {observation.is_group_chat}") + # logger.debug(f"observation.person_list: {observation.person_list}") + + is_group_chat = observation.is_group_chat + + chat_observe_info = observation.get_observe_info() + person_list = observation.person_list + + memory_str = "" + if running_memorys: + memory_str = "以下是当前在聊天中,你回忆起的记忆:\n" + for running_memory in running_memorys: + memory_str += f"{running_memory['topic']}: 
{running_memory['content']}\n" + + # 构建关系信息 + relation_prompt = "【关系信息】\n" + for person in person_list: + relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) + + # 获取个性信息 + individuality = Individuality.get_instance() + prompt_personality = individuality.get_prompt(x_person=2, level=2) + + # 获取时间信息 + time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + + # 构建专用于工具调用的提示词 + prompt = await global_prompt_manager.format_prompt( + "tool_executor_prompt", + memory_str=memory_str, + # extra_info="extra_structured_info", + chat_observe_info=chat_observe_info, + # chat_target_name=chat_target_name, + is_group_chat=is_group_chat, + # relation_prompt=relation_prompt, + prompt_personality=prompt_personality, + # mood_info=mood_info, + bot_name=individuality.name, + time_now=time_now, + ) + + # 调用LLM,专注于工具使用 + logger.debug(f"开始执行工具调用{prompt}") + response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools) + + logger.debug(f"获取到工具原始输出:\n{tool_calls}") + # 处理工具调用和结果收集,类似于SubMind中的逻辑 + new_structured_items = [] + used_tools = [] # 记录使用了哪些工具 + + if tool_calls: + success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls) + if success and valid_tool_calls: + for tool_call in valid_tool_calls: + try: + # 记录使用的工具名称 + tool_name = tool_call.get("name", "unknown_tool") + used_tools.append(tool_name) + + result = await tool_instance._execute_tool_call(tool_call) + + name = result.get("type", "unknown_type") + content = result.get("content", "") + + logger.info(f"工具{name},获得信息:{content}") + if result: + new_item = { + "type": result.get("type", "unknown_type"), + "id": result.get("id", f"tool_exec_{time.time()}"), + "content": result.get("content", ""), + "ttl": 3, + } + new_structured_items.append(new_item) + except Exception as e: + logger.error(f"{self.log_prefix}工具执行失败: {e}") + + return new_structured_items, used_tools, prompt + + +init_prompt() diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py new file mode 100644 index 000000000..76be3e5d7 --- /dev/null +++ b/src/chat/focus_chat/memory_activator.py @@ -0,0 +1,105 @@ +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.heart_flow.observation.working_observation import WorkingObservation +from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.common.logger_manager import get_logger +from src.chat.utils.prompt_builder import Prompt +from datetime import datetime +from src.chat.memory_system.Hippocampus import HippocampusManager +from typing import List, Dict + + +logger = get_logger("memory_activator") + + +def init_prompt(): + # --- Group Chat Prompt --- + memory_activator_prompt = """ + 你是一个记忆分析器,你需要根据以下信息来进行会议 + 以下是一场聊天中的信息,请根据这些信息,总结出几个关键词作为记忆回忆的触发词 + + {obs_info_text} + + 请输出一个json格式,包含以下字段: + {{ + "keywords": ["关键词1", "关键词2", "关键词3",......] 
+ }} + 不要输出其他多余内容,只输出json格式就好 + """ + + Prompt(memory_activator_prompt, "memory_activator_prompt") + + +class MemoryActivator: + def __init__(self): + self.summary_model = LLMRequest( + model=global_config.llm_summary, temperature=0.7, max_tokens=50, request_type="chat_observation" + ) + self.running_memory = [] + + async def activate_memory(self, observations) -> List[Dict]: + """ + 激活记忆 + + Args: + observations: 现有的进行观察后的 观察列表 + + Returns: + List[Dict]: 激活的记忆列表 + """ + obs_info_text = "" + for observation in observations: + if isinstance(observation, ChattingObservation): + obs_info_text += observation.get_observe_info() + elif isinstance(observation, WorkingObservation): + working_info = observation.get_observe_info() + for working_info_item in working_info: + obs_info_text += f"{working_info_item['type']}: {working_info_item['content']}\n" + elif isinstance(observation, HFCloopObservation): + obs_info_text += observation.get_observe_info() + + # prompt = await global_prompt_manager.format_prompt( + # "memory_activator_prompt", + # obs_info_text=obs_info_text, + # ) + + # logger.debug(f"prompt: {prompt}") + + # response = await self.summary_model.generate_response(prompt) + + # logger.debug(f"response: {response}") + + # # 只取response的第一个元素(字符串) + # response_str = response[0] + # keywords = list(get_keywords_from_json(response_str)) + + # #调用记忆系统获取相关记忆 + # related_memory = await HippocampusManager.get_instance().get_memory_from_topic( + # valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3 + # ) + related_memory = await HippocampusManager.get_instance().get_memory_from_text( + text=obs_info_text, max_memory_num=3, max_memory_length=2, max_depth=3, fast_retrieval=True + ) + + logger.debug(f"获取到的记忆: {related_memory}") + + # 激活时,所有已有记忆的duration+1,达到3则移除 + for m in self.running_memory[:]: + m["duration"] = m.get("duration", 1) + 1 + self.running_memory = [m for m in self.running_memory if m["duration"] < 3] + + if related_memory: + for topic, memory in related_memory: + # 检查是否已存在相同topic和content的记忆 + exists = any(m["topic"] == topic and m["content"] == memory for m in self.running_memory) + if not exists: + self.running_memory.append( + {"topic": topic, "content": memory, "timestamp": datetime.now().isoformat(), "duration": 1} + ) + logger.debug(f"添加新记忆: {topic} - {memory}") + + return self.running_memory + + +init_prompt() diff --git a/src/heart_flow/background_tasks.py b/src/chat/heart_flow/background_tasks.py similarity index 98% rename from src/heart_flow/background_tasks.py rename to src/chat/heart_flow/background_tasks.py index 5ed664e0e..d9fa1c9d3 100644 --- a/src/heart_flow/background_tasks.py +++ b/src/chat/heart_flow/background_tasks.py @@ -5,9 +5,9 @@ from typing import Optional, Coroutine, Callable, Any, List from src.common.logger_manager import get_logger # Need manager types for dependency injection -from src.heart_flow.mai_state_manager import MaiStateManager, MaiStateInfo -from src.heart_flow.subheartflow_manager import SubHeartflowManager -from src.heart_flow.interest_logger import InterestLogger +from src.chat.heart_flow.mai_state_manager import MaiStateManager, MaiStateInfo +from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager +from src.chat.heart_flow.interest_logger import InterestLogger logger = get_logger("background_tasks") diff --git a/src/heart_flow/chat_state_info.py b/src/chat/heart_flow/chat_state_info.py similarity index 86% rename from src/heart_flow/chat_state_info.py rename to src/chat/heart_flow/chat_state_info.py 
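The MemoryActivator above keeps a small rolling window of recalled memories: each activation ages every existing entry, drops entries after three rounds, and only appends newly retrieved (topic, content) pairs that are not already tracked. The following is a minimal, self-contained sketch of that bookkeeping for illustration only; the helper name refresh_running_memory is an assumption, and retrieved memories are assumed to arrive as (topic, content) tuples as returned by get_memory_from_text.

from datetime import datetime
from typing import Dict, List, Tuple


def refresh_running_memory(
    running_memory: List[Dict], retrieved: List[Tuple[str, str]], max_duration: int = 3
) -> List[Dict]:
    """Age existing memories, drop expired ones, then merge newly retrieved ones."""
    # Each activation bumps duration; entries reaching max_duration are dropped.
    for item in running_memory:
        item["duration"] = item.get("duration", 1) + 1
    kept = [item for item in running_memory if item["duration"] < max_duration]

    # Append only (topic, content) pairs that are not already tracked.
    for topic, content in retrieved:
        if not any(m["topic"] == topic and m["content"] == content for m in kept):
            kept.append(
                {"topic": topic, "content": content, "timestamp": datetime.now().isoformat(), "duration": 1}
            )
    return kept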
index bda5c26c0..972882201 100644 --- a/src/heart_flow/chat_state_info.py +++ b/src/chat/heart_flow/chat_state_info.py @@ -10,7 +10,7 @@ class ChatState(enum.Enum): class ChatStateInfo: def __init__(self): - self.chat_status: ChatState = ChatState.ABSENT + self.chat_status: ChatState = ChatState.CHAT self.current_state_time = 120 self.mood_manager = mood_manager diff --git a/src/heart_flow/heartflow.py b/src/chat/heart_flow/heartflow.py similarity index 74% rename from src/heart_flow/heartflow.py rename to src/chat/heart_flow/heartflow.py index 2cf7d365e..ad876bcf0 100644 --- a/src/heart_flow/heartflow.py +++ b/src/chat/heart_flow/heartflow.py @@ -1,16 +1,14 @@ -from src.heart_flow.sub_heartflow import SubHeartflow, ChatState -from src.plugins.models.utils_model import LLMRequest +from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState +from src.chat.models.utils_model import LLMRequest from src.config.config import global_config -from src.plugins.schedule.schedule_generator import bot_schedule from src.common.logger_manager import get_logger from typing import Any, Optional -from src.do_tool.tool_use import ToolUser -from src.plugins.person_info.relationship_manager import relationship_manager # Module instance -from src.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager -from src.heart_flow.subheartflow_manager import SubHeartflowManager -from src.heart_flow.mind import Mind -from src.heart_flow.interest_logger import InterestLogger # Import InterestLogger -from src.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager +from src.tools.tool_use import ToolUser +from src.chat.person_info.relationship_manager import relationship_manager # Module instance +from src.chat.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager +from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager +from src.chat.heart_flow.interest_logger import InterestLogger # Import InterestLogger +from src.chat.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager logger = get_logger("heartflow") @@ -45,8 +43,6 @@ class Heartflow: self.tool_user_instance = ToolUser() # 工具使用模块 self.relationship_manager_instance = relationship_manager # 关系管理模块 - # 子系统初始化 - self.mind: Mind = Mind(self.subheartflow_manager, self.llm_model) # 思考管理器 self.interest_logger: InterestLogger = InterestLogger(self.subheartflow_manager, self) # 兴趣日志记录器 # 后台任务管理器 (整合所有定时任务) @@ -97,16 +93,5 @@ class Heartflow: await self.subheartflow_manager.deactivate_all_subflows() logger.info("[Heartflow] 所有任务和子心流已停止") - async def do_a_thinking(self): - """执行一次主心流思考过程""" - schedule_info = bot_schedule.get_current_num_task(num=4, time_info=True) - new_mind = await self.mind.do_a_thinking( - current_main_mind=self.current_mind, mai_state_info=self.current_state, schedule_info=schedule_info - ) - self.past_mind.append(self.current_mind) - self.current_mind = new_mind - logger.info(f"麦麦的总体脑内状态更新为:{self.current_mind[:100]}...") - self.mind.update_subflows_with_main_mind(new_mind) - heartflow = Heartflow() diff --git a/src/heart_flow/interest_chatting.py b/src/chat/heart_flow/interest_chatting.py similarity index 99% rename from src/heart_flow/interest_chatting.py rename to src/chat/heart_flow/interest_chatting.py index 4525d09d2..45f7fe952 100644 --- a/src/heart_flow/interest_chatting.py +++ b/src/chat/heart_flow/interest_chatting.py @@ -3,7 +3,7 @@ from src.config.config import global_config from typing import Optional, Dict import traceback from 
src.common.logger_manager import get_logger -from src.plugins.chat.message import MessageRecv +from src.chat.message_receive.message import MessageRecv import math diff --git a/src/heart_flow/interest_logger.py b/src/chat/heart_flow/interest_logger.py similarity index 96% rename from src/heart_flow/interest_logger.py rename to src/chat/heart_flow/interest_logger.py index fb33a6f63..b33f449db 100644 --- a/src/heart_flow/interest_logger.py +++ b/src/chat/heart_flow/interest_logger.py @@ -8,12 +8,12 @@ from typing import TYPE_CHECKING, Dict, List from src.common.logger_manager import get_logger # Need chat_manager to get stream names -from src.plugins.chat.chat_stream import chat_manager +from src.chat.message_receive.chat_stream import chat_manager if TYPE_CHECKING: - from src.heart_flow.subheartflow_manager import SubHeartflowManager - from src.heart_flow.sub_heartflow import SubHeartflow - from src.heart_flow.heartflow import Heartflow # 导入 Heartflow 类型 + from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager + from src.chat.heart_flow.sub_heartflow import SubHeartflow + from src.chat.heart_flow.heartflow import Heartflow # 导入 Heartflow 类型 logger = get_logger("interest") diff --git a/src/heart_flow/mai_state_manager.py b/src/chat/heart_flow/mai_state_manager.py similarity index 73% rename from src/heart_flow/mai_state_manager.py rename to src/chat/heart_flow/mai_state_manager.py index 3c6c19d66..7dea910e9 100644 --- a/src/heart_flow/mai_state_manager.py +++ b/src/chat/heart_flow/mai_state_manager.py @@ -13,8 +13,8 @@ logger = get_logger("mai_state") # The line `enable_unlimited_hfc_chat = False` is setting a configuration parameter that controls # whether a specific debugging feature is enabled or not. When `enable_unlimited_hfc_chat` is set to # `False`, it means that the debugging feature for unlimited focused chatting is disabled. 
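# The changes below rewire the Mai state machine: OFFLINE no longer triggers automatically,
# and each state only rolls to a new one after its duration limit expires, using weighted
# random choice. A minimal, self-contained sketch of that duration-based rule is given here
# for illustration; the timeouts, candidate states and weights mirror this hunk, but the
# simplified MaiState enum and the helper name pick_next_state are assumptions, not the
# module's actual API.
import enum
import random
from typing import Optional


class MaiState(enum.Enum):
    OFFLINE = enum.auto()
    PEEKING = enum.auto()
    NORMAL_CHAT = enum.auto()
    FOCUSED_CHAT = enum.auto()


# (max seconds in state, candidate next states, weights), matching the updated rules
# 2.2-2.4 below; OFFLINE has no entry because it no longer auto-transitions.
_RULES = {
    MaiState.PEEKING: (600, [MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT], [50, 50]),
    MaiState.NORMAL_CHAT: (300, [MaiState.PEEKING, MaiState.FOCUSED_CHAT], [50, 50]),
    MaiState.FOCUSED_CHAT: (600, [MaiState.NORMAL_CHAT, MaiState.PEEKING], [50, 50]),
}


def pick_next_state(current: MaiState, seconds_in_state: float) -> Optional[MaiState]:
    """Return the next state once the duration limit is exceeded, else None."""
    rule = _RULES.get(current)
    if rule is None:  # OFFLINE or unknown states stay put
        return None
    limit, choices, weights = rule
    if seconds_in_state < limit:
        return None
    candidate = random.choices(choices, weights=weights, k=1)[0]
    # Only report a transition when the chosen state actually differs.
    return candidate if candidate != current else None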
-# enable_unlimited_hfc_chat = True # 调试用:无限专注聊天 -enable_unlimited_hfc_chat = False +enable_unlimited_hfc_chat = True # 调试用:无限专注聊天 +# enable_unlimited_hfc_chat = False prevent_offline_state = True # 目前默认不启用OFFLINE状态 @@ -82,7 +82,7 @@ class MaiState(enum.Enum): class MaiStateInfo: def __init__(self): - self.mai_status: MaiState = MaiState.OFFLINE + self.mai_status: MaiState = MaiState.NORMAL_CHAT # 初始状态改为 NORMAL_CHAT self.mai_status_history: List[Tuple[MaiState, float]] = [] # 历史状态,包含 状态,时间戳 self.last_status_change_time: float = time.time() # 状态最后改变时间 self.last_min_check_time: float = time.time() # 上次1分钟规则检查时间 @@ -141,24 +141,18 @@ class MaiStateManager: def check_and_decide_next_state(current_state_info: MaiStateInfo) -> Optional[MaiState]: """ 根据当前状态和规则检查是否需要转换状态,并决定下一个状态。 - - Args: - current_state_info: 当前的 MaiStateInfo 实例。 - - Returns: - Optional[MaiState]: 如果需要转换,返回目标 MaiState;否则返回 None。 """ current_time = time.time() current_status = current_state_info.mai_status time_in_current_status = current_time - current_state_info.last_status_change_time - time_since_last_min_check = current_time - current_state_info.last_min_check_time + _time_since_last_min_check = current_time - current_state_info.last_min_check_time next_state: Optional[MaiState] = None # 辅助函数:根据 prevent_offline_state 标志调整目标状态 def _resolve_offline(candidate_state: MaiState) -> MaiState: - if prevent_offline_state and candidate_state == MaiState.OFFLINE: - logger.debug("阻止进入 OFFLINE,改为 PEEKING") - return MaiState.PEEKING + # 现在不再切换到OFFLINE,直接返回当前状态 + if candidate_state == MaiState.OFFLINE: + return current_status return candidate_state if current_status == MaiState.OFFLINE: @@ -170,16 +164,16 @@ class MaiStateManager: elif current_status == MaiState.FOCUSED_CHAT: logger.info("当前在[专心看手机]思考要不要继续聊下去......") - # 1. 麦麦每分钟都有概率离线 - if time_since_last_min_check >= 60: - if current_status != MaiState.OFFLINE: - if random.random() < 0.03: # 3% 概率切换到 OFFLINE - potential_next = MaiState.OFFLINE - resolved_next = _resolve_offline(potential_next) - logger.debug(f"概率触发下线,resolve 为 {resolved_next.value}") - # 只有当解析后的状态与当前状态不同时才设置 next_state - if resolved_next != current_status: - next_state = resolved_next + # 1. 移除每分钟概率切换到OFFLINE的逻辑 + # if time_since_last_min_check >= 60: + # if current_status != MaiState.OFFLINE: + # if random.random() < 0.03: # 3% 概率切换到 OFFLINE + # potential_next = MaiState.OFFLINE + # resolved_next = _resolve_offline(potential_next) + # logger.debug(f"概率触发下线,resolve 为 {resolved_next.value}") + # # 只有当解析后的状态与当前状态不同时才设置 next_state + # if resolved_next != current_status: + # next_state = resolved_next # 2. 
状态持续时间规则 (只有在规则1没有触发状态改变时才检查) if next_state is None: @@ -189,30 +183,26 @@ class MaiStateManager: rule_id = "" if current_status == MaiState.OFFLINE: - # 注意:即使 prevent_offline_state=True,也可能从初始的 OFFLINE 状态启动 - if time_in_current_status >= 60: - time_limit_exceeded = True - rule_id = "2.1 (From OFFLINE)" - weights = [30, 30, 20, 20] - choices_list = [MaiState.PEEKING, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT, MaiState.OFFLINE] + # OFFLINE 状态不再自动切换,直接返回 None + return None elif current_status == MaiState.PEEKING: if time_in_current_status >= 600: # PEEKING 最多持续 600 秒 time_limit_exceeded = True rule_id = "2.2 (From PEEKING)" - weights = [70, 20, 10] - choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT] + weights = [50, 50] + choices_list = [MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT] elif current_status == MaiState.NORMAL_CHAT: if time_in_current_status >= 300: # NORMAL_CHAT 最多持续 300 秒 time_limit_exceeded = True rule_id = "2.3 (From NORMAL_CHAT)" weights = [50, 50] - choices_list = [MaiState.OFFLINE, MaiState.FOCUSED_CHAT] + choices_list = [MaiState.PEEKING, MaiState.FOCUSED_CHAT] elif current_status == MaiState.FOCUSED_CHAT: if time_in_current_status >= 600: # FOCUSED_CHAT 最多持续 600 秒 time_limit_exceeded = True rule_id = "2.4 (From FOCUSED_CHAT)" - weights = [80, 20] - choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT] + weights = [50, 50] + choices_list = [MaiState.NORMAL_CHAT, MaiState.PEEKING] if time_limit_exceeded: next_state_candidate = random.choices(choices_list, weights=weights, k=1)[0] @@ -232,14 +222,5 @@ class MaiStateManager: # 如果决定了下一个状态,且这个状态与当前状态不同,则返回下一个状态 if next_state is not None and next_state != current_status: return next_state - # 如果决定保持 OFFLINE (next_state == MaiState.OFFLINE) 且当前也是 OFFLINE, - # 并且是由于持续时间规则触发的,返回 OFFLINE 以便调用者可以重置计时器。 - # 注意:这个分支只有在 prevent_offline_state = False 时才可能被触发。 - elif next_state == MaiState.OFFLINE and current_status == MaiState.OFFLINE and time_in_current_status >= 60: - logger.debug("决定保持 OFFLINE (持续时间规则),返回 OFFLINE 以提示重置计时器。") - return MaiState.OFFLINE # Return OFFLINE to signal caller that timer reset might be needed else: - # 1. next_state is None (没有触发任何转换规则) - # 2. next_state is not None 但等于 current_status (例如规则1想切OFFLINE但被resolve成PEEKING,而当前已经是PEEKING) - # 3. 
next_state is OFFLINE, current is OFFLINE, 但不是因为时间规则触发 (例如初始状态还没到60秒) return None # 没有状态转换发生或无需重置计时器 diff --git a/src/heart_flow/observation.py b/src/chat/heart_flow/observation/chatting_observation.py similarity index 56% rename from src/heart_flow/observation.py rename to src/chat/heart_flow/observation/chatting_observation.py index 2d819a880..af45dc934 100644 --- a/src/heart_flow/observation.py +++ b/src/chat/heart_flow/observation/chatting_observation.py @@ -1,28 +1,27 @@ -# 定义了来自外部世界的信息 -# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 from datetime import datetime -from src.plugins.models.utils_model import LLMRequest +from src.chat.models.utils_model import LLMRequest from src.config.config import global_config -from src.common.logger_manager import get_logger import traceback -from src.plugins.utils.chat_message_builder import ( +from src.chat.utils.chat_message_builder import ( get_raw_msg_before_timestamp_with_chat, build_readable_messages, get_raw_msg_by_timestamp_with_chat, num_new_messages_since, get_person_id_list, ) -from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager +from src.chat.utils.prompt_builder import global_prompt_manager from typing import Optional import difflib -from src.plugins.chat.message import MessageRecv # 添加 MessageRecv 导入 +from src.chat.message_receive.message import MessageRecv # 添加 MessageRecv 导入 +from src.chat.heart_flow.observation.observation import Observation +from src.common.logger_manager import get_logger +from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.utils.prompt_builder import Prompt -# Import the new utility function -from .utils_chat import get_chat_type_and_target_info -logger = get_logger("observation") +logger = get_logger(__name__) + -# --- Define Prompt Templates for Chat Summary --- Prompt( """这是qq群聊的聊天记录,请总结以下聊天记录的主题: {chat_logs} @@ -39,22 +38,10 @@ Prompt( # --- End Prompt Template Definition --- -# 所有观察的基类 -class Observation: - def __init__(self, observe_type, observe_id): - self.observe_info = "" - self.observe_type = observe_type - self.observe_id = observe_id - self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 - - async def observe(self): - pass - - # 聊天观察 class ChattingObservation(Observation): def __init__(self, chat_id): - super().__init__("chat", chat_id) + super().__init__(chat_id) self.chat_id = chat_id # --- Initialize attributes (defaults) --- @@ -74,26 +61,25 @@ class ChattingObservation(Observation): self.max_mid_memory_len = global_config.compress_length_limit self.mid_memory_info = "" self.person_list = [] + self.oldest_messages = [] + self.oldest_messages_str = "" + self.compressor_prompt = "" self.llm_summary = LLMRequest( model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" ) async def initialize(self): - # --- Use utility function to determine chat type and fetch info --- self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id) - # logger.debug(f"is_group_chat: {self.is_group_chat}") - # logger.debug(f"chat_target_info: {self.chat_target_info}") - # --- End using utility function --- - - # Fetch initial messages (existing logic) + logger.debug(f"初始化observation: self.is_group_chat: {self.is_group_chat}") + logger.debug(f"初始化observation: self.chat_target_info: {self.chat_target_info}") initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10) self.talking_message = initial_messages self.talking_message_str = await 
build_readable_messages(self.talking_message) # 进行一次观察 返回观察结果observe_info def get_observe_info(self, ids=None): + mid_memory_str = "" if ids: - mid_memory_str = "" for id in ids: print(f"id:{id}") try: @@ -114,7 +100,74 @@ class ChattingObservation(Observation): return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str else: - return self.talking_message_str + mid_memory_str = "之前的聊天内容:\n" + for mid_memory in self.mid_memorys: + mid_memory_str += f"{mid_memory['theme']}\n" + return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str + + def serch_message_by_text(self, text: str) -> Optional[MessageRecv]: + """ + 根据回复的纯文本 + 1. 在talking_message中查找最新的,最匹配的消息 + 2. 如果找到,则返回消息 + """ + msg_list = [] + find_msg = None + reverse_talking_message = list(reversed(self.talking_message)) + + for message in reverse_talking_message: + if message["processed_plain_text"] == text: + find_msg = message + logger.debug(f"找到的锚定消息:find_msg: {find_msg}") + break + else: + similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio() + msg_list.append({"message": message, "similarity": similarity}) + logger.debug(f"对锚定消息检查:message: {message['processed_plain_text']},similarity: {similarity}") + if not find_msg: + if msg_list: + msg_list.sort(key=lambda x: x["similarity"], reverse=True) + if msg_list[0]["similarity"] >= 0.5: # 只返回相似度大于等于0.5的消息 + find_msg = msg_list[0]["message"] + else: + logger.debug("没有找到锚定消息,相似度低") + return None + else: + logger.debug("没有找到锚定消息,没有消息捕获") + return None + + # logger.debug(f"找到的锚定消息:find_msg: {find_msg}") + group_info = find_msg.get("chat_info", {}).get("group_info") + user_info = find_msg.get("chat_info", {}).get("user_info") + + content_format = "" + accept_format = "" + template_items = {} + + format_info = {"content_format": content_format, "accept_format": accept_format} + template_info = { + "template_items": template_items, + } + + message_info = { + "platform": find_msg.get("platform"), + "message_id": find_msg.get("message_id"), + "time": find_msg.get("time"), + "group_info": group_info, + "user_info": user_info, + "additional_config": find_msg.get("additional_config"), + "format_info": format_info, + "template_info": template_info, + } + message_dict = { + "message_info": message_info, + "raw_message": find_msg.get("processed_plain_text"), + "detailed_plain_text": find_msg.get("processed_plain_text"), + "processed_plain_text": find_msg.get("processed_plain_text"), + } + find_rec_msg = MessageRecv(message_dict) + logger.debug(f"锚定消息处理后:find_rec_msg: {find_rec_msg}") + return find_rec_msg async def observe(self): # 自上一次观察的新消息 @@ -174,40 +227,10 @@ class ChattingObservation(Observation): logger.error(f"构建总结 Prompt 失败 for chat {self.chat_id}: {e}") # prompt remains None - summary = "没有主题的闲聊" # 默认值 - if prompt: # Check if prompt was built successfully - try: - summary_result, _, _ = await self.llm_summary.generate_response(prompt) - if summary_result: # 确保结果不为空 - summary = summary_result - except Exception as e: - logger.error(f"总结主题失败 for chat {self.chat_id}: {e}") - # 保留默认总结 "没有主题的闲聊" - else: - logger.warning(f"因 Prompt 构建失败,跳过 LLM 总结 for chat {self.chat_id}") - - mid_memory = { - "id": str(int(datetime.now().timestamp())), - "theme": summary, - "messages": oldest_messages, # 存储原始消息对象 - "readable_messages": oldest_messages_str, - # "timestamps": oldest_timestamps, - "chat_id": self.chat_id, - "created_at": datetime.now().timestamp(), - } - - self.mid_memorys.append(mid_memory) - if len(self.mid_memorys) > self.max_mid_memory_len: - 
self.mid_memorys.pop(0) # 移除最旧的 - - mid_memory_str = "之前聊天的内容概述是:\n" - for mid_memory_item in self.mid_memorys: # 重命名循环变量以示区分 - time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) - mid_memory_str += ( - f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n" - ) - self.mid_memory_info = mid_memory_str + self.compressor_prompt = prompt + self.oldest_messages = oldest_messages + self.oldest_messages_str = oldest_messages_str self.talking_message_str = await build_readable_messages( messages=self.talking_message, @@ -229,70 +252,6 @@ class ChattingObservation(Observation): f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}" ) - async def find_best_matching_message(self, search_str: str, min_similarity: float = 0.6) -> Optional[MessageRecv]: - """ - 在 talking_message 中查找与 search_str 最匹配的消息。 - - Args: - search_str: 要搜索的字符串。 - min_similarity: 要求的最低相似度(0到1之间)。 - - Returns: - 匹配的 MessageRecv 实例,如果找不到则返回 None。 - """ - best_match_score = -1.0 - best_match_dict = None - - if not self.talking_message: - logger.debug(f"Chat {self.chat_id}: talking_message is empty, cannot find match for '{search_str}'") - return None - - for message_dict in self.talking_message: - try: - # 临时创建 MessageRecv 以处理文本 - temp_msg = MessageRecv(message_dict) - await temp_msg.process() # 处理消息以获取 processed_plain_text - current_text = temp_msg.processed_plain_text - - if not current_text: # 跳过没有文本内容的消息 - continue - - # 计算相似度 - matcher = difflib.SequenceMatcher(None, search_str, current_text) - score = matcher.ratio() - - # logger.debug(f"Comparing '{search_str}' with '{current_text}', score: {score}") # 可选:用于调试 - - if score > best_match_score: - best_match_score = score - best_match_dict = message_dict - - except Exception as e: - logger.error(f"Error processing message for matching in chat {self.chat_id}: {e}", exc_info=True) - continue # 继续处理下一条消息 - - if best_match_dict is not None and best_match_score >= min_similarity: - logger.debug(f"Found best match for '{search_str}' with score {best_match_score:.2f}") - try: - final_msg = MessageRecv(best_match_dict) - await final_msg.process() - # 确保 MessageRecv 实例有关联的 chat_stream - if hasattr(self, "chat_stream"): - final_msg.update_chat_stream(self.chat_stream) - else: - logger.warning( - f"ChattingObservation instance for chat {self.chat_id} does not have a chat_stream attribute set." 
- ) - return final_msg - except Exception as e: - logger.error(f"Error creating final MessageRecv for chat {self.chat_id}: {e}", exc_info=True) - return None - else: - logger.debug( - f"No suitable match found for '{search_str}' in chat {self.chat_id} (best score: {best_match_score:.2f}, threshold: {min_similarity})" - ) - return None - async def has_new_messages_since(self, timestamp: float) -> bool: """检查指定时间戳之后是否有新消息""" count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp) diff --git a/src/chat/heart_flow/observation/hfcloop_observation.py b/src/chat/heart_flow/observation/hfcloop_observation.py new file mode 100644 index 000000000..f2f336710 --- /dev/null +++ b/src/chat/heart_flow/observation/hfcloop_observation.py @@ -0,0 +1,82 @@ +# 定义了来自外部世界的信息 +# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 +from datetime import datetime +from src.common.logger_manager import get_logger +from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail +from typing import List +# Import the new utility function + +logger = get_logger("observation") + + +# 所有观察的基类 +class HFCloopObservation: + def __init__(self, observe_id): + self.observe_info = "" + self.observe_id = observe_id + self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 + self.history_loop: List[CycleDetail] = [] + + def get_observe_info(self): + return self.observe_info + + def add_loop_info(self, loop_info: CycleDetail): + # logger.debug(f"添加循环信息111111111111111111111111111111111111: {loop_info}") + # print(f"添加循环信息111111111111111111111111111111111111: {loop_info}") + print(f"action_taken: {loop_info.action_taken}") + print(f"action_type: {loop_info.action_type}") + print(f"response_info: {loop_info.response_info}") + self.history_loop.append(loop_info) + + async def observe(self): + recent_active_cycles: List[CycleDetail] = [] + for cycle in reversed(self.history_loop): + # 只关心实际执行了动作的循环 + if cycle.action_taken: + recent_active_cycles.append(cycle) + # 最多找最近的3个活动循环 + if len(recent_active_cycles) == 3: + break + + cycle_info_block = "" + consecutive_text_replies = 0 + responses_for_prompt = [] + + # 检查这最近的活动循环中有多少是连续的文本回复 (从最近的开始看) + for cycle in recent_active_cycles: + if cycle.action_type == "reply": + consecutive_text_replies += 1 + # 获取回复内容,如果不存在则返回'[空回复]' + response_text = cycle.response_info.get("response_text", "[空回复]") + responses_for_prompt.append(response_text) + else: + break + + # 根据连续文本回复的数量构建提示信息 + # 注意: responses_for_prompt 列表是从最近到最远排序的 + if consecutive_text_replies >= 3: # 如果最近的三个活动都是文本回复 + cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' + elif consecutive_text_replies == 2: # 如果最近的两个活动是文本回复 + cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' + elif consecutive_text_replies == 1: # 如果最近的一个活动是文本回复 + cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")' + + # 包装提示块,增加可读性,即使没有连续回复也给个标记 + if cycle_info_block: + cycle_info_block = f"\n你最近的回复\n{cycle_info_block}\n" + else: + # 如果最近的活动循环不是文本回复,或者没有活动循环 + cycle_info_block = "\n" + + # 获取history_loop中最新添加的 + if self.history_loop: + last_loop = self.history_loop[-1] + start_time = last_loop.start_time + end_time = last_loop.end_time + if start_time is not None and end_time is not None: + time_diff = int(end_time - start_time) + cycle_info_block += f"\n距离你上一次阅读消息已经过去了{time_diff}分钟\n" + else: + cycle_info_block += "\n无法获取上一次阅读消息的时间\n" + + self.observe_info = cycle_info_block diff --git 
a/src/chat/heart_flow/observation/memory_observation.py b/src/chat/heart_flow/observation/memory_observation.py new file mode 100644 index 000000000..1938a47d3 --- /dev/null +++ b/src/chat/heart_flow/observation/memory_observation.py @@ -0,0 +1,55 @@ +from src.chat.heart_flow.observation.observation import Observation +from datetime import datetime +from src.common.logger_manager import get_logger +import traceback + +# Import the new utility function +from src.chat.memory_system.Hippocampus import HippocampusManager +import jieba +from typing import List + +logger = get_logger("memory") + + +class MemoryObservation(Observation): + def __init__(self, observe_id): + super().__init__(observe_id) + self.observe_info: str = "" + self.context: str = "" + self.running_memory: List[dict] = [] + + def get_observe_info(self): + for memory in self.running_memory: + self.observe_info += f"{memory['topic']}:{memory['content']}\n" + return self.observe_info + + async def observe(self): + # ---------- 2. 获取记忆 ---------- + try: + # 从聊天内容中提取关键词 + chat_words = set(jieba.cut(self.context)) + # 过滤掉停用词和单字词 + keywords = [word for word in chat_words if len(word) > 1] + # 去重并限制数量 + keywords = list(set(keywords))[:5] + + logger.debug(f"取的关键词: {keywords}") + + # 调用记忆系统获取相关记忆 + related_memory = await HippocampusManager.get_instance().get_memory_from_topic( + valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3 + ) + + logger.debug(f"获取到的记忆: {related_memory}") + + if related_memory: + for topic, memory in related_memory: + # 将记忆添加到 running_memory + self.running_memory.append( + {"topic": topic, "content": memory, "timestamp": datetime.now().isoformat()} + ) + logger.debug(f"添加新记忆: {topic} - {memory}") + + except Exception as e: + logger.error(f"观察 记忆时出错: {e}") + logger.error(traceback.format_exc()) diff --git a/src/chat/heart_flow/observation/observation.py b/src/chat/heart_flow/observation/observation.py new file mode 100644 index 000000000..97e254fc0 --- /dev/null +++ b/src/chat/heart_flow/observation/observation.py @@ -0,0 +1,17 @@ +# 定义了来自外部世界的信息 +# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 +from datetime import datetime +from src.common.logger_manager import get_logger + +logger = get_logger("observation") + + +# 所有观察的基类 +class Observation: + def __init__(self, observe_id): + self.observe_info = "" + self.observe_id = observe_id + self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 + + async def observe(self): + pass diff --git a/src/chat/heart_flow/observation/working_observation.py b/src/chat/heart_flow/observation/working_observation.py new file mode 100644 index 000000000..27b6ab92d --- /dev/null +++ b/src/chat/heart_flow/observation/working_observation.py @@ -0,0 +1,34 @@ +# 定义了来自外部世界的信息 +# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 +from datetime import datetime +from src.common.logger_manager import get_logger + +# Import the new utility function + +logger = get_logger("observation") + + +# 所有观察的基类 +class WorkingObservation: + def __init__(self, observe_id): + self.observe_info = "" + self.observe_id = observe_id + self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 + self.history_loop = [] + self.structured_info = [] + + def get_observe_info(self): + return self.structured_info + + def add_structured_info(self, structured_info: dict): + self.structured_info.append(structured_info) + + async def observe(self): + observed_structured_infos = [] + for structured_info in self.structured_info: + if structured_info.get("ttl") > 0: + structured_info["ttl"] -= 1 + 
observed_structured_infos.append(structured_info) + logger.debug(f"观察到结构化信息仍旧在: {structured_info}") + + self.structured_info = observed_structured_infos diff --git a/src/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py similarity index 94% rename from src/heart_flow/sub_heartflow.py rename to src/chat/heart_flow/sub_heartflow.py index 5be0d73cd..157c1c957 100644 --- a/src/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -1,16 +1,16 @@ -from .observation import Observation, ChattingObservation +from .observation.observation import Observation +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation import asyncio import time from typing import Optional, List, Dict, Tuple, Callable, Coroutine import traceback from src.common.logger_manager import get_logger -from src.plugins.chat.message import MessageRecv -from src.plugins.chat.chat_stream import chat_manager -from src.plugins.heartFC_chat.heartFC_chat import HeartFChatting -from src.plugins.heartFC_chat.normal_chat import NormalChat -from src.heart_flow.mai_state_manager import MaiStateInfo -from src.heart_flow.chat_state_info import ChatState, ChatStateInfo -from src.heart_flow.sub_mind import SubMind +from src.chat.message_receive.message import MessageRecv +from src.chat.message_receive.chat_stream import chat_manager +from src.chat.focus_chat.heartFC_chat import HeartFChatting +from src.chat.normal_chat.normal_chat import NormalChat +from src.chat.heart_flow.mai_state_manager import MaiStateInfo +from src.chat.heart_flow.chat_state_info import ChatState, ChatStateInfo from .utils_chat import get_chat_type_and_target_info from .interest_chatting import InterestChatting @@ -58,7 +58,7 @@ class SubHeartflow: self.should_stop = False # 停止标志 self.task: Optional[asyncio.Task] = None # 后台任务 - # 随便水群 normal_chat 和 认真水群 heartFC_chat 实例 + # 随便水群 normal_chat 和 认真水群 focus_chat 实例 # CHAT模式激活 随便水群 FOCUS模式激活 认真水群 self.heart_fc_instance: Optional[HeartFChatting] = None # 该sub_heartflow的HeartFChatting实例 self.normal_chat_instance: Optional[NormalChat] = None # 该sub_heartflow的NormalChat实例 @@ -68,11 +68,6 @@ class SubHeartflow: self.observations: List[ChattingObservation] = [] # 观察列表 # self.running_knowledges = [] # 运行中的知识,待完善 - # LLM模型配置,负责进行思考 - self.sub_mind = SubMind( - subheartflow_id=self.subheartflow_id, chat_state=self.chat_state, observations=self.observations - ) - # 日志前缀 - Moved determination to initialize self.log_prefix = str(subheartflow_id) # Initial default prefix @@ -186,7 +181,6 @@ class SubHeartflow: # 创建 HeartFChatting 实例,并传递 从构造函数传入的 回调函数 self.heart_fc_instance = HeartFChatting( chat_id=self.subheartflow_id, - sub_mind=self.sub_mind, observations=self.observations, # 传递所有观察者 on_consecutive_no_reply_callback=self.hfc_no_reply_callback, # <-- Use stored callback ) @@ -288,9 +282,6 @@ class SubHeartflow: logger.info(f"{self.log_prefix} 子心流后台任务已停止。") - def update_current_mind(self, response): - self.sub_mind.update_current_mind(response) - def add_observation(self, observation: Observation): for existing_obs in self.observations: if existing_obs.observe_id == observation.observe_id: @@ -304,9 +295,6 @@ class SubHeartflow: def get_all_observations(self) -> list[Observation]: return self.observations - def clear_observations(self): - self.observations.clear() - def _get_primary_observation(self) -> Optional[ChattingObservation]: if self.observations and isinstance(self.observations[0], ChattingObservation): return self.observations[0] @@ -332,7 +320,6 @@ class 
SubHeartflow: interest_state = await self.get_interest_state() return { "interest_state": interest_state, - "current_mind": self.sub_mind.current_mind, "chat_state": self.chat_state.chat_status.value, "chat_state_changed_time": self.chat_state_changed_time, } diff --git a/src/heart_flow/subheartflow_manager.py b/src/chat/heart_flow/subheartflow_manager.py similarity index 99% rename from src/heart_flow/subheartflow_manager.py rename to src/chat/heart_flow/subheartflow_manager.py index c074d29a2..a4bff8338 100644 --- a/src/heart_flow/subheartflow_manager.py +++ b/src/chat/heart_flow/subheartflow_manager.py @@ -9,15 +9,15 @@ import functools # <-- 新增导入 from src.common.logger_manager import get_logger # 导入聊天流管理模块 -from src.plugins.chat.chat_stream import chat_manager +from src.chat.message_receive.chat_stream import chat_manager # 导入心流相关类 -from src.heart_flow.sub_heartflow import SubHeartflow, ChatState -from src.heart_flow.mai_state_manager import MaiStateInfo -from .observation import ChattingObservation +from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState +from src.chat.heart_flow.mai_state_manager import MaiStateInfo +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation # 导入LLM请求工具 -from src.plugins.models.utils_model import LLMRequest +from src.chat.models.utils_model import LLMRequest from src.config.config import global_config from src.individuality.individuality import Individuality import traceback diff --git a/src/heart_flow/utils_chat.py b/src/chat/heart_flow/utils_chat.py similarity index 95% rename from src/heart_flow/utils_chat.py rename to src/chat/heart_flow/utils_chat.py index c3f81a14a..68d5cb1bc 100644 --- a/src/heart_flow/utils_chat.py +++ b/src/chat/heart_flow/utils_chat.py @@ -1,8 +1,8 @@ import asyncio from typing import Optional, Tuple, Dict from src.common.logger_manager import get_logger -from src.plugins.chat.chat_stream import chat_manager -from src.plugins.person_info.person_info import person_info_manager +from src.chat.message_receive.chat_stream import chat_manager +from src.chat.person_info.person_info import person_info_manager logger = get_logger("heartflow_utils") diff --git a/src/plugins/knowledge/LICENSE b/src/chat/knowledge/LICENSE similarity index 100% rename from src/plugins/knowledge/LICENSE rename to src/chat/knowledge/LICENSE diff --git a/src/plugins/knowledge/__init__.py b/src/chat/knowledge/__init__.py similarity index 100% rename from src/plugins/knowledge/__init__.py rename to src/chat/knowledge/__init__.py diff --git a/src/plugins/knowledge/knowledge_lib.py b/src/chat/knowledge/knowledge_lib.py similarity index 100% rename from src/plugins/knowledge/knowledge_lib.py rename to src/chat/knowledge/knowledge_lib.py diff --git a/src/plugins/knowledge/src/__init__.py b/src/chat/knowledge/src/__init__.py similarity index 100% rename from src/plugins/knowledge/src/__init__.py rename to src/chat/knowledge/src/__init__.py diff --git a/src/plugins/knowledge/src/embedding_store.py b/src/chat/knowledge/src/embedding_store.py similarity index 100% rename from src/plugins/knowledge/src/embedding_store.py rename to src/chat/knowledge/src/embedding_store.py diff --git a/src/plugins/knowledge/src/global_logger.py b/src/chat/knowledge/src/global_logger.py similarity index 100% rename from src/plugins/knowledge/src/global_logger.py rename to src/chat/knowledge/src/global_logger.py diff --git a/src/plugins/knowledge/src/ie_process.py b/src/chat/knowledge/src/ie_process.py similarity index 100% rename from 
src/plugins/knowledge/src/ie_process.py rename to src/chat/knowledge/src/ie_process.py diff --git a/src/plugins/knowledge/src/kg_manager.py b/src/chat/knowledge/src/kg_manager.py similarity index 100% rename from src/plugins/knowledge/src/kg_manager.py rename to src/chat/knowledge/src/kg_manager.py diff --git a/src/plugins/knowledge/src/llm_client.py b/src/chat/knowledge/src/llm_client.py similarity index 100% rename from src/plugins/knowledge/src/llm_client.py rename to src/chat/knowledge/src/llm_client.py diff --git a/src/plugins/knowledge/src/lpmmconfig.py b/src/chat/knowledge/src/lpmmconfig.py similarity index 100% rename from src/plugins/knowledge/src/lpmmconfig.py rename to src/chat/knowledge/src/lpmmconfig.py diff --git a/src/plugins/knowledge/src/mem_active_manager.py b/src/chat/knowledge/src/mem_active_manager.py similarity index 100% rename from src/plugins/knowledge/src/mem_active_manager.py rename to src/chat/knowledge/src/mem_active_manager.py diff --git a/src/plugins/knowledge/src/open_ie.py b/src/chat/knowledge/src/open_ie.py similarity index 100% rename from src/plugins/knowledge/src/open_ie.py rename to src/chat/knowledge/src/open_ie.py diff --git a/src/plugins/knowledge/src/prompt_template.py b/src/chat/knowledge/src/prompt_template.py similarity index 100% rename from src/plugins/knowledge/src/prompt_template.py rename to src/chat/knowledge/src/prompt_template.py diff --git a/src/plugins/knowledge/src/qa_manager.py b/src/chat/knowledge/src/qa_manager.py similarity index 100% rename from src/plugins/knowledge/src/qa_manager.py rename to src/chat/knowledge/src/qa_manager.py diff --git a/src/plugins/knowledge/src/raw_processing.py b/src/chat/knowledge/src/raw_processing.py similarity index 100% rename from src/plugins/knowledge/src/raw_processing.py rename to src/chat/knowledge/src/raw_processing.py diff --git a/src/plugins/knowledge/src/utils/__init__.py b/src/chat/knowledge/src/utils/__init__.py similarity index 100% rename from src/plugins/knowledge/src/utils/__init__.py rename to src/chat/knowledge/src/utils/__init__.py diff --git a/src/plugins/knowledge/src/utils/dyn_topk.py b/src/chat/knowledge/src/utils/dyn_topk.py similarity index 100% rename from src/plugins/knowledge/src/utils/dyn_topk.py rename to src/chat/knowledge/src/utils/dyn_topk.py diff --git a/src/plugins/knowledge/src/utils/hash.py b/src/chat/knowledge/src/utils/hash.py similarity index 100% rename from src/plugins/knowledge/src/utils/hash.py rename to src/chat/knowledge/src/utils/hash.py diff --git a/src/plugins/knowledge/src/utils/json_fix.py b/src/chat/knowledge/src/utils/json_fix.py similarity index 100% rename from src/plugins/knowledge/src/utils/json_fix.py rename to src/chat/knowledge/src/utils/json_fix.py diff --git a/src/plugins/knowledge/src/utils/visualize_graph.py b/src/chat/knowledge/src/utils/visualize_graph.py similarity index 100% rename from src/plugins/knowledge/src/utils/visualize_graph.py rename to src/chat/knowledge/src/utils/visualize_graph.py diff --git a/src/plugins/memory_system/Hippocampus.py b/src/chat/memory_system/Hippocampus.py similarity index 99% rename from src/plugins/memory_system/Hippocampus.py rename to src/chat/memory_system/Hippocampus.py index 24d320f78..70eb679c9 100644 --- a/src/plugins/memory_system/Hippocampus.py +++ b/src/chat/memory_system/Hippocampus.py @@ -11,14 +11,14 @@ import networkx as nx import numpy as np from collections import Counter from ...common.database import db -from ...plugins.models.utils_model import LLMRequest +from 
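Most of the module moves in this patch, here and in the hunks that follow, amount to package-prefix substitutions, which matters for external scripts and plugins that still import the old paths. A rough migration helper compiled only from the renames shown in this diff; the map is not exhaustive, and a few packages (for example src.plugins.chat and src.plugins.heartFC_chat) are split across several destinations, so rewritten files still need a manual check:

# Prefix map compiled from the renames in this diff (intentionally incomplete).
OLD_TO_NEW = {
    "src.plugins.knowledge": "src.chat.knowledge",
    "src.plugins.memory_system": "src.chat.memory_system",
    "src.plugins.models": "src.chat.models",
    "src.plugins.person_info": "src.chat.person_info",
    "src.plugins.emoji_system": "src.chat.emoji_system",
    "src.plugins.willing": "src.chat.normal_chat.willing",
    "src.plugins.utils": "src.chat.utils",
    "src.plugins.storage.storage": "src.chat.message_receive.storage",
    "src.plugins.message": "src.common.message",
    "src.plugins.PFC": "src.experimental.PFC",
    "src.heart_flow": "src.chat.heart_flow",
}

def migrate_imports(source: str) -> str:
    """Apply the prefix substitutions to a module's source text (naive string replace)."""
    for old, new in OLD_TO_NEW.items():
        source = source.replace(old, new)
    return source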
...chat.models.utils_model import LLMRequest from src.common.logger_manager import get_logger -from src.plugins.memory_system.sample_distribution import MemoryBuildScheduler # 分布生成器 +from src.chat.memory_system.sample_distribution import MemoryBuildScheduler # 分布生成器 from ..utils.chat_message_builder import ( get_raw_msg_by_timestamp, build_readable_messages, ) # 导入 build_readable_messages -from ..chat.utils import translate_timestamp_to_human_readable +from ..utils.utils import translate_timestamp_to_human_readable from .memory_config import MemoryConfig from rich.traceback import install @@ -499,7 +499,7 @@ class Hippocampus: for topic, memory_items, _ in unique_memories: memory = memory_items[0] # 因为每个topic只有一条记忆 result.append((topic, memory)) - logger.info(f"选中记忆: {memory} (来自节点: {topic})") + logger.debug(f"选中记忆: {memory} (来自节点: {topic})") return result @@ -665,7 +665,7 @@ class Hippocampus: for topic, memory_items, _ in unique_memories: memory = memory_items[0] # 因为每个topic只有一条记忆 result.append((topic, memory)) - logger.info(f"选中记忆: {memory} (来自节点: {topic})") + logger.debug(f"选中记忆: {memory} (来自节点: {topic})") return result diff --git a/src/plugins/memory_system/debug_memory.py b/src/chat/memory_system/debug_memory.py similarity index 96% rename from src/plugins/memory_system/debug_memory.py rename to src/chat/memory_system/debug_memory.py index 8f79c6a8e..baf745409 100644 --- a/src/plugins/memory_system/debug_memory.py +++ b/src/chat/memory_system/debug_memory.py @@ -6,7 +6,7 @@ import os # 添加项目根目录到系统路径 sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))) -from src.plugins.memory_system.Hippocampus import HippocampusManager +from src.chat.memory_system.Hippocampus import HippocampusManager from src.config.config import global_config from rich.traceback import install diff --git a/src/plugins/memory_system/manually_alter_memory.py b/src/chat/memory_system/manually_alter_memory.py similarity index 100% rename from src/plugins/memory_system/manually_alter_memory.py rename to src/chat/memory_system/manually_alter_memory.py diff --git a/src/plugins/memory_system/memory_config.py b/src/chat/memory_system/memory_config.py similarity index 100% rename from src/plugins/memory_system/memory_config.py rename to src/chat/memory_system/memory_config.py diff --git a/src/plugins/memory_system/offline_llm.py b/src/chat/memory_system/offline_llm.py similarity index 100% rename from src/plugins/memory_system/offline_llm.py rename to src/chat/memory_system/offline_llm.py diff --git a/src/plugins/memory_system/sample_distribution.py b/src/chat/memory_system/sample_distribution.py similarity index 100% rename from src/plugins/memory_system/sample_distribution.py rename to src/chat/memory_system/sample_distribution.py diff --git a/src/plugins/chat/__init__.py b/src/chat/message_receive/__init__.py similarity index 88% rename from src/plugins/chat/__init__.py rename to src/chat/message_receive/__init__.py index e5b0b942b..39a1f2637 100644 --- a/src/plugins/chat/__init__.py +++ b/src/chat/message_receive/__init__.py @@ -2,7 +2,7 @@ from ..emoji_system.emoji_manager import emoji_manager from ..person_info.relationship_manager import relationship_manager from .chat_stream import chat_manager from .message_sender import message_manager -from ..storage.storage import MessageStorage +from .storage import MessageStorage __all__ = [ diff --git a/src/plugins/chat/bot.py b/src/chat/message_receive/bot.py similarity index 93% rename from src/plugins/chat/bot.py rename to 
src/chat/message_receive/bot.py index 79e97c4f3..3c9e4420c 100644 --- a/src/plugins/chat/bot.py +++ b/src/chat/message_receive/bot.py @@ -3,13 +3,13 @@ from typing import Dict, Any from src.common.logger_manager import get_logger from src.manager.mood_manager import mood_manager # 导入情绪管理器 -from .chat_stream import chat_manager -from .message import MessageRecv -from .only_message_process import MessageProcessor -from ..PFC.pfc_manager import PFCManager -from ..heartFC_chat.heartflow_processor import HeartFCProcessor -from ..utils.prompt_builder import Prompt, global_prompt_manager -from ...config.config import global_config +from src.chat.message_receive.chat_stream import chat_manager +from src.chat.message_receive.message import MessageRecv +from src.experimental.only_message_process import MessageProcessor +from src.experimental.PFC.pfc_manager import PFCManager +from src.chat.focus_chat.heartflow_processor import HeartFCProcessor +from src.chat.utils.prompt_builder import Prompt, global_prompt_manager +from src.config.config import global_config # 定义日志配置 diff --git a/src/plugins/chat/chat_stream.py b/src/chat/message_receive/chat_stream.py similarity index 100% rename from src/plugins/chat/chat_stream.py rename to src/chat/message_receive/chat_stream.py diff --git a/src/plugins/chat/message.py b/src/chat/message_receive/message.py similarity index 99% rename from src/plugins/chat/message.py rename to src/chat/message_receive/message.py index b9c152889..a42a11a82 100644 --- a/src/plugins/chat/message.py +++ b/src/chat/message_receive/message.py @@ -7,7 +7,7 @@ import urllib3 from src.common.logger_manager import get_logger from .chat_stream import ChatStream -from .utils_image import image_manager +from ..utils.utils_image import image_manager from maim_message import Seg, UserInfo, BaseMessageInfo, MessageBase from rich.traceback import install @@ -100,6 +100,7 @@ class MessageRecv(Message): Args: message_dict: MessageCQ序列化后的字典 """ + # print(f"message_dict: {message_dict}") self.message_info = BaseMessageInfo.from_dict(message_dict.get("message_info", {})) self.message_segment = Seg.from_dict(message_dict.get("message_segment", {})) diff --git a/src/plugins/chat/message_buffer.py b/src/chat/message_receive/message_buffer.py similarity index 100% rename from src/plugins/chat/message_buffer.py rename to src/chat/message_receive/message_buffer.py diff --git a/src/plugins/chat/message_sender.py b/src/chat/message_receive/message_sender.py similarity index 98% rename from src/plugins/chat/message_sender.py rename to src/chat/message_receive/message_sender.py index 104a5ea49..5db34fdea 100644 --- a/src/plugins/chat/message_sender.py +++ b/src/chat/message_receive/message_sender.py @@ -3,14 +3,14 @@ import asyncio import time from asyncio import Task from typing import Union -from src.plugins.message.api import global_api +from src.common.message.api import global_api # from ...common.database import db # 数据库依赖似乎不需要了,注释掉 from .message import MessageSending, MessageThinking, MessageSet -from ..storage.storage import MessageStorage +from .storage import MessageStorage from ...config.config import global_config -from .utils import truncate_message, calculate_typing_time, count_messages_between +from ..utils.utils import truncate_message, calculate_typing_time, count_messages_between from src.common.logger_manager import get_logger from rich.traceback import install @@ -212,7 +212,7 @@ class MessageManager: _ = message.update_thinking_time() # 更新思考时间 thinking_start_time = 
message.thinking_start_time now_time = time.time() - logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}") + # logger.debug(f"thinking_start_time:{thinking_start_time},now_time:{now_time}") thinking_messages_count, thinking_messages_length = count_messages_between( start_time=thinking_start_time, end_time=now_time, stream_id=message.chat_stream.stream_id ) @@ -236,7 +236,7 @@ class MessageManager: await message.process() # 预处理消息内容 - logger.debug(f"{message}") + # logger.debug(f"{message}") # 使用全局 message_sender 实例 await send_message(message) diff --git a/src/plugins/storage/storage.py b/src/chat/message_receive/storage.py similarity index 96% rename from src/plugins/storage/storage.py rename to src/chat/message_receive/storage.py index 34864d2cf..cae029a11 100644 --- a/src/plugins/storage/storage.py +++ b/src/chat/message_receive/storage.py @@ -2,8 +2,8 @@ import re from typing import Union from ...common.database import db -from ..chat.message import MessageSending, MessageRecv -from ..chat.chat_stream import ChatStream +from .message import MessageSending, MessageRecv +from .chat_stream import ChatStream from src.common.logger import get_module_logger logger = get_module_logger("message_storage") diff --git a/src/plugins/models/utils_model.py b/src/chat/models/utils_model.py similarity index 99% rename from src/plugins/models/utils_model.py rename to src/chat/models/utils_model.py index 8ee219562..18c9c7374 100644 --- a/src/plugins/models/utils_model.py +++ b/src/chat/models/utils_model.py @@ -157,7 +157,7 @@ class LLMRequest: completion_tokens: 输出token数 total_tokens: 总token数 user_id: 用户ID,默认为system - request_type: 请求类型(chat/embedding/image/topic/schedule) + request_type: 请求类型 endpoint: API端点 """ # 如果 request_type 为 None,则使用实例变量中的值 diff --git a/src/plugins/heartFC_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py similarity index 96% rename from src/plugins/heartFC_chat/normal_chat.py rename to src/chat/normal_chat/normal_chat.py index 460e881a0..9dc2454ff 100644 --- a/src/plugins/heartFC_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -8,19 +8,19 @@ from typing import List, Optional # 导入 Optional from maim_message import UserInfo, Seg from src.common.logger_manager import get_logger -from src.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info from src.manager.mood_manager import mood_manager -from src.plugins.chat.chat_stream import ChatStream, chat_manager -from src.plugins.person_info.relationship_manager import relationship_manager -from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager -from src.plugins.utils.timer_calculator import Timer +from src.chat.message_receive.chat_stream import ChatStream, chat_manager +from src.chat.person_info.relationship_manager import relationship_manager +from src.chat.utils.info_catcher import info_catcher_manager +from src.chat.utils.timer_calculator import Timer from .normal_chat_generator import NormalChatGenerator -from ..chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet -from ..chat.message_sender import message_manager -from ..chat.utils_image import image_path_to_base64 -from ..emoji_system.emoji_manager import emoji_manager -from ..willing.willing_manager import willing_manager -from ...config.config import global_config +from ..message_receive.message import MessageSending, MessageRecv, MessageThinking, MessageSet +from src.chat.message_receive.message_sender 
import message_manager +from src.chat.utils.utils_image import image_path_to_base64 +from src.chat.emoji_system.emoji_manager import emoji_manager +from src.chat.normal_chat.willing.willing_manager import willing_manager +from src.config.config import global_config logger = get_logger("chat") @@ -353,7 +353,8 @@ class NormalChat: async def _process_initial_interest_messages(self): """处理启动时存在于 interest_dict 中的高兴趣消息。""" if not self.interest_dict: - return # 如果 interest_dict 为 None或空,直接返回 + return # 如果 interest_dict 为 None 或空,直接返回 + items_to_process = list(self.interest_dict.items()) if not items_to_process: return # 没有初始消息,直接返回 diff --git a/src/plugins/heartFC_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py similarity index 95% rename from src/plugins/heartFC_chat/normal_chat_generator.py rename to src/chat/normal_chat/normal_chat_generator.py index ea698bf2c..aec65ed1d 100644 --- a/src/plugins/heartFC_chat/normal_chat_generator.py +++ b/src/chat/normal_chat/normal_chat_generator.py @@ -2,12 +2,12 @@ from typing import List, Optional, Tuple, Union import random from ..models.utils_model import LLMRequest from ...config.config import global_config -from ..chat.message import MessageThinking -from .heartflow_prompt_builder import prompt_builder -from ..chat.utils import process_llm_response -from ..utils.timer_calculator import Timer +from ..message_receive.message import MessageThinking +from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder +from src.chat.utils.utils import process_llm_response +from src.chat.utils.timer_calculator import Timer from src.common.logger_manager import get_logger -from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager +from src.chat.utils.info_catcher import info_catcher_manager logger = get_logger("llm") diff --git a/src/plugins/willing/mode_classical.py b/src/chat/normal_chat/willing/mode_classical.py similarity index 100% rename from src/plugins/willing/mode_classical.py rename to src/chat/normal_chat/willing/mode_classical.py diff --git a/src/plugins/willing/mode_custom.py b/src/chat/normal_chat/willing/mode_custom.py similarity index 100% rename from src/plugins/willing/mode_custom.py rename to src/chat/normal_chat/willing/mode_custom.py diff --git a/src/plugins/willing/mode_mxp.py b/src/chat/normal_chat/willing/mode_mxp.py similarity index 100% rename from src/plugins/willing/mode_mxp.py rename to src/chat/normal_chat/willing/mode_mxp.py diff --git a/src/plugins/willing/willing_manager.py b/src/chat/normal_chat/willing/willing_manager.py similarity index 96% rename from src/plugins/willing/willing_manager.py rename to src/chat/normal_chat/willing/willing_manager.py index ba1e3e097..37e623d11 100644 --- a/src/plugins/willing/willing_manager.py +++ b/src/chat/normal_chat/willing/willing_manager.py @@ -1,9 +1,9 @@ from src.common.logger import LogConfig, WILLING_STYLE_CONFIG, LoguruLogger, get_module_logger from dataclasses import dataclass -from ...config.config import global_config, BotConfig -from ..chat.chat_stream import ChatStream, GroupInfo -from ..chat.message import MessageRecv -from ..person_info.person_info import person_info_manager, PersonInfoManager +from src.config.config import global_config, BotConfig +from src.chat.message_receive.chat_stream import ChatStream, GroupInfo +from src.chat.message_receive.message import MessageRecv +from src.chat.person_info.person_info import person_info_manager, PersonInfoManager from abc import ABC, abstractmethod import importlib from 
typing import Dict, Optional diff --git a/src/plugins/person_info/person_info.py b/src/chat/person_info/person_info.py similarity index 99% rename from src/plugins/person_info/person_info.py rename to src/chat/person_info/person_info.py index fe4069024..605b86b23 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/chat/person_info/person_info.py @@ -6,7 +6,7 @@ from typing import Any, Callable, Dict import datetime import asyncio import numpy as np -from src.plugins.models.utils_model import LLMRequest +from src.chat.models.utils_model import LLMRequest from src.config.config import global_config from src.individuality.individuality import Individuality diff --git a/src/plugins/person_info/relationship_manager.py b/src/chat/person_info/relationship_manager.py similarity index 95% rename from src/plugins/person_info/relationship_manager.py rename to src/chat/person_info/relationship_manager.py index e9dad4b74..c8a443857 100644 --- a/src/plugins/person_info/relationship_manager.py +++ b/src/chat/person_info/relationship_manager.py @@ -1,5 +1,5 @@ from src.common.logger_manager import get_logger -from ..chat.chat_stream import ChatStream +from ..message_receive.chat_stream import ChatStream import math from bson.decimal128 import Decimal128 from .person_info import person_info_manager @@ -94,13 +94,23 @@ class RelationshipManager: return False @staticmethod - async def first_knowing_some_one(platform, user_id, user_nickname, user_cardname, user_avatar): + async def first_knowing_some_one( + platform: str, user_id: str, user_nickname: str, user_cardname: str, user_avatar: str + ): """判断是否认识某人""" person_id = person_info_manager.get_person_id(platform, user_id) - await person_info_manager.update_one_field(person_id, "nickname", user_nickname) - # await person_info_manager.update_one_field(person_id, "user_cardname", user_cardname) - # await person_info_manager.update_one_field(person_id, "user_avatar", user_avatar) - await person_info_manager.qv_person_name(person_id, user_nickname, user_cardname, user_avatar) + data = { + "platform": platform, + "user_id": user_id, + "nickname": user_nickname, + "konw_time": int(time.time()), + } + await person_info_manager.update_one_field( + person_id=person_id, field_name="nickname", value=user_nickname, data=data + ) + await person_info_manager.qv_person_name( + person_id=person_id, user_nickname=user_nickname, user_cardname=user_cardname, user_avatar=user_avatar + ) async def calculate_update_relationship_value(self, user_info: UserInfo, platform: str, label: str, stance: str): """计算并变更关系值 diff --git a/src/plugins/utils/chat_message_builder.py b/src/chat/utils/chat_message_builder.py similarity index 86% rename from src/plugins/utils/chat_message_builder.py rename to src/chat/utils/chat_message_builder.py index f30403e39..dcd98e19d 100644 --- a/src/plugins/utils/chat_message_builder.py +++ b/src/chat/utils/chat_message_builder.py @@ -1,22 +1,11 @@ from src.config.config import global_config - -# 不再直接使用 db -# from src.common.database import db -# 移除 logger 和 traceback,因为错误处理移至 repository -# from src.common.logger import get_module_logger -# import traceback from typing import List, Dict, Any, Tuple # 确保类型提示被导入 import time # 导入 time 模块以获取当前时间 - -# 导入新的 repository 函数 +import random +import re from src.common.message_repository import find_messages, count_messages - -# 导入 PersonInfoManager 和时间转换工具 -from src.plugins.person_info.person_info import person_info_manager -from src.plugins.chat.utils import translate_timestamp_to_human_readable - -# 
不再需要文件级别的 logger -# logger = get_module_logger(__name__) +from src.chat.person_info.person_info import person_info_manager +from src.chat.utils.utils import translate_timestamp_to_human_readable def get_raw_msg_by_timestamp( @@ -69,6 +58,23 @@ def get_raw_msg_by_timestamp_with_chat_users( return find_messages(message_filter=filter_query, sort=sort_order, limit=limit, limit_mode=limit_mode) +def get_raw_msg_by_timestamp_random( + timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest" +) -> List[Dict[str, Any]]: + """ + 先在范围时间戳内随机选择一条消息,取得消息的chat_id,然后根据chat_id获取该聊天在指定时间戳范围内的消息 + """ + # 获取所有消息,只取chat_id字段 + all_msgs = get_raw_msg_by_timestamp(timestamp_start, timestamp_end) + if not all_msgs: + return [] + # 随机选一条 + msg = random.choice(all_msgs) + chat_id = msg["chat_id"] + # 用 chat_id 获取该聊天在指定时间戳范围内的消息 + return get_raw_msg_by_timestamp_with_chat(chat_id, timestamp_start, timestamp_end, limit, limit_mode) + + def get_raw_msg_by_timestamp_with_users( timestamp_start: float, timestamp_end: float, person_ids: list, limit: int = 0, limit_mode: str = "latest" ) -> List[Dict[str, Any]]: @@ -197,7 +203,45 @@ async def _build_readable_messages_internal( else: person_name = "某人" - message_details_raw.append((timestamp, person_name, content)) + # 检查是否有 回复 字段 + reply_pattern = r"回复<([^:<>]+):([^:<>]+)>" + match = re.search(reply_pattern, content) + if match: + aaa = match.group(1) + bbb = match.group(2) + reply_person_id = person_info_manager.get_person_id(platform, bbb) + reply_person_name = await person_info_manager.get_value(reply_person_id, "person_name") + if not reply_person_name: + reply_person_name = aaa + # 在内容前加上回复信息 + content = re.sub(reply_pattern, f"回复 {reply_person_name}", content, count=1) + + # 检查是否有 @ 字段 @<{member_info.get('nickname')}:{member_info.get('user_id')}> + at_pattern = r"@<([^:<>]+):([^:<>]+)>" + at_matches = list(re.finditer(at_pattern, content)) + if at_matches: + new_content = "" + last_end = 0 + for m in at_matches: + new_content += content[last_end : m.start()] + aaa = m.group(1) + bbb = m.group(2) + at_person_id = person_info_manager.get_person_id(platform, bbb) + at_person_name = await person_info_manager.get_value(at_person_id, "person_name") + if not at_person_name: + at_person_name = aaa + new_content += f"@{at_person_name}" + last_end = m.end() + new_content += content[last_end:] + content = new_content + + target_str = "这是QQ的一个功能,用于提及某人,但没那么明显" + if target_str in content: + if random.random() < 0.6: + content = content.replace(target_str, "") + + if content != "": + message_details_raw.append((timestamp, person_name, content)) if not message_details_raw: return "", [] diff --git a/src/plugins/respon_info_catcher/info_catcher.py b/src/chat/utils/info_catcher.py similarity index 99% rename from src/plugins/respon_info_catcher/info_catcher.py rename to src/chat/utils/info_catcher.py index 32add8427..174bb5b49 100644 --- a/src/plugins/respon_info_catcher/info_catcher.py +++ b/src/chat/utils/info_catcher.py @@ -1,5 +1,5 @@ from src.config.config import global_config -from src.plugins.chat.message import MessageRecv, MessageSending, Message +from src.chat.message_receive.message import MessageRecv, MessageSending, Message from src.common.database import db import time import traceback diff --git a/src/plugins/utils/json_utils.py b/src/chat/utils/json_utils.py similarity index 100% rename from src/plugins/utils/json_utils.py rename to src/chat/utils/json_utils.py diff --git a/src/plugins/utils/logger_config.py 
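The new block in _build_readable_messages_internal above rewrites raw reply and mention markers (回复<nick:id> and @<nick:id>) into the names the bot actually remembers for those users. A synchronous sketch of that rewriting, with a plain dict standing in for the async person_info_manager lookup; KNOWN_NAMES, render_mentions and the sample ids are illustrative only:

import re

KNOWN_NAMES = {("qq", "10001"): "小明"}  # stand-in for person_info_manager's stored person_name

def render_mentions(content: str, platform: str = "qq") -> str:
    def lookup(nick: str, uid: str) -> str:
        # Fall back to the raw nickname when no remembered name exists, as the original does.
        return KNOWN_NAMES.get((platform, uid), nick)

    content = re.sub(r"回复<([^:<>]+):([^:<>]+)>",
                     lambda m: f"回复 {lookup(m.group(1), m.group(2))}", content, count=1)
    content = re.sub(r"@<([^:<>]+):([^:<>]+)>",
                     lambda m: f"@{lookup(m.group(1), m.group(2))}", content)
    return content

print(render_mentions("回复<小明:10001> @<路人甲:10002> 好耶"))  # -> 回复 小明 @路人甲 好耶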
b/src/chat/utils/logger_config.py similarity index 100% rename from src/plugins/utils/logger_config.py rename to src/chat/utils/logger_config.py diff --git a/src/plugins/utils/prompt_builder.py b/src/chat/utils/prompt_builder.py similarity index 100% rename from src/plugins/utils/prompt_builder.py rename to src/chat/utils/prompt_builder.py diff --git a/src/plugins/utils/statistic.py b/src/chat/utils/statistic.py similarity index 93% rename from src/plugins/utils/statistic.py rename to src/chat/utils/statistic.py index 6a0b95964..970746138 100644 --- a/src/plugins/utils/statistic.py +++ b/src/chat/utils/statistic.py @@ -512,46 +512,54 @@ class StatisticOutputTask(AsyncTask): # format总在线时间 # 按模型分类统计 - model_rows = "\n".join([ - f"" - f"{model_name}" - f"{count}" - f"{stat_data[IN_TOK_BY_MODEL][model_name]}" - f"{stat_data[OUT_TOK_BY_MODEL][model_name]}" - f"{stat_data[TOTAL_TOK_BY_MODEL][model_name]}" - f"{stat_data[COST_BY_MODEL][model_name]:.4f} ¥" - f"" - for model_name, count in sorted(stat_data[REQ_CNT_BY_MODEL].items()) - ]) + model_rows = "\n".join( + [ + f"" + f"{model_name}" + f"{count}" + f"{stat_data[IN_TOK_BY_MODEL][model_name]}" + f"{stat_data[OUT_TOK_BY_MODEL][model_name]}" + f"{stat_data[TOTAL_TOK_BY_MODEL][model_name]}" + f"{stat_data[COST_BY_MODEL][model_name]:.4f} ¥" + f"" + for model_name, count in sorted(stat_data[REQ_CNT_BY_MODEL].items()) + ] + ) # 按请求类型分类统计 - type_rows = "\n".join([ - f"" - f"{req_type}" - f"{count}" - f"{stat_data[IN_TOK_BY_TYPE][req_type]}" - f"{stat_data[OUT_TOK_BY_TYPE][req_type]}" - f"{stat_data[TOTAL_TOK_BY_TYPE][req_type]}" - f"{stat_data[COST_BY_TYPE][req_type]:.4f} ¥" - f"" - for req_type, count in sorted(stat_data[REQ_CNT_BY_TYPE].items()) - ]) + type_rows = "\n".join( + [ + f"" + f"{req_type}" + f"{count}" + f"{stat_data[IN_TOK_BY_TYPE][req_type]}" + f"{stat_data[OUT_TOK_BY_TYPE][req_type]}" + f"{stat_data[TOTAL_TOK_BY_TYPE][req_type]}" + f"{stat_data[COST_BY_TYPE][req_type]:.4f} ¥" + f"" + for req_type, count in sorted(stat_data[REQ_CNT_BY_TYPE].items()) + ] + ) # 按用户分类统计 - user_rows = "\n".join([ - f"" - f"{user_id}" - f"{count}" - f"{stat_data[IN_TOK_BY_USER][user_id]}" - f"{stat_data[OUT_TOK_BY_USER][user_id]}" - f"{stat_data[TOTAL_TOK_BY_USER][user_id]}" - f"{stat_data[COST_BY_USER][user_id]:.4f} ¥" - f"" - for user_id, count in sorted(stat_data[REQ_CNT_BY_USER].items()) - ]) + user_rows = "\n".join( + [ + f"" + f"{user_id}" + f"{count}" + f"{stat_data[IN_TOK_BY_USER][user_id]}" + f"{stat_data[OUT_TOK_BY_USER][user_id]}" + f"{stat_data[TOTAL_TOK_BY_USER][user_id]}" + f"{stat_data[COST_BY_USER][user_id]:.4f} ¥" + f"" + for user_id, count in sorted(stat_data[REQ_CNT_BY_USER].items()) + ] + ) # 聊天消息统计 - chat_rows = "\n".join([ - f"{self.name_mapping[chat_id][0]}{count}" - for chat_id, count in sorted(stat_data[MSG_CNT_BY_CHAT].items()) - ]) + chat_rows = "\n".join( + [ + f"{self.name_mapping[chat_id][0]}{count}" + for chat_id, count in sorted(stat_data[MSG_CNT_BY_CHAT].items()) + ] + ) # 生成HTML return f"""
diff --git a/src/plugins/utils/timer_calculator.py b/src/chat/utils/timer_calculator.py similarity index 100% rename from src/plugins/utils/timer_calculator.py rename to src/chat/utils/timer_calculator.py diff --git a/src/plugins/utils/typo_generator.py b/src/chat/utils/typo_generator.py similarity index 100% rename from src/plugins/utils/typo_generator.py rename to src/chat/utils/typo_generator.py diff --git a/src/plugins/chat/utils.py b/src/chat/utils/utils.py similarity index 99% rename from src/plugins/chat/utils.py rename to src/chat/utils/utils.py index c229f0a59..8fe8334b8 100644 --- a/src/plugins/chat/utils.py +++ b/src/chat/utils/utils.py @@ -10,9 +10,9 @@ from pymongo.errors import PyMongoError from src.common.logger import get_module_logger from src.manager.mood_manager import mood_manager -from .message import MessageRecv +from ..message_receive.message import MessageRecv from ..models.utils_model import LLMRequest -from ..utils.typo_generator import ChineseTypoGenerator +from .typo_generator import ChineseTypoGenerator from ...common.database import db from ...config.config import global_config diff --git a/src/plugins/chat/utils_image.py b/src/chat/utils/utils_image.py similarity index 98% rename from src/plugins/chat/utils_image.py rename to src/chat/utils/utils_image.py index 5508ad233..455038246 100644 --- a/src/plugins/chat/utils_image.py +++ b/src/chat/utils/utils_image.py @@ -117,7 +117,7 @@ class ImageManager: cached_description = self._get_description_from_db(image_hash, "emoji") if cached_description: # logger.debug(f"缓存表情包描述: {cached_description}") - return f"[表达了:{cached_description}]" + return f"[表情包,含义看起来是:{cached_description}]" # 调用AI获取描述 if image_format == "gif" or image_format == "GIF": @@ -131,7 +131,7 @@ class ImageManager: cached_description = self._get_description_from_db(image_hash, "emoji") if cached_description: logger.warning(f"虽然生成了描述,但是找到缓存表情包描述: {cached_description}") - return f"[表达了:{cached_description}]" + return f"[表情包,含义看起来是:{cached_description}]" # 根据配置决定是否保存图片 if global_config.save_emoji: diff --git a/src/plugins/zhishi/knowledge_library.py b/src/chat/zhishi/knowledge_library.py similarity index 100% rename from src/plugins/zhishi/knowledge_library.py rename to src/chat/zhishi/knowledge_library.py diff --git a/src/common/logger.py b/src/common/logger.py index f7d6fb28e..490bf7c53 100644 --- a/src/common/logger.py +++ b/src/common/logger.py @@ -8,7 +8,7 @@ from dotenv import load_dotenv # 加载 .env 文件 -env_path = Path(__file__).resolve().parent.parent.parent / ".env" +env_path = Path(os.getcwd()) / ".env" load_dotenv(dotenv_path=env_path) # 保存原生处理器ID @@ -29,8 +29,7 @@ _handler_registry: dict[str, List[int]] = {} _custom_style_handlers: dict[Tuple[str, str], List[int]] = {} # 记录自定义样式处理器ID # 获取日志存储根地址 -current_file_path = Path(__file__).resolve() -ROOT_PATH = os.path.abspath(os.path.join(current_file_path, "..", "..")) +ROOT_PATH = os.getcwd() LOG_ROOT = str(ROOT_PATH) + "/" + "logs" SIMPLE_OUTPUT = os.getenv("SIMPLE_OUTPUT", "false").strip().lower() diff --git a/src/common/logger_manager.py b/src/common/logger_manager.py index 8aae71e10..e1cbcbacc 100644 --- a/src/common/logger_manager.py +++ b/src/common/logger_manager.py @@ -9,7 +9,6 @@ from src.common.logger import ( RELATION_STYLE_CONFIG, CONFIG_STYLE_CONFIG, HEARTFLOW_STYLE_CONFIG, - SCHEDULE_STYLE_CONFIG, LLM_STYLE_CONFIG, CHAT_STYLE_CONFIG, EMOJI_STYLE_CONFIG, @@ -56,7 +55,6 @@ MODULE_LOGGER_CONFIGS = { "relation": RELATION_STYLE_CONFIG, # 关系 "config": CONFIG_STYLE_CONFIG, # 配置 "heartflow": 
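The logger.py change above switches .env discovery and the log root from the package location to the process's working directory, so starting MaiBot from a different directory now loads a different .env and writes logs/ there. A two-line illustration of what the new code resolves to:

import os
from pathlib import Path

print(Path(os.getcwd()) / ".env")           # the .env file the logger will now load
print(str(os.getcwd()) + "/" + "logs")      # the new LOG_ROOT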
HEARTFLOW_STYLE_CONFIG, # 麦麦大脑袋 - "schedule": SCHEDULE_STYLE_CONFIG, # 在干嘛 "llm": LLM_STYLE_CONFIG, # 麦麦组织语言 "chat": CHAT_STYLE_CONFIG, # 见闻 "emoji": EMOJI_STYLE_CONFIG, # 表情包 diff --git a/src/plugins/message/__init__.py b/src/common/message/__init__.py similarity index 100% rename from src/plugins/message/__init__.py rename to src/common/message/__init__.py diff --git a/src/plugins/message/api.py b/src/common/message/api.py similarity index 100% rename from src/plugins/message/api.py rename to src/common/message/api.py diff --git a/src/plugins/remote/remote.py b/src/common/remote.py similarity index 100% rename from src/plugins/remote/remote.py rename to src/common/remote.py diff --git a/src/config/config.py b/src/config/config.py index 5c2bdcc2a..b186f3b83 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -2,7 +2,6 @@ import os import re from dataclasses import dataclass, field from typing import Dict, List, Optional -from dateutil import tz import tomli import tomlkit @@ -23,9 +22,9 @@ install(extra_lines=3) logger = get_logger("config") # 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 -is_test = False -mai_version_main = "0.6.3" -mai_version_fix = "fix-3" +is_test = True +mai_version_main = "0.6.4" +mai_version_fix = "snapshot-1" if mai_version_fix: if is_test: @@ -153,6 +152,7 @@ class BotConfig: "用一句话或几句话描述人格的一些侧面", ] ) + expression_style = "描述麦麦说话的表达风格,表达习惯" # identity identity_detail: List[str] = field( default_factory=lambda: [ @@ -166,13 +166,6 @@ class BotConfig: gender: str = "男" # 性别 appearance: str = "用几句话描述外貌特征" # 外貌特征 - # schedule - ENABLE_SCHEDULE_GEN: bool = False # 是否启用日程生成 - PROMPT_SCHEDULE_GEN = "无日程" - SCHEDULE_DOING_UPDATE_INTERVAL: int = 300 # 日程表更新间隔 单位秒 - SCHEDULE_TEMPERATURE: float = 0.5 # 日程表温度,建议0.5-1.0 - TIME_ZONE: str = "Asia/Shanghai" # 时区 - # chat allow_focus_mode: bool = True # 是否允许专注聊天状态 @@ -360,6 +353,8 @@ class BotConfig: if config.INNER_VERSION in SpecifierSet(">=1.2.4"): config.personality_core = personality_config.get("personality_core", config.personality_core) config.personality_sides = personality_config.get("personality_sides", config.personality_sides) + if config.INNER_VERSION in SpecifierSet(">=1.7.0"): + config.expression_style = personality_config.get("expression_style", config.expression_style) def identity(parent: dict): identity_config = parent["identity"] @@ -371,24 +366,6 @@ class BotConfig: config.gender = identity_config.get("gender", config.gender) config.appearance = identity_config.get("appearance", config.appearance) - def schedule(parent: dict): - schedule_config = parent["schedule"] - config.ENABLE_SCHEDULE_GEN = schedule_config.get("enable_schedule_gen", config.ENABLE_SCHEDULE_GEN) - config.PROMPT_SCHEDULE_GEN = schedule_config.get("prompt_schedule_gen", config.PROMPT_SCHEDULE_GEN) - config.SCHEDULE_DOING_UPDATE_INTERVAL = schedule_config.get( - "schedule_doing_update_interval", config.SCHEDULE_DOING_UPDATE_INTERVAL - ) - logger.info( - f"载入自定义日程prompt:{schedule_config.get('prompt_schedule_gen', config.PROMPT_SCHEDULE_GEN)}" - ) - if config.INNER_VERSION in SpecifierSet(">=1.0.2"): - config.SCHEDULE_TEMPERATURE = schedule_config.get("schedule_temperature", config.SCHEDULE_TEMPERATURE) - time_zone = schedule_config.get("time_zone", config.TIME_ZONE) - if tz.gettz(time_zone) is None: - logger.error(f"无效的时区: {time_zone},使用默认值: {config.TIME_ZONE}") - else: - config.TIME_ZONE = time_zone - def emoji(parent: dict): emoji_config = parent["emoji"] config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", 
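The new expression_style personality field above is only read from the config file when its inner version passes the SpecifierSet(">=1.7.0") gate, the same version-gating pattern config.py already uses; older configs silently keep the hard-coded default. A quick check of how that gate evaluates with the packaging library (assuming INNER_VERSION is a packaging Version object, which is what the membership test implies):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

gate = SpecifierSet(">=1.7.0")
for raw in ("1.6.0", "1.7.0", "1.8.2"):
    print(raw, Version(raw) in gate)   # False, True, True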
config.EMOJI_CHECK_INTERVAL) @@ -645,12 +622,6 @@ class BotConfig: # config.ban_user_id = set(groups_config.get("ban_user_id", [])) config.ban_user_id = set(str(user) for user in groups_config.get("ban_user_id", [])) - def platforms(parent: dict): - platforms_config = parent["platforms"] - if platforms_config and isinstance(platforms_config, dict): - for k in platforms_config.keys(): - config.api_urls[k] = platforms_config[k] - def experimental(parent: dict): experimental_config = parent["experimental"] config.enable_friend_chat = experimental_config.get("enable_friend_chat", config.enable_friend_chat) @@ -678,7 +649,6 @@ class BotConfig: "groups": {"func": groups, "support": ">=0.0.0"}, "personality": {"func": personality, "support": ">=0.0.0"}, "identity": {"func": identity, "support": ">=1.2.4"}, - "schedule": {"func": schedule, "support": ">=0.0.11", "necessary": False}, "emoji": {"func": emoji, "support": ">=0.0.0"}, "model": {"func": model, "support": ">=0.0.0"}, "memory": {"func": memory, "support": ">=0.0.0", "necessary": False}, @@ -686,7 +656,6 @@ class BotConfig: "remote": {"func": remote, "support": ">=0.0.10", "necessary": False}, "keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False}, "chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False}, - "platforms": {"func": platforms, "support": ">=1.0.0"}, "response_splitter": {"func": response_splitter, "support": ">=0.0.11", "necessary": False}, "experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False}, "chat": {"func": chat, "support": ">=1.6.0", "necessary": False}, diff --git a/src/do_tool/not_used/get_current_task.py b/src/do_tool/not_used/get_current_task.py deleted file mode 100644 index 30184d672..000000000 --- a/src/do_tool/not_used/get_current_task.py +++ /dev/null @@ -1,60 +0,0 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool -from src.plugins.schedule.schedule_generator import bot_schedule -from src.common.logger import get_module_logger -from typing import Any -from datetime import datetime - -logger = get_module_logger("get_current_task_tool") - - -class GetCurrentTaskTool(BaseTool): - """获取当前正在做的事情/最近的任务工具""" - - name = "get_schedule" - description = "获取当前正在做的事情,或者某个时间点/时间段的日程信息" - parameters = { - "type": "object", - "properties": { - "start_time": {"type": "string", "description": "开始时间,格式为'HH:MM',填写current则获取当前任务"}, - "end_time": {"type": "string", "description": "结束时间,格式为'HH:MM',填写current则获取当前任务"}, - }, - "required": ["start_time", "end_time"], - } - - async def execute(self, function_args: dict[str, Any], message_txt: str = "") -> dict[str, Any]: - """执行获取当前任务或指定时间段的日程信息 - - Args: - function_args: 工具参数 - message_txt: 原始消息文本,此工具不使用 - - Returns: - dict: 工具执行结果 - """ - start_time = function_args.get("start_time") - end_time = function_args.get("end_time") - - # 如果 start_time 或 end_time 为 "current",则获取当前任务 - if start_time == "current" or end_time == "current": - current_task = bot_schedule.get_current_num_task(num=1, time_info=True) - current_time = datetime.now().strftime("%H:%M:%S") - current_date = datetime.now().strftime("%Y-%m-%d") - if current_task: - task_info = f"{current_date} {current_time},你在{current_task}" - else: - task_info = f"{current_time} {current_date},没在做任何事情" - # 如果提供了时间范围,则获取该时间段的日程信息 - elif start_time and end_time: - tasks = await bot_schedule.get_task_from_time_to_time(start_time, end_time) - if tasks: - task_list = [] - for task in tasks: - task_time = task[0].strftime("%H:%M") - task_content = 
task[1] - task_list.append(f"{task_time}时,{task_content}") - task_info = "\n".join(task_list) - else: - task_info = f"在 {start_time} 到 {end_time} 之间没有找到日程信息" - else: - task_info = "请提供有效的开始时间和结束时间" - return {"name": "get_current_task", "content": f"日程信息: {task_info}"} diff --git a/src/plugins/PFC/action_planner.py b/src/experimental/PFC/action_planner.py similarity index 98% rename from src/plugins/PFC/action_planner.py rename to src/experimental/PFC/action_planner.py index 4770c6ce5..b4182c9aa 100644 --- a/src/plugins/PFC/action_planner.py +++ b/src/experimental/PFC/action_planner.py @@ -1,14 +1,14 @@ import time from typing import Tuple, Optional # 增加了 Optional from src.common.logger_manager import get_logger -from ..models.utils_model import LLMRequest -from ...config.config import global_config -from .chat_observer import ChatObserver -from .pfc_utils import get_items_from_json +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.experimental.PFC.chat_observer import ChatObserver +from src.experimental.PFC.pfc_utils import get_items_from_json from src.individuality.individuality import Individuality -from .observation_info import ObservationInfo -from .conversation_info import ConversationInfo -from src.plugins.utils.chat_message_builder import build_readable_messages +from src.experimental.PFC.observation_info import ObservationInfo +from src.experimental.PFC.conversation_info import ConversationInfo +from src.chat.utils.chat_message_builder import build_readable_messages logger = get_logger("pfc_action_planner") diff --git a/src/plugins/PFC/chat_observer.py b/src/experimental/PFC/chat_observer.py similarity index 98% rename from src/plugins/PFC/chat_observer.py rename to src/experimental/PFC/chat_observer.py index 22cbf27d3..704eeb330 100644 --- a/src/plugins/PFC/chat_observer.py +++ b/src/experimental/PFC/chat_observer.py @@ -4,9 +4,13 @@ import traceback from typing import Optional, Dict, Any, List from src.common.logger import get_module_logger from maim_message import UserInfo -from ...config.config import global_config -from .chat_states import NotificationManager, create_new_message_notification, create_cold_chat_notification -from .message_storage import MongoDBMessageStorage +from src.config.config import global_config +from src.experimental.PFC.chat_states import ( + NotificationManager, + create_new_message_notification, + create_cold_chat_notification, +) +from src.experimental.PFC.message_storage import MongoDBMessageStorage from rich.traceback import install install(extra_lines=3) diff --git a/src/plugins/PFC/chat_states.py b/src/experimental/PFC/chat_states.py similarity index 100% rename from src/plugins/PFC/chat_states.py rename to src/experimental/PFC/chat_states.py diff --git a/src/plugins/PFC/conversation.py b/src/experimental/PFC/conversation.py similarity index 99% rename from src/plugins/PFC/conversation.py rename to src/experimental/PFC/conversation.py index 0bc4cae8d..0216e8e9e 100644 --- a/src/plugins/PFC/conversation.py +++ b/src/experimental/PFC/conversation.py @@ -3,11 +3,11 @@ import asyncio import datetime # from .message_storage import MongoDBMessageStorage -from src.plugins.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat +from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat # from ...config.config import global_config from typing import Dict, Any, Optional -from ..chat.message import Message 
+from src.chat.message_receive.message import Message from .pfc_types import ConversationState from .pfc import ChatObserver, GoalAnalyzer from .message_sender import DirectMessageSender @@ -16,9 +16,9 @@ from .action_planner import ActionPlanner from .observation_info import ObservationInfo from .conversation_info import ConversationInfo # 确保导入 ConversationInfo from .reply_generator import ReplyGenerator -from ..chat.chat_stream import ChatStream -from maim_message import UserInfo -from src.plugins.chat.chat_stream import chat_manager +from src.chat.message_receive.chat_stream import ChatStream +from src.chat.message_receive.message import UserInfo +from src.chat.message_receive.chat_stream import chat_manager from .pfc_KnowledgeFetcher import KnowledgeFetcher from .waiter import Waiter diff --git a/src/plugins/PFC/conversation_info.py b/src/experimental/PFC/conversation_info.py similarity index 100% rename from src/plugins/PFC/conversation_info.py rename to src/experimental/PFC/conversation_info.py diff --git a/src/plugins/PFC/message_sender.py b/src/experimental/PFC/message_sender.py similarity index 87% rename from src/plugins/PFC/message_sender.py rename to src/experimental/PFC/message_sender.py index 12c2143ea..181bf171b 100644 --- a/src/plugins/PFC/message_sender.py +++ b/src/experimental/PFC/message_sender.py @@ -1,13 +1,13 @@ import time from typing import Optional from src.common.logger import get_module_logger -from ..chat.chat_stream import ChatStream -from ..chat.message import Message +from src.chat.message_receive.chat_stream import ChatStream +from src.chat.message_receive.message import Message from maim_message import UserInfo, Seg -from src.plugins.chat.message import MessageSending, MessageSet -from src.plugins.chat.message_sender import message_manager -from ..storage.storage import MessageStorage -from ...config.config import global_config +from src.chat.message_receive.message import MessageSending, MessageSet +from src.chat.message_receive.message_sender import message_manager +from src.chat.message_receive.storage import MessageStorage +from src.config.config import global_config from rich.traceback import install install(extra_lines=3) diff --git a/src/plugins/PFC/message_storage.py b/src/experimental/PFC/message_storage.py similarity index 100% rename from src/plugins/PFC/message_storage.py rename to src/experimental/PFC/message_storage.py diff --git a/src/plugins/PFC/observation_info.py b/src/experimental/PFC/observation_info.py similarity index 98% rename from src/plugins/PFC/observation_info.py rename to src/experimental/PFC/observation_info.py index c75729553..5e14bf1d6 100644 --- a/src/plugins/PFC/observation_info.py +++ b/src/experimental/PFC/observation_info.py @@ -2,9 +2,9 @@ from typing import List, Optional, Dict, Any, Set from maim_message import UserInfo import time from src.common.logger import get_module_logger -from .chat_observer import ChatObserver -from .chat_states import NotificationHandler, NotificationType, Notification -from src.plugins.utils.chat_message_builder import build_readable_messages +from src.experimental.PFC.chat_observer import ChatObserver +from src.experimental.PFC.chat_states import NotificationHandler, NotificationType, Notification +from src.chat.utils.chat_message_builder import build_readable_messages import traceback # 导入 traceback 用于调试 logger = get_module_logger("observation_info") diff --git a/src/plugins/PFC/pfc.py b/src/experimental/PFC/pfc.py similarity index 96% rename from src/plugins/PFC/pfc.py rename to 
src/experimental/PFC/pfc.py index b17ee21d9..84fb9f8dc 100644 --- a/src/plugins/PFC/pfc.py +++ b/src/experimental/PFC/pfc.py @@ -1,13 +1,13 @@ from typing import List, Tuple, TYPE_CHECKING from src.common.logger import get_module_logger -from ..models.utils_model import LLMRequest -from ...config.config import global_config -from .chat_observer import ChatObserver -from .pfc_utils import get_items_from_json +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.experimental.PFC.chat_observer import ChatObserver +from src.experimental.PFC.pfc_utils import get_items_from_json from src.individuality.individuality import Individuality -from .conversation_info import ConversationInfo -from .observation_info import ObservationInfo -from src.plugins.utils.chat_message_builder import build_readable_messages +from src.experimental.PFC.conversation_info import ConversationInfo +from src.experimental.PFC.observation_info import ObservationInfo +from src.chat.utils.chat_message_builder import build_readable_messages from rich.traceback import install install(extra_lines=3) diff --git a/src/plugins/PFC/pfc_KnowledgeFetcher.py b/src/experimental/PFC/pfc_KnowledgeFetcher.py similarity index 88% rename from src/plugins/PFC/pfc_KnowledgeFetcher.py rename to src/experimental/PFC/pfc_KnowledgeFetcher.py index 0989339df..8ebc307e2 100644 --- a/src/plugins/PFC/pfc_KnowledgeFetcher.py +++ b/src/experimental/PFC/pfc_KnowledgeFetcher.py @@ -1,11 +1,11 @@ from typing import List, Tuple from src.common.logger import get_module_logger -from src.plugins.memory_system.Hippocampus import HippocampusManager -from ..models.utils_model import LLMRequest -from ...config.config import global_config -from ..chat.message import Message -from ..knowledge.knowledge_lib import qa_manager -from ..utils.chat_message_builder import build_readable_messages +from src.chat.memory_system.Hippocampus import HippocampusManager +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.chat.message_receive.message import Message +from src.chat.knowledge.knowledge_lib import qa_manager +from src.chat.utils.chat_message_builder import build_readable_messages logger = get_module_logger("knowledge_fetcher") diff --git a/src/plugins/PFC/pfc_manager.py b/src/experimental/PFC/pfc_manager.py similarity index 100% rename from src/plugins/PFC/pfc_manager.py rename to src/experimental/PFC/pfc_manager.py diff --git a/src/plugins/PFC/pfc_types.py b/src/experimental/PFC/pfc_types.py similarity index 100% rename from src/plugins/PFC/pfc_types.py rename to src/experimental/PFC/pfc_types.py diff --git a/src/plugins/PFC/pfc_utils.py b/src/experimental/PFC/pfc_utils.py similarity index 100% rename from src/plugins/PFC/pfc_utils.py rename to src/experimental/PFC/pfc_utils.py diff --git a/src/plugins/PFC/reply_checker.py b/src/experimental/PFC/reply_checker.py similarity index 98% rename from src/plugins/PFC/reply_checker.py rename to src/experimental/PFC/reply_checker.py index 35e9af500..a76e8a0da 100644 --- a/src/plugins/PFC/reply_checker.py +++ b/src/experimental/PFC/reply_checker.py @@ -1,9 +1,9 @@ import json from typing import Tuple, List, Dict, Any from src.common.logger import get_module_logger -from ..models.utils_model import LLMRequest -from ...config.config import global_config -from .chat_observer import ChatObserver +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from 
src.experimental.PFC.chat_observer import ChatObserver from maim_message import UserInfo logger = get_module_logger("reply_checker") diff --git a/src/plugins/PFC/reply_generator.py b/src/experimental/PFC/reply_generator.py similarity index 97% rename from src/plugins/PFC/reply_generator.py rename to src/experimental/PFC/reply_generator.py index 890f807c7..6dcda69af 100644 --- a/src/plugins/PFC/reply_generator.py +++ b/src/experimental/PFC/reply_generator.py @@ -1,13 +1,13 @@ from typing import Tuple, List, Dict, Any from src.common.logger import get_module_logger -from ..models.utils_model import LLMRequest -from ...config.config import global_config -from .chat_observer import ChatObserver -from .reply_checker import ReplyChecker +from src.chat.models.utils_model import LLMRequest +from src.config.config import global_config +from src.experimental.PFC.chat_observer import ChatObserver +from src.experimental.PFC.reply_checker import ReplyChecker from src.individuality.individuality import Individuality from .observation_info import ObservationInfo from .conversation_info import ConversationInfo -from src.plugins.utils.chat_message_builder import build_readable_messages +from src.chat.utils.chat_message_builder import build_readable_messages logger = get_module_logger("reply_generator") diff --git a/src/plugins/PFC/waiter.py b/src/experimental/PFC/waiter.py similarity index 98% rename from src/plugins/PFC/waiter.py rename to src/experimental/PFC/waiter.py index 0f5881fc0..af5cf7ad0 100644 --- a/src/plugins/PFC/waiter.py +++ b/src/experimental/PFC/waiter.py @@ -3,7 +3,7 @@ from .chat_observer import ChatObserver from .conversation_info import ConversationInfo # from src.individuality.individuality import Individuality # 不再需要 -from ...config.config import global_config +from src.config.config import global_config import time import asyncio diff --git a/src/plugins/chat/only_message_process.py b/src/experimental/only_message_process.py similarity index 95% rename from src/plugins/chat/only_message_process.py rename to src/experimental/only_message_process.py index b1bb0cea5..3d1432703 100644 --- a/src/plugins/chat/only_message_process.py +++ b/src/experimental/only_message_process.py @@ -1,6 +1,6 @@ from src.common.logger_manager import get_logger -from src.plugins.chat.message import MessageRecv -from src.plugins.storage.storage import MessageStorage +from src.chat.message_receive.message import MessageRecv +from src.chat.message_receive.storage import MessageStorage from src.config.config import global_config from datetime import datetime diff --git a/src/plugins/heartFC_chat/heartFC_chatting_logic.md b/src/heartFC_chatting_logic.md similarity index 100% rename from src/plugins/heartFC_chat/heartFC_chatting_logic.md rename to src/heartFC_chatting_logic.md diff --git a/src/plugins/heartFC_chat/heartFC_readme.md b/src/heartFC_readme.md similarity index 89% rename from src/plugins/heartFC_chat/heartFC_readme.md rename to src/heartFC_readme.md index 07bc4c63c..10b1aa1fd 100644 --- a/src/plugins/heartFC_chat/heartFC_readme.md +++ b/src/heartFC_readme.md @@ -5,7 +5,7 @@ HeartFC_chat 是一个基于心流理论的聊天系统,通过模拟人类的 ## 核心工作流程 ### 1. 消息处理与存储 (HeartFCProcessor) -[代码位置: src/plugins/heartFC_chat/heartflow_processor.py] +[代码位置: src/plugins/focus_chat/heartflow_processor.py] 消息处理器负责接收和预处理消息,主要完成以下工作: ```mermaid @@ -23,7 +23,7 @@ graph TD - 消息存储:`storage.store_message()` [行号: 108] ### 2. 
对话管理循环 (HeartFChatting) -[代码位置: src/plugins/heartFC_chat/heartFC_chat.py] +[代码位置: src/plugins/focus_chat/focus_chat.py] HeartFChatting是系统的核心组件,实现了完整的对话管理循环: @@ -55,7 +55,7 @@ graph TD * 处理表情:`_handle_emoji()` [行号: 527-567] ### 3. 回复生成机制 (HeartFCGenerator) -[代码位置: src/plugins/heartFC_chat/heartFC_generator.py] +[代码位置: src/plugins/focus_chat/heartFC_generator.py] 回复生成器负责产生高质量的回复内容: @@ -74,7 +74,7 @@ graph TD * 响应处理:`_process_response()` [行号: 97-106] ### 4. 提示词构建系统 (HeartFlowPromptBuilder) -[代码位置: src/plugins/heartFC_chat/heartflow_prompt_builder.py] +[代码位置: src/plugins/focus_chat/heartflow_prompt_builder.py] 提示词构建器支持两种工作模式,HeartFC_chat专门使用Focus模式,而Normal模式是为normal_chat设计的: @@ -106,8 +106,8 @@ graph TD ## 智能特性 ### 1. 对话决策机制 -- LLM决策工具定义:`PLANNER_TOOL_DEFINITION` [heartFC_chat.py 行号: 13-42] -- 决策执行:`_planner()` [heartFC_chat.py 行号: 282-386] +- LLM决策工具定义:`PLANNER_TOOL_DEFINITION` [focus_chat.py 行号: 13-42] +- 决策执行:`_planner()` [focus_chat.py 行号: 282-386] - 考虑因素: * 上下文相关性 * 情感状态 @@ -115,7 +115,7 @@ graph TD * 对话时机 ### 2. 状态管理 -[代码位置: src/plugins/heartFC_chat/heartFC_chat.py] +[代码位置: src/plugins/focus_chat/focus_chat.py] - 状态机实现:`HeartFChatting`类 [行号: 44-567] - 核心功能: * 初始化:`_initialize()` [行号: 89-112] @@ -123,7 +123,7 @@ graph TD * 状态转换:`_handle_loop_completion()` [行号: 166-190] ### 3. 回复生成策略 -[代码位置: src/plugins/heartFC_chat/heartFC_generator.py] +[代码位置: src/plugins/focus_chat/heartFC_generator.py] - 温度调节:`current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier` [行号: 48] - 生成控制:`_generate_response_with_model()` [行号: 69-95] - 响应处理:`_process_response()` [行号: 97-106] @@ -133,7 +133,7 @@ graph TD ### 关键参数 - LLM配置:`model_normal` [heartFC_generator.py 行号: 32-37] - 过滤规则:`_check_ban_words()`, `_check_ban_regex()` [heartflow_processor.py 行号: 196-215] -- 状态控制:`INITIAL_DURATION = 60.0` [heartFC_chat.py 行号: 11] +- 状态控制:`INITIAL_DURATION = 60.0` [focus_chat.py 行号: 11] ### 优化建议 1. 
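The readme above documents reply temperature as the configured llm_normal["temp"] multiplied by an arousal_multiplier taken from the mood state. A tiny worked sketch of that scaling; the clamp bounds and the function name are assumptions added here, the readme only specifies the multiplication:

def scaled_temperature(base_temp: float, arousal_multiplier: float,
                       low: float = 0.1, high: float = 1.5) -> float:
    # Higher arousal samples hotter; clamping keeps the value in a usable range.
    return max(low, min(high, base_temp * arousal_multiplier))

print(round(scaled_temperature(0.7, 1.2), 2))  # -> 0.84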
调整LLM参数:`temperature`和`max_tokens` diff --git a/src/heart_flow/mind.py b/src/heart_flow/mind.py deleted file mode 100644 index 89ffc6a39..000000000 --- a/src/heart_flow/mind.py +++ /dev/null @@ -1,139 +0,0 @@ -import traceback -from typing import TYPE_CHECKING - -from src.common.logger_manager import get_logger -from src.plugins.models.utils_model import LLMRequest -from src.individuality.individuality import Individuality -from src.plugins.utils.prompt_builder import global_prompt_manager -from src.config.config import global_config - -# Need access to SubHeartflowManager to get minds and update them -if TYPE_CHECKING: - from src.heart_flow.subheartflow_manager import SubHeartflowManager - from src.heart_flow.mai_state_manager import MaiStateInfo - - -logger = get_logger("sub_heartflow_mind") - - -class Mind: - """封装 Mai 的思考过程,包括生成内心独白和汇总想法。""" - - def __init__(self, subheartflow_manager: "SubHeartflowManager", llm_model: LLMRequest): - self.subheartflow_manager = subheartflow_manager - self.llm_model = llm_model - self.individuality = Individuality.get_instance() - - async def do_a_thinking(self, current_main_mind: str, mai_state_info: "MaiStateInfo", schedule_info: str): - """ - 执行一次主心流思考过程,生成新的内心独白。 - - Args: - current_main_mind: 当前的主心流想法。 - mai_state_info: 当前的 Mai 状态信息 (用于获取 mood)。 - schedule_info: 当前的日程信息。 - - Returns: - str: 生成的新的内心独白,如果出错则返回提示信息。 - """ - logger.debug("Mind: 执行思考...") - - # --- 构建 Prompt --- # - personality_info = ( - self.individuality.get_prompt_snippet() - if hasattr(self.individuality, "get_prompt_snippet") - else self.individuality.personality.personality_core - ) - mood_info = mai_state_info.get_mood_prompt() - related_memory_info = "memory" # TODO: Implement memory retrieval - - # Get subflow minds summary via internal method - try: - sub_flows_info = await self._get_subflows_summary(current_main_mind, mai_state_info) - except Exception as e: - logger.error(f"[Mind Thinking] 获取子心流想法汇总失败: {e}") - logger.error(traceback.format_exc()) - sub_flows_info = "(获取子心流想法时出错)" - - # Format prompt - try: - prompt = (await global_prompt_manager.get_prompt_async("thinking_prompt")).format( - schedule_info=schedule_info, - personality_info=personality_info, - related_memory_info=related_memory_info, - current_thinking_info=current_main_mind, # Use passed current mind - sub_flows_info=sub_flows_info, - mood_info=mood_info, - ) - except Exception as e: - logger.error(f"[Mind Thinking] 格式化 thinking_prompt 失败: {e}") - return "(思考时格式化Prompt出错...)" - - # --- 调用 LLM --- # - try: - response, reasoning_content = await self.llm_model.generate_response_async(prompt) - if not response: - logger.warning("[Mind Thinking] 内心独白 LLM 返回空结果。") - response = "(暂时没什么想法...)" - logger.info(f"Mind: 新想法生成: {response[:100]}...") # Log truncated response - return response - except Exception as e: - logger.error(f"[Mind Thinking] 内心独白 LLM 调用失败: {e}") - logger.error(traceback.format_exc()) - return "(思考时调用LLM出错...)" - - async def _get_subflows_summary(self, current_main_mind: str, mai_state_info: "MaiStateInfo") -> str: - """获取所有活跃子心流的想法,并使用 LLM 进行汇总。""" - # 1. Get active minds from SubHeartflowManager - sub_minds_list = self.subheartflow_manager.get_active_subflow_minds() - - if not sub_minds_list: - return "(当前没有活跃的子心流想法)" - - minds_str = "\n".join([f"- {mind}" for mind in sub_minds_list]) - logger.debug(f"Mind: 获取到 {len(sub_minds_list)} 个子心流想法进行汇总。") - - # 2. 
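The deleted `Mind.do_a_thinking` and `_get_subflows_summary` above share one recurring pattern: fetch a named prompt template, format it with the current context, call the LLM, and fall back to a fixed placeholder string on any failure so the caller always receives usable text. A minimal sketch of that pattern, with a stubbed template store and a `fake_llm` coroutine standing in for `global_prompt_manager` and `LLMRequest` (both stand-ins are hypothetical):

```python
import asyncio

TEMPLATES = {"thinking_prompt": "You are {bot_name}. Mood: {mood}. Prior thought: {prior}."}


async def fake_llm(prompt: str) -> str:
    """Stand-in for an async LLM call such as generate_response_async()."""
    await asyncio.sleep(0)  # pretend to do network I/O
    return f"(thought based on: {prompt[:40]}...)"


async def think(bot_name: str, mood: str, prior: str) -> str:
    # 1. Format the template; a formatting error degrades to a safe placeholder.
    try:
        prompt = TEMPLATES["thinking_prompt"].format(bot_name=bot_name, mood=mood, prior=prior)
    except (KeyError, IndexError):
        return "(failed to build prompt)"
    # 2. Call the LLM; an empty or failed response also degrades gracefully.
    try:
        response = await fake_llm(prompt)
        return response or "(no particular thoughts right now)"
    except Exception:
        return "(LLM call failed while thinking)"


print(asyncio.run(think("Mai", "relaxed", "was chatting about tea")))
```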
Call LLM for summary - # --- 构建 Prompt --- # - personality_info = ( - self.individuality.get_prompt_snippet() - if hasattr(self.individuality, "get_prompt_snippet") - else self.individuality.personality.personality_core - ) - mood_info = mai_state_info.get_mood_prompt() - bot_name = global_config.BOT_NICKNAME - - try: - prompt = (await global_prompt_manager.get_prompt_async("mind_summary_prompt")).format( - personality_info=personality_info, - bot_name=bot_name, - current_mind=current_main_mind, # Use main mind passed for context - minds_str=minds_str, - mood_info=mood_info, - ) - except Exception as e: - logger.error(f"[Mind Summary] 格式化 mind_summary_prompt 失败: {e}") - return "(汇总想法时格式化Prompt出错...)" - - # --- 调用 LLM --- # - try: - response, reasoning_content = await self.llm_model.generate_response_async(prompt) - if not response: - logger.warning("[Mind Summary] 想法汇总 LLM 返回空结果。") - return "(想法汇总失败...)" - logger.debug(f"Mind: 子想法汇总完成: {response[:100]}...") - return response - except Exception as e: - logger.error(f"[Mind Summary] 想法汇总 LLM 调用失败: {e}") - logger.error(traceback.format_exc()) - return "(想法汇总时调用LLM出错...)" - - def update_subflows_with_main_mind(self, main_mind: str): - """触发 SubHeartflowManager 更新所有子心流的主心流信息。""" - logger.debug("Mind: 请求更新子心流的主想法信息。") - self.subheartflow_manager.update_main_mind_in_subflows(main_mind) - - -# Note: update_current_mind (managing self.current_mind and self.past_mind) -# remains in Heartflow for now, as Heartflow is the central coordinator holding the main state. -# Mind class focuses solely on the *process* of thinking and summarizing. diff --git a/src/heart_flow/sub_mind.py b/src/heart_flow/sub_mind.py deleted file mode 100644 index 31f571598..000000000 --- a/src/heart_flow/sub_mind.py +++ /dev/null @@ -1,586 +0,0 @@ -from .observation import ChattingObservation -from src.plugins.models.utils_model import LLMRequest -from src.config.config import global_config -import time -import traceback -from src.common.logger_manager import get_logger -from src.individuality.individuality import Individuality -import random -from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager -from src.do_tool.tool_use import ToolUser -from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls -from src.heart_flow.chat_state_info import ChatStateInfo -from src.plugins.chat.chat_stream import chat_manager -from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo -import difflib -from src.plugins.person_info.relationship_manager import relationship_manager -from src.plugins.memory_system.Hippocampus import HippocampusManager -import jieba - - -logger = get_logger("sub_heartflow") - - -def init_prompt(): - # --- Group Chat Prompt --- - group_prompt = """ -{extra_info} -{relation_prompt} -你的名字是{bot_name},{prompt_personality} -{last_loop_prompt} -{cycle_info_block} -现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容: -{chat_observe_info} - -你现在{mood_info} -请仔细阅读当前群聊内容,分析讨论话题和群成员关系,分析你刚刚发言和别人对你的发言的反应,思考你要不要回复。然后思考你是否需要使用函数工具。 -思考并输出你的内心想法 -输出要求: -1. 根据聊天内容生成你的想法,{hf_do_next} -2. 不要分点、不要使用表情符号 -3. 避免多余符号(冒号、引号、括号等) -4. 语言简洁自然,不要浮夸 -5. 如果你刚发言,并且没有人回复你,不要回复 -工具使用说明: -1. 输出想法后考虑是否需要使用工具 -2. 工具可获取信息或执行操作 -3. 
如需处理消息或回复,请使用工具。""" - Prompt(group_prompt, "sub_heartflow_prompt_before") - - # --- Private Chat Prompt --- - private_prompt = """ -{extra_info} -{relation_prompt} -你的名字是{bot_name},{prompt_personality} -{last_loop_prompt} -{cycle_info_block} -现在是{time_now},你正在上网,和 {chat_target_name} 私聊,以下是你们的聊天内容: -{chat_observe_info} - -你现在{mood_info} -请仔细阅读聊天内容,想想你和 {chat_target_name} 的关系,回顾你们刚刚的交流,你刚刚发言和对方的反应,思考聊天的主题。 -请思考你要不要回复以及如何回复对方。然后思考你是否需要使用函数工具。 -思考并输出你的内心想法 -输出要求: -1. 根据聊天内容生成你的想法,{hf_do_next} -2. 不要分点、不要使用表情符号 -3. 避免多余符号(冒号、引号、括号等) -4. 语言简洁自然,不要浮夸 -5. 如果你刚发言,对方没有回复你,请谨慎回复 -工具使用说明: -1. 输出想法后考虑是否需要使用工具 -2. 工具可获取信息或执行操作 -3. 如需处理消息或回复,请使用工具。""" - Prompt(private_prompt, "sub_heartflow_prompt_private_before") # New template name - - # --- Last Loop Prompt (remains the same) --- - last_loop_t = """ -刚刚你的内心想法是:{current_thinking_info} -{if_replan_prompt} -""" - Prompt(last_loop_t, "last_loop") - - -def calculate_similarity(text_a: str, text_b: str) -> float: - """ - 计算两个文本字符串的相似度。 - """ - if not text_a or not text_b: - return 0.0 - matcher = difflib.SequenceMatcher(None, text_a, text_b) - return matcher.ratio() - - -def calculate_replacement_probability(similarity: float) -> float: - """ - 根据相似度计算替换的概率。 - 规则: - - 相似度 <= 0.4: 概率 = 0 - - 相似度 >= 0.9: 概率 = 1 - - 相似度 == 0.6: 概率 = 0.7 - - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7) - - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0) - """ - if similarity <= 0.4: - return 0.0 - elif similarity >= 0.9: - return 1.0 - elif 0.4 < similarity <= 0.6: - # p = 3.5 * s - 1.4 - probability = 3.5 * similarity - 1.4 - return max(0.0, probability) - else: # 0.6 < similarity < 0.9 - # p = s + 0.1 - probability = similarity + 0.1 - return min(1.0, max(0.0, probability)) - - -class SubMind: - def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: ChattingObservation): - self.last_active_time = None - self.subheartflow_id = subheartflow_id - - self.llm_model = LLMRequest( - model=global_config.llm_sub_heartflow, - temperature=global_config.llm_sub_heartflow["temp"], - max_tokens=800, - request_type="sub_heart_flow", - ) - - self.chat_state = chat_state - self.observations = observations - - self.current_mind = "" - self.past_mind = [] - self.structured_info = [] - self.structured_info_str = "" - - name = chat_manager.get_stream_name(self.subheartflow_id) - self.log_prefix = f"[{name}] " - self._update_structured_info_str() - - def _update_structured_info_str(self): - """根据 structured_info 更新 structured_info_str""" - if not self.structured_info: - self.structured_info_str = "" - return - - lines = ["【信息】"] - for item in self.structured_info: - # 简化展示,突出内容和类型,包含TTL供调试 - type_str = item.get("type", "未知类型") - content_str = item.get("content", "") - - if type_str == "info": - lines.append(f"刚刚: {content_str}") - elif type_str == "memory": - lines.append(f"{content_str}") - elif type_str == "comparison_result": - lines.append(f"数字大小比较结果: {content_str}") - elif type_str == "time_info": - lines.append(f"{content_str}") - elif type_str == "lpmm_knowledge": - lines.append(f"你知道:{content_str}") - else: - lines.append(f"{type_str}的信息: {content_str}") - - self.structured_info_str = "\n".join(lines) - logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}") - - async def do_thinking_before_reply(self, history_cycle: list[CycleInfo] = None): - """ - 在回复前进行思考,生成内心想法并收集工具调用结果 - - 返回: - tuple: (current_mind, past_mind) 当前想法和过去的想法列表 - """ - # 更新活跃时间 - self.last_active_time = time.time() - - # ---------- 0. 
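The rules in `calculate_replacement_probability` above are two linear segments joined at (0.6, 0.7), clamped to 0 below similarity 0.4 and to 1 above 0.9. A few sample points make the mapping concrete; the snippet below simply re-evaluates the same piecewise formula for illustration:

```python
def replacement_probability(similarity: float) -> float:
    """Same piecewise rule as above: 0 below 0.4, 1 above 0.9,
    linear from (0.4, 0.0) to (0.6, 0.7), then (0.6, 0.7) to (0.9, 1.0)."""
    if similarity <= 0.4:
        return 0.0
    if similarity >= 0.9:
        return 1.0
    if similarity <= 0.6:
        return max(0.0, 3.5 * similarity - 1.4)
    return min(1.0, similarity + 0.1)


for s in (0.30, 0.45, 0.60, 0.75, 0.90):
    print(f"similarity={s:.2f} -> replacement probability={replacement_probability(s):.2f}")
# e.g. 0.45 maps to ~0.18, 0.60 to exactly 0.70, 0.75 to 0.85
```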
更新和清理 structured_info ---------- - if self.structured_info: - logger.debug( - f"{self.log_prefix} 更新前的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}" - ) - updated_info = [] - for item in self.structured_info: - item["ttl"] -= 1 - if item["ttl"] > 0: - updated_info.append(item) - else: - logger.debug(f"{self.log_prefix} 移除过期的 structured_info 项: {item['id']}") - self.structured_info = updated_info - logger.debug( - f"{self.log_prefix} 更新后的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}" - ) - self._update_structured_info_str() - logger.debug( - f"{self.log_prefix} 当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}" - ) - - # ---------- 1. 准备基础数据 ---------- - # 获取现有想法和情绪状态 - previous_mind = self.current_mind if self.current_mind else "" - mood_info = self.chat_state.mood - - # 获取观察对象 - observation: ChattingObservation = self.observations[0] if self.observations else None - if not observation or not hasattr(observation, "is_group_chat"): # Ensure it's ChattingObservation or similar - logger.error(f"{self.log_prefix} 无法获取有效的观察对象或缺少聊天类型信息") - self.update_current_mind("(观察出错了...)") - return self.current_mind, self.past_mind - - is_group_chat = observation.is_group_chat - # logger.debug(f"is_group_chat: {is_group_chat}") - - chat_target_info = observation.chat_target_info - chat_target_name = "对方" # Default for private - if not is_group_chat and chat_target_info: - chat_target_name = ( - chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name - ) - # --- End getting observation info --- - - # 获取观察内容 - chat_observe_info = observation.get_observe_info() - person_list = observation.person_list - - # ---------- 2. 获取记忆 ---------- - try: - # 从聊天内容中提取关键词 - chat_words = set(jieba.cut(chat_observe_info)) - # 过滤掉停用词和单字词 - keywords = [word for word in chat_words if len(word) > 1] - # 去重并限制数量 - keywords = list(set(keywords))[:5] - - logger.debug(f"{self.log_prefix} 提取的关键词: {keywords}") - # 检查已有记忆,过滤掉已存在的主题 - existing_topics = set() - for item in self.structured_info: - if item["type"] == "memory": - existing_topics.add(item["id"]) - - # 过滤掉已存在的主题 - filtered_keywords = [k for k in keywords if k not in existing_topics] - - if not filtered_keywords: - logger.debug(f"{self.log_prefix} 所有关键词对应的记忆都已存在,跳过记忆提取") - else: - # 调用记忆系统获取相关记忆 - related_memory = await HippocampusManager.get_instance().get_memory_from_topic( - valid_keywords=filtered_keywords, max_memory_num=3, max_memory_length=2, max_depth=3 - ) - - logger.debug(f"{self.log_prefix} 获取到的记忆: {related_memory}") - - if related_memory: - for topic, memory in related_memory: - new_item = {"type": "memory", "id": topic, "content": memory, "ttl": 3} - self.structured_info.append(new_item) - logger.debug(f"{self.log_prefix} 添加新记忆: {topic} - {memory}") - else: - logger.debug(f"{self.log_prefix} 没有找到相关记忆") - - except Exception as e: - logger.error(f"{self.log_prefix} 获取记忆时出错: {e}") - logger.error(traceback.format_exc()) - - # ---------- 3. 
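The bookkeeping above gives every `structured_info` entry a TTL that is decremented once per thinking cycle and dropped at zero, and new memory topics are fetched only if an entry with the same id is not already held. A compact sketch of that expiry-plus-dedup pattern; the dict shape mirrors the items used above, while the fetched memories are replaced by a plain dict stub:

```python
from typing import Any, Dict, List


def tick_ttl(items: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Decrement every item's TTL and keep only the ones still alive."""
    survivors = []
    for item in items:
        item["ttl"] -= 1
        if item["ttl"] > 0:
            survivors.append(item)
    return survivors


def add_new_memories(items: List[Dict[str, Any]], fetched: Dict[str, str]) -> None:
    """Append fetched topic->content pairs that are not already stored, with a fresh TTL."""
    existing = {item["id"] for item in items if item["type"] == "memory"}
    for topic, content in fetched.items():
        if topic not in existing:
            items.append({"type": "memory", "id": topic, "content": content, "ttl": 3})


info = [{"type": "memory", "id": "tea", "content": "likes green tea", "ttl": 3}]
info = tick_ttl(info)                                   # "tea" survives with ttl=2
add_new_memories(info, {"tea": "likes green tea", "games": "plays rhythm games"})
print([item["id"] for item in info])                    # ['tea', 'games'] -- "tea" is not duplicated
```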
准备工具和个性化数据 ---------- - # 初始化工具 - tool_instance = ToolUser() - tools = tool_instance._define_tools() - - # 获取个性化信息 - individuality = Individuality.get_instance() - - relation_prompt = "" - # print(f"person_list: {person_list}") - for person in person_list: - relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) - - # print(f"relat22222ion_prompt: {relation_prompt}") - - # 构建个性部分 - prompt_personality = individuality.get_prompt(x_person=2, level=2) - - # 获取当前时间 - time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - # ---------- 4. 构建思考指导部分 ---------- - # 创建本地随机数生成器,基于分钟数作为种子 - local_random = random.Random() - current_minute = int(time.strftime("%M")) - local_random.seed(current_minute) - - # 思考指导选项和权重 - hf_options = [ - ("可以参考之前的想法,在原来想法的基础上继续思考", 0.2), - ("可以参考之前的想法,在原来的想法上尝试新的话题", 0.4), - ("不要太深入", 0.2), - ("进行深入思考", 0.2), - ] - - last_cycle = history_cycle[-1] if history_cycle else None - # 上一次决策信息 - if last_cycle is not None: - last_action = last_cycle.action_type - last_reasoning = last_cycle.reasoning - is_replan = last_cycle.replanned - if is_replan: - if_replan_prompt = f"但是你有了上述想法之后,有了新消息,你决定重新思考后,你做了:{last_action}\n因为:{last_reasoning}\n" - else: - if_replan_prompt = f"出于这个想法,你刚才做了:{last_action}\n因为:{last_reasoning}\n" - else: - last_action = "" - last_reasoning = "" - is_replan = False - if_replan_prompt = "" - if previous_mind: - last_loop_prompt = (await global_prompt_manager.get_prompt_async("last_loop")).format( - current_thinking_info=previous_mind, if_replan_prompt=if_replan_prompt - ) - else: - last_loop_prompt = "" - - # 准备循环信息块 (分析最近的活动循环) - recent_active_cycles = [] - for cycle in reversed(history_cycle): - # 只关心实际执行了动作的循环 - if cycle.action_taken: - recent_active_cycles.append(cycle) - # 最多找最近的3个活动循环 - if len(recent_active_cycles) == 3: - break - - cycle_info_block = "" - consecutive_text_replies = 0 - responses_for_prompt = [] - - # 检查这最近的活动循环中有多少是连续的文本回复 (从最近的开始看) - for cycle in recent_active_cycles: - if cycle.action_type == "text_reply": - consecutive_text_replies += 1 - # 获取回复内容,如果不存在则返回'[空回复]' - response_text = cycle.response_info.get("response_text", []) - # 使用简单的 join 来格式化回复内容列表 - formatted_response = "[空回复]" if not response_text else " ".join(response_text) - responses_for_prompt.append(formatted_response) - else: - # 一旦遇到非文本回复,连续性中断 - break - - # 根据连续文本回复的数量构建提示信息 - # 注意: responses_for_prompt 列表是从最近到最远排序的 - if consecutive_text_replies >= 3: # 如果最近的三个活动都是文本回复 - cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' - elif consecutive_text_replies == 2: # 如果最近的两个活动是文本回复 - cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' - elif consecutive_text_replies == 1: # 如果最近的一个活动是文本回复 - cycle_info_block = f'你刚刚已经回复一条消息(内容: "{responses_for_prompt[0]}")' - - # 包装提示块,增加可读性,即使没有连续回复也给个标记 - if cycle_info_block: - cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n" - else: - # 如果最近的活动循环不是文本回复,或者没有活动循环 - cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n" - - # 加权随机选择思考指导 - hf_do_next = local_random.choices( - [option[0] for option in hf_options], weights=[option[1] for option in hf_options], k=1 - )[0] - - # ---------- 5. 
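Two details above are easy to miss: the thinking-guidance hint is drawn with a weighted random choice whose generator is seeded by the current minute (so every decision within the same minute gets the same hint), and the cycle history is scanned newest-first to count how many consecutive actions were text replies. A small sketch of both; the option texts are loose English paraphrases, not the original prompt strings:

```python
import random
import time

# Hypothetical guidance options mirroring the weighted list above.
OPTIONS = [("build on the previous thought", 0.2),
           ("try steering to a new topic", 0.4),
           ("keep it shallow", 0.2),
           ("think more deeply", 0.2)]


def guidance_for_current_minute() -> str:
    """Weighted choice seeded by the wall-clock minute, so repeated calls
    within the same minute return the same hint."""
    rng = random.Random(int(time.strftime("%M")))
    texts, weights = zip(*OPTIONS)
    return rng.choices(texts, weights=weights, k=1)[0]


def consecutive_text_replies(recent_actions: list) -> int:
    """Count trailing 'text_reply' actions, newest first, stopping at any other action."""
    count = 0
    for action in reversed(recent_actions):
        if action != "text_reply":
            break
        count += 1
    return count


print(guidance_for_current_minute())
print(consecutive_text_replies(["emoji_reply", "text_reply", "text_reply"]))  # 2
```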
构建最终提示词 ---------- - # --- Choose template based on chat type --- - logger.debug(f"is_group_chat: {is_group_chat}") - if is_group_chat: - template_name = "sub_heartflow_prompt_before" - prompt = (await global_prompt_manager.get_prompt_async(template_name)).format( - extra_info=self.structured_info_str, - prompt_personality=prompt_personality, - relation_prompt=relation_prompt, - bot_name=individuality.name, - time_now=time_now, - chat_observe_info=chat_observe_info, - mood_info=mood_info, - hf_do_next=hf_do_next, - last_loop_prompt=last_loop_prompt, - cycle_info_block=cycle_info_block, - # chat_target_name is not used in group prompt - ) - else: # Private chat - template_name = "sub_heartflow_prompt_private_before" - prompt = (await global_prompt_manager.get_prompt_async(template_name)).format( - extra_info=self.structured_info_str, - prompt_personality=prompt_personality, - relation_prompt=relation_prompt, # Might need adjustment for private context - bot_name=individuality.name, - time_now=time_now, - chat_target_name=chat_target_name, # Pass target name - chat_observe_info=chat_observe_info, - mood_info=mood_info, - hf_do_next=hf_do_next, - last_loop_prompt=last_loop_prompt, - cycle_info_block=cycle_info_block, - ) - # --- End choosing template --- - - # ---------- 6. 执行LLM请求并处理响应 ---------- - content = "" # 初始化内容变量 - _reasoning_content = "" # 初始化推理内容变量 - - try: - # 调用LLM生成响应 - response, _reasoning_content, tool_calls = await self.llm_model.generate_response_tool_async( - prompt=prompt, tools=tools - ) - - logger.debug(f"{self.log_prefix} 子心流输出的原始LLM响应: {response}") - - # 直接使用LLM返回的文本响应作为 content - content = response if response else "" - - if tool_calls: - # 直接将 tool_calls 传递给处理函数 - success, valid_tool_calls, error_msg = process_llm_tool_calls( - tool_calls, log_prefix=f"{self.log_prefix} " - ) - - if success and valid_tool_calls: - # 记录工具调用信息 - tool_calls_str = ", ".join( - [call.get("function", {}).get("name", "未知工具") for call in valid_tool_calls] - ) - logger.info(f"{self.log_prefix} 模型请求调用{len(valid_tool_calls)}个工具: {tool_calls_str}") - - # 收集工具执行结果 - await self._execute_tool_calls(valid_tool_calls, tool_instance) - elif not success: - logger.warning(f"{self.log_prefix} 处理工具调用时出错: {error_msg}") - else: - logger.info(f"{self.log_prefix} 心流未使用工具") - - except Exception as e: - # 处理总体异常 - logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}") - logger.error(traceback.format_exc()) - content = "思考过程中出现错误" - - # 记录初步思考结果 - logger.debug(f"{self.log_prefix} 初步心流思考结果: {content}\nprompt: {prompt}\n") - - # 处理空响应情况 - if not content: - content = "(不知道该想些什么...)" - logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。") - - # ---------- 7. 
应用概率性去重和修饰 ---------- - new_content = content # 保存 LLM 直接输出的结果 - try: - similarity = calculate_similarity(previous_mind, new_content) - replacement_prob = calculate_replacement_probability(similarity) - logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}") - - # 定义词语列表 (移到判断之前) - yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"] - zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"] - cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"] - zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao - - if random.random() < replacement_prob: - # 相似度非常高时,尝试去重或特殊处理 - if similarity == 1.0: - logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...") - # 随机截取大约一半内容 - if len(new_content) > 1: # 避免内容过短无法截取 - split_point = max( - 1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4) - ) - truncated_content = new_content[:split_point] - else: - truncated_content = new_content # 如果只有一个字符或者为空,就不截取了 - - # 添加语气词和转折/承接词 - yu_qi_ci = random.choice(yu_qi_ci_liebiao) - zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao) - content = f"{yu_qi_ci}{zhuan_jie_ci},{truncated_content}" - logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}") - - else: - # 相似度较高但非100%,执行标准去重逻辑 - logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...") - matcher = difflib.SequenceMatcher(None, previous_mind, new_content) - deduplicated_parts = [] - last_match_end_in_b = 0 - for _i, j, n in matcher.get_matching_blocks(): - if last_match_end_in_b < j: - deduplicated_parts.append(new_content[last_match_end_in_b:j]) - last_match_end_in_b = j + n - - deduplicated_content = "".join(deduplicated_parts).strip() - - if deduplicated_content: - # 根据概率决定是否添加词语 - prefix_str = "" - if random.random() < 0.3: # 30% 概率添加语气词 - prefix_str += random.choice(yu_qi_ci_liebiao) - if random.random() < 0.7: # 70% 概率添加转折/承接词 - prefix_str += random.choice(zhuan_jie_ci_liebiao) - - # 组合最终结果 - if prefix_str: - content = f"{prefix_str},{deduplicated_content}" # 更新 content - logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}") - else: - content = deduplicated_content # 更新 content - logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}") - else: - logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}") - content = new_content # 保留原始 content - else: - logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})") - # content 保持 new_content 不变 - - except Exception as e: - logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}") - logger.error(traceback.format_exc()) - # 出错时保留原始 content - content = new_content - - # ---------- 8. 
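The dedup branch above keeps only the parts of the new thought that do not literally repeat the previous one: `difflib.SequenceMatcher.get_matching_blocks()` marks the shared spans, and the text between them is concatenated back together. A stripped-down sketch of just that step, with made-up example strings:

```python
import difflib


def strip_repeats(previous: str, new: str) -> str:
    """Return the parts of `new` that are not shared verbatim with `previous`."""
    matcher = difflib.SequenceMatcher(None, previous, new)
    kept = []
    last_end = 0
    for _a_start, b_start, size in matcher.get_matching_blocks():
        if last_end < b_start:            # text between two shared spans is novel
            kept.append(new[last_end:b_start])
        last_end = b_start + size
    return "".join(kept).strip()


prev = "今天群里在聊猫猫,我想发张猫的表情"
new = "今天群里在聊猫猫,不过话题好像转到狗了"
print(strip_repeats(prev, new))   # roughly "不过话题好像转到狗了"
```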
更新思考状态并返回结果 ---------- - logger.info(f"{self.log_prefix} 最终心流思考结果: {content}") - # 更新当前思考内容 - self.update_current_mind(content) - - return self.current_mind, self.past_mind - - async def _execute_tool_calls(self, tool_calls, tool_instance): - """ - 执行一组工具调用并收集结果 - - 参数: - tool_calls: 工具调用列表 - tool_instance: 工具使用器实例 - """ - tool_results = [] - new_structured_items = [] # 收集新产生的结构化信息 - - # 执行所有工具调用 - for tool_call in tool_calls: - try: - result = await tool_instance._execute_tool_call(tool_call) - if result: - tool_results.append(result) - # 创建新的结构化信息项 - new_item = { - "type": result.get("type", "unknown_type"), # 使用 'type' 键 - "id": result.get("id", f"fallback_id_{time.time()}"), # 使用 'id' 键 - "content": result.get("content", ""), # 'content' 键保持不变 - "ttl": 3, - } - new_structured_items.append(new_item) - - except Exception as tool_e: - logger.error(f"[{self.subheartflow_id}] 工具执行失败: {tool_e}") - logger.error(traceback.format_exc()) # 添加 traceback 记录 - - # 如果有新的工具结果,记录并更新结构化信息 - if new_structured_items: - self.structured_info.extend(new_structured_items) # 添加到现有列表 - logger.debug(f"工具调用收集到新的结构化信息: {safe_json_dumps(new_structured_items, ensure_ascii=False)}") - # logger.debug(f"当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}") # 可以取消注释以查看完整列表 - self._update_structured_info_str() # 添加新信息后,更新字符串表示 - - def update_current_mind(self, response): - if self.current_mind: # 只有当 current_mind 非空时才添加到 past_mind - self.past_mind.append(self.current_mind) - # 可以考虑限制 past_mind 的大小,例如: - # max_past_mind_size = 10 - # if len(self.past_mind) > max_past_mind_size: - # self.past_mind.pop(0) # 移除最旧的 - - self.current_mind = response - - -init_prompt() diff --git a/src/main.py b/src/main.py index 09570a4f2..34b7eda3d 100644 --- a/src/main.py +++ b/src/main.py @@ -1,27 +1,25 @@ import asyncio import time - from maim_message import MessageServer - -from .plugins.remote.remote import TelemetryHeartBeatTask +from .common.remote import TelemetryHeartBeatTask from .manager.async_task_manager import async_task_manager -from .plugins.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask +from .chat.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask from .manager.mood_manager import MoodPrintTask, MoodUpdateTask -from .plugins.schedule.schedule_generator import bot_schedule -from .plugins.emoji_system.emoji_manager import emoji_manager -from .plugins.person_info.person_info import person_info_manager -from .plugins.willing.willing_manager import willing_manager -from .plugins.chat.chat_stream import chat_manager -from .heart_flow.heartflow import heartflow -from .plugins.memory_system.Hippocampus import HippocampusManager -from .plugins.chat.message_sender import message_manager -from .plugins.storage.storage import MessageStorage +from .chat.emoji_system.emoji_manager import emoji_manager +from .chat.person_info.person_info import person_info_manager +from .chat.normal_chat.willing.willing_manager import willing_manager +from .chat.message_receive.chat_stream import chat_manager +from src.chat.heart_flow.heartflow import heartflow +from .chat.memory_system.Hippocampus import HippocampusManager +from .chat.message_receive.message_sender import message_manager +from .chat.message_receive.storage import MessageStorage from .config.config import global_config -from .plugins.chat.bot import chat_bot +from .chat.message_receive.bot import chat_bot from .common.logger_manager import get_logger from .individuality.individuality import Individuality from .common.server 
import global_server, Server from rich.traceback import install +from .chat.focus_chat.expressors.exprssion_learner import expression_learner from .api.main import start_api_server install(extra_lines=3) @@ -35,7 +33,7 @@ class MainSystem: self.individuality: Individuality = Individuality.get_instance() # 使用消息API替代直接的FastAPI实例 - from .plugins.message import global_api + from src.common.message import global_api self.app: MessageServer = global_api self.server: Server = global_server @@ -89,15 +87,6 @@ class MainSystem: self.hippocampus_manager.initialize(global_config=global_config) # await asyncio.sleep(0.5) #防止logger输出飞了 - # 初始化日程 - bot_schedule.initialize( - name=global_config.BOT_NICKNAME, - personality=global_config.personality_core, - behavior=global_config.PROMPT_SCHEDULE_GEN, - interval=global_config.SCHEDULE_DOING_UPDATE_INTERVAL, - ) - asyncio.create_task(bot_schedule.mai_schedule_start()) - # 将bot.py中的chat_bot.message_process消息处理函数注册到api.py的消息处理基类中 self.app.register_message_handler(chat_bot.message_process) @@ -115,6 +104,9 @@ class MainSystem: ) logger.success("个体特征初始化成功") + # 初始化表达方式 + await expression_learner.extract_and_store_personality_expressions() + try: # 启动全局消息管理器 (负责消息发送/排队) await message_manager.start() @@ -137,6 +129,7 @@ class MainSystem: self.build_memory_task(), self.forget_memory_task(), self.consolidate_memory_task(), + self.learn_and_store_expression_task(), self.remove_recalled_message_task(), emoji_manager.start_periodic_check_register(), self.app.run(), @@ -170,6 +163,21 @@ class MainSystem: await HippocampusManager.get_instance().consolidate_memory() print("\033[1;32m[记忆整合]\033[0m 记忆整合完成") + @staticmethod + async def learn_and_store_expression_task(): + """学习并存储表达方式任务""" + while True: + await asyncio.sleep(60) + print("\033[1;32m[表达方式学习]\033[0m 开始学习表达方式...") + await expression_learner.learn_and_store_expression() + print("\033[1;32m[表达方式学习]\033[0m 表达方式学习完成") + + # async def print_mood_task(self): + # """打印情绪状态""" + # while True: + # self.mood_manager.print_mood_status() + # await asyncio.sleep(60) + @staticmethod async def remove_recalled_message_task(): """删除撤回消息任务""" diff --git a/src/plugins/__init__.py b/src/plugins/__init__.py deleted file mode 100644 index 631d9bbb7..000000000 --- a/src/plugins/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -MaiMBot插件系统 -包含聊天、情绪、记忆、日程等功能模块 -""" - -from .chat.chat_stream import chat_manager -from .emoji_system.emoji_manager import emoji_manager -from .person_info.relationship_manager import relationship_manager -from .willing.willing_manager import willing_manager -from .schedule.schedule_generator import bot_schedule - -# 导出主要组件供外部使用 -__all__ = [ - "chat_manager", - "emoji_manager", - "relationship_manager", - "willing_manager", - "bot_schedule", -] diff --git a/src/plugins/chat/mapper.py b/src/plugins/chat/mapper.py deleted file mode 100644 index 2832d9914..000000000 --- a/src/plugins/chat/mapper.py +++ /dev/null @@ -1,190 +0,0 @@ -emojimapper = { - 5: "流泪", - 311: "打 call", - 312: "变形", - 314: "仔细分析", - 317: "菜汪", - 318: "崇拜", - 319: "比心", - 320: "庆祝", - 324: "吃糖", - 325: "惊吓", - 337: "花朵脸", - 338: "我想开了", - 339: "舔屏", - 341: "打招呼", - 342: "酸Q", - 343: "我方了", - 344: "大怨种", - 345: "红包多多", - 346: "你真棒棒", - 181: "戳一戳", - 74: "太阳", - 75: "月亮", - 351: "敲敲", - 349: "坚强", - 350: "贴贴", - 395: "略略略", - 114: "篮球", - 326: "生气", - 53: "蛋糕", - 137: "鞭炮", - 333: "烟花", - 424: "续标识", - 415: "划龙舟", - 392: "龙年快乐", - 425: "求放过", - 427: "偷感", - 426: "玩火", - 419: "火车", - 429: "蛇年快乐", - 14: "微笑", - 1: "撇嘴", - 2: "色", - 3: "发呆", - 4: 
"得意", - 6: "害羞", - 7: "闭嘴", - 8: "睡", - 9: "大哭", - 10: "尴尬", - 11: "发怒", - 12: "调皮", - 13: "呲牙", - 0: "惊讶", - 15: "难过", - 16: "酷", - 96: "冷汗", - 18: "抓狂", - 19: "吐", - 20: "偷笑", - 21: "可爱", - 22: "白眼", - 23: "傲慢", - 24: "饥饿", - 25: "困", - 26: "惊恐", - 27: "流汗", - 28: "憨笑", - 29: "悠闲", - 30: "奋斗", - 31: "咒骂", - 32: "疑问", - 33: "嘘", - 34: "晕", - 35: "折磨", - 36: "衰", - 37: "骷髅", - 38: "敲打", - 39: "再见", - 97: "擦汗", - 98: "抠鼻", - 99: "鼓掌", - 100: "糗大了", - 101: "坏笑", - 102: "左哼哼", - 103: "右哼哼", - 104: "哈欠", - 105: "鄙视", - 106: "委屈", - 107: "快哭了", - 108: "阴险", - 305: "右亲亲", - 109: "左亲亲", - 110: "吓", - 111: "可怜", - 172: "眨眼睛", - 182: "笑哭", - 179: "doge", - 173: "泪奔", - 174: "无奈", - 212: "托腮", - 175: "卖萌", - 178: "斜眼笑", - 177: "喷血", - 176: "小纠结", - 183: "我最美", - 262: "脑阔疼", - 263: "沧桑", - 264: "捂脸", - 265: "辣眼睛", - 266: "哦哟", - 267: "头秃", - 268: "问号脸", - 269: "暗中观察", - 270: "emm", - 271: "吃瓜", - 272: "呵呵哒", - 277: "汪汪", - 307: "喵喵", - 306: "牛气冲天", - 281: "无眼笑", - 282: "敬礼", - 283: "狂笑", - 284: "面无表情", - 285: "摸鱼", - 293: "摸锦鲤", - 286: "魔鬼笑", - 287: "哦", - 289: "睁眼", - 294: "期待", - 297: "拜谢", - 298: "元宝", - 299: "牛啊", - 300: "胖三斤", - 323: "嫌弃", - 332: "举牌牌", - 336: "豹富", - 353: "拜托", - 355: "耶", - 356: "666", - 354: "尊嘟假嘟", - 352: "咦", - 357: "裂开", - 334: "虎虎生威", - 347: "大展宏兔", - 303: "右拜年", - 302: "左拜年", - 295: "拿到红包", - 49: "拥抱", - 66: "爱心", - 63: "玫瑰", - 64: "凋谢", - 187: "幽灵", - 146: "爆筋", - 116: "示爱", - 67: "心碎", - 60: "咖啡", - 185: "羊驼", - 76: "赞", - 124: "OK", - 118: "抱拳", - 78: "握手", - 119: "勾引", - 79: "胜利", - 120: "拳头", - 121: "差劲", - 77: "踩", - 123: "NO", - 201: "点赞", - 273: "我酸了", - 46: "猪头", - 112: "菜刀", - 56: "刀", - 169: "手枪", - 171: "茶", - 59: "便便", - 144: "喝彩", - 147: "棒棒糖", - 89: "西瓜", - 41: "发抖", - 125: "转圈", - 42: "爱情", - 43: "跳跳", - 86: "怄火", - 129: "挥手", - 85: "飞吻", - 428: "收到", - 423: "复兴号", - 432: "灵蛇献瑞", -} diff --git a/src/plugins/heartFC_chat/heartFC_Cycleinfo.py b/src/plugins/heartFC_chat/heartFC_Cycleinfo.py deleted file mode 100644 index 966773841..000000000 --- a/src/plugins/heartFC_chat/heartFC_Cycleinfo.py +++ /dev/null @@ -1,74 +0,0 @@ -import time -from typing import List, Optional, Dict, Any - - -class CycleInfo: - """循环信息记录类""" - - def __init__(self, cycle_id: int): - self.cycle_id = cycle_id - self.start_time = time.time() - self.end_time: Optional[float] = None - self.action_taken = False - self.action_type = "unknown" - self.reasoning = "" - self.timers: Dict[str, float] = {} - self.thinking_id = "" - self.replanned = False - - # 添加响应信息相关字段 - self.response_info: Dict[str, Any] = { - "response_text": [], # 回复的文本列表 - "emoji_info": "", # 表情信息 - "anchor_message_id": "", # 锚点消息ID - "reply_message_ids": [], # 回复消息ID列表 - "sub_mind_thinking": "", # 子思维思考内容 - } - - def to_dict(self) -> Dict[str, Any]: - """将循环信息转换为字典格式""" - return { - "cycle_id": self.cycle_id, - "start_time": self.start_time, - "end_time": self.end_time, - "action_taken": self.action_taken, - "action_type": self.action_type, - "reasoning": self.reasoning, - "timers": self.timers, - "thinking_id": self.thinking_id, - "response_info": self.response_info, - } - - def complete_cycle(self): - """完成循环,记录结束时间""" - self.end_time = time.time() - - def set_action_info(self, action_type: str, reasoning: str, action_taken: bool): - """设置动作信息""" - self.action_type = action_type - self.reasoning = reasoning - self.action_taken = action_taken - - def set_thinking_id(self, thinking_id: str): - """设置思考消息ID""" - self.thinking_id = thinking_id - - def set_response_info( - self, - response_text: Optional[List[str]] = None, - 
emoji_info: Optional[str] = None, - anchor_message_id: Optional[str] = None, - reply_message_ids: Optional[List[str]] = None, - sub_mind_thinking: Optional[str] = None, - ): - """设置响应信息""" - if response_text is not None: - self.response_info["response_text"] = response_text - if emoji_info is not None: - self.response_info["emoji_info"] = emoji_info - if anchor_message_id is not None: - self.response_info["anchor_message_id"] = anchor_message_id - if reply_message_ids is not None: - self.response_info["reply_message_ids"] = reply_message_ids - if sub_mind_thinking is not None: - self.response_info["sub_mind_thinking"] = sub_mind_thinking diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py deleted file mode 100644 index 83abfbbed..000000000 --- a/src/plugins/heartFC_chat/heartFC_chat.py +++ /dev/null @@ -1,1380 +0,0 @@ -import asyncio -import contextlib -import json # <--- 确保导入 json -import random # <--- 添加导入 -import time -import traceback -from collections import deque -from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine - -from rich.traceback import install - -from src.common.logger_manager import get_logger -from src.config.config import global_config -from src.heart_flow.observation import Observation -from src.heart_flow.sub_mind import SubMind -from src.heart_flow.utils_chat import get_chat_type_and_target_info -from src.manager.mood_manager import mood_manager -from src.plugins.chat.chat_stream import ChatStream -from src.plugins.chat.chat_stream import chat_manager -from src.plugins.chat.message import MessageRecv, BaseMessageInfo, MessageThinking, MessageSending -from src.plugins.chat.message import Seg # Local import needed after move -from src.plugins.chat.message import UserInfo -from src.plugins.chat.utils import process_llm_response -from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move -from src.plugins.emoji_system.emoji_manager import emoji_manager -from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo -from src.plugins.heartFC_chat.heartflow_prompt_builder import global_prompt_manager, prompt_builder -from src.plugins.models.utils_model import LLMRequest -from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager -from src.plugins.utils.chat_message_builder import num_new_messages_since -from src.plugins.utils.timer_calculator import Timer # <--- Import Timer -from .heartFC_sender import HeartFCSender - -install(extra_lines=3) - - -WAITING_TIME_THRESHOLD = 300 # 等待新消息时间阈值,单位秒 - -EMOJI_SEND_PRO = 0.3 # 设置一个概率,比如 30% 才真的发 - -CONSECUTIVE_NO_REPLY_THRESHOLD = 3 # 连续不回复的阈值 - - -logger = get_logger("hfc") # Logger Name Changed - - -# 默认动作定义 -DEFAULT_ACTIONS = {"no_reply": "不回复", "text_reply": "文本回复, 可选附带表情", "emoji_reply": "仅表情回复"} - - -class ActionManager: - """动作管理器:控制每次决策可以使用的动作""" - - def __init__(self): - # 初始化为默认动作集 - self._available_actions: Dict[str, str] = DEFAULT_ACTIONS.copy() - self._original_actions_backup: Optional[Dict[str, str]] = None # 用于临时移除时的备份 - - def get_available_actions(self) -> Dict[str, str]: - """获取当前可用的动作集""" - return self._available_actions.copy() # 返回副本以防外部修改 - - def add_action(self, action_name: str, description: str) -> bool: - """ - 添加新的动作 - - 参数: - action_name: 动作名称 - description: 动作描述 - - 返回: - bool: 是否添加成功 - """ - if action_name in self._available_actions: - return False - self._available_actions[action_name] = description - return True - - def remove_action(self, action_name: str) -> bool: - """ - 移除指定动作 - - 
参数: - action_name: 动作名称 - - 返回: - bool: 是否移除成功 - """ - if action_name not in self._available_actions: - return False - del self._available_actions[action_name] - return True - - def temporarily_remove_actions(self, actions_to_remove: List[str]): - """ - 临时移除指定的动作,备份原始动作集。 - 如果已经有备份,则不重复备份。 - """ - if self._original_actions_backup is None: - self._original_actions_backup = self._available_actions.copy() - - actions_actually_removed = [] - for action_name in actions_to_remove: - if action_name in self._available_actions: - del self._available_actions[action_name] - actions_actually_removed.append(action_name) - # logger.debug(f"临时移除了动作: {actions_actually_removed}") # 可选日志 - - def restore_actions(self): - """ - 恢复之前备份的原始动作集。 - """ - if self._original_actions_backup is not None: - self._available_actions = self._original_actions_backup.copy() - self._original_actions_backup = None - # logger.debug("恢复了原始动作集") # 可选日志 - - def clear_actions(self): - """清空所有动作""" - self._available_actions.clear() - - def reset_to_default(self): - """重置为默认动作集""" - self._available_actions = DEFAULT_ACTIONS.copy() - - -# 在文件开头添加自定义异常类 -class HeartFCError(Exception): - """麦麦聊天系统基础异常类""" - - pass - - -class PlannerError(HeartFCError): - """规划器异常""" - - pass - - -class ReplierError(HeartFCError): - """回复器异常""" - - pass - - -class SenderError(HeartFCError): - """发送器异常""" - - pass - - -async def _handle_cycle_delay(action_taken_this_cycle: bool, cycle_start_time: float, log_prefix: str): - """处理循环延迟""" - cycle_duration = time.monotonic() - cycle_start_time - - try: - sleep_duration = 0.0 - if not action_taken_this_cycle and cycle_duration < 1: - sleep_duration = 1 - cycle_duration - elif cycle_duration < 0.2: - sleep_duration = 0.2 - - if sleep_duration > 0: - await asyncio.sleep(sleep_duration) - - except asyncio.CancelledError: - logger.info(f"{log_prefix} Sleep interrupted, loop likely cancelling.") - raise - - -class HeartFChatting: - """ - 管理一个连续的Plan-Replier-Sender循环 - 用于在特定聊天流中生成回复。 - 其生命周期现在由其关联的 SubHeartflow 的 FOCUSED 状态控制。 - """ - - def __init__( - self, - chat_id: str, - sub_mind: SubMind, - observations: list[Observation], - on_consecutive_no_reply_callback: Callable[[], Coroutine[None, None, None]], - ): - """ - HeartFChatting 初始化函数 - - 参数: - chat_id: 聊天流唯一标识符(如stream_id) - sub_mind: 关联的子思维 - observations: 关联的观察列表 - on_consecutive_no_reply_callback: 连续不回复达到阈值时调用的异步回调函数 - """ - # 基础属性 - self.stream_id: str = chat_id # 聊天流ID - self.chat_stream: Optional[ChatStream] = None # 关联的聊天流 - self.sub_mind: SubMind = sub_mind # 关联的子思维 - self.observations: List[Observation] = observations # 关联的观察列表,用于监控聊天流状态 - self.on_consecutive_no_reply_callback = on_consecutive_no_reply_callback - - # 日志前缀 - self.log_prefix: str = str(chat_id) # Initial default, will be updated - - # --- Initialize attributes (defaults) --- - self.is_group_chat: bool = False - self.chat_target_info: Optional[dict] = None - # --- End Initialization --- - - # 动作管理器 - self.action_manager = ActionManager() - - # 初始化状态控制 - self._initialized = False - self._processing_lock = asyncio.Lock() - - # --- 移除 gpt_instance, 直接初始化 LLM 模型 --- - # self.gpt_instance = HeartFCGenerator() # <-- 移除 - self.model_normal = LLMRequest( # <-- 新增 LLM 初始化 - model=global_config.llm_normal, - temperature=global_config.llm_normal["temp"], - max_tokens=256, - request_type="response_heartflow", - ) - self.heart_fc_sender = HeartFCSender() - - # LLM规划器配置 - self.planner_llm = LLMRequest( - model=global_config.llm_plan, - max_tokens=1000, - request_type="action_planning", # 用于动作规划 - ) - 
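`ActionManager` above is a small registry the planner edits per cycle: actions can be removed temporarily (backing up the full set once) and restored afterwards, which the planner does in a `finally` block. A usage sketch of that remove-then-restore contract, using a trimmed-down stand-in class rather than the class itself:

```python
from typing import Dict, List, Optional

DEFAULT_ACTIONS = {"no_reply": "do not reply",
                   "text_reply": "reply with text",
                   "emoji_reply": "reply with an emoji"}


class MiniActionManager:
    """Reduced stand-in for the ActionManager shown above, for illustration only."""

    def __init__(self) -> None:
        self._actions: Dict[str, str] = DEFAULT_ACTIONS.copy()
        self._backup: Optional[Dict[str, str]] = None

    def available(self) -> Dict[str, str]:
        return self._actions.copy()

    def temporarily_remove(self, names: List[str]) -> None:
        if self._backup is None:              # back up only once per removal cycle
            self._backup = self._actions.copy()
        for name in names:
            self._actions.pop(name, None)

    def restore(self) -> None:
        if self._backup is not None:
            self._actions = self._backup
            self._backup = None


manager = MiniActionManager()
try:
    manager.temporarily_remove(["text_reply", "emoji_reply"])  # e.g. after several replies in a row
    print(list(manager.available()))          # ['no_reply']
finally:
    manager.restore()                         # the next cycle always sees the full set again
print(list(manager.available()))              # ['no_reply', 'text_reply', 'emoji_reply']
```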
- # 循环控制内部状态 - self._loop_active: bool = False # 循环是否正在运行 - self._loop_task: Optional[asyncio.Task] = None # 主循环任务 - - # 添加循环信息管理相关的属性 - self._cycle_counter = 0 - self._cycle_history: Deque[CycleInfo] = deque(maxlen=10) # 保留最近10个循环的信息 - self._current_cycle: Optional[CycleInfo] = None - self._lian_xu_bu_hui_fu_ci_shu: int = 0 # <--- 新增:连续不回复计数器 - self._shutting_down: bool = False # <--- 新增:关闭标志位 - self._lian_xu_deng_dai_shi_jian: float = 0.0 # <--- 新增:累计等待时间 - - async def _initialize(self) -> bool: - """ - 懒初始化,解析chat_stream, 获取聊天类型和目标信息。 - """ - if self._initialized: - return True - - # --- Use utility function to determine chat type and fetch info --- - # Note: get_chat_type_and_target_info handles getting the chat_stream internally - self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.stream_id) - - # Update log prefix based on potential stream name (if needed, or get it from chat_stream if util doesn't return it) - # Assuming get_chat_type_and_target_info focuses only on type/target - # We still need the chat_stream object itself for other operations - try: - self.chat_stream = await asyncio.to_thread(chat_manager.get_stream, self.stream_id) - if not self.chat_stream: - logger.error( - f"[HFC:{self.stream_id}] 获取ChatStream失败 during _initialize, though util func might have succeeded earlier." - ) - return False # Cannot proceed without chat_stream object - # Update log prefix using the fetched stream object - self.log_prefix = f"[{chat_manager.get_stream_name(self.stream_id) or self.stream_id}]" - except Exception as e: - logger.error(f"[HFC:{self.stream_id}] 获取ChatStream时出错 in _initialize: {e}") - return False - - # --- End using utility function --- - - self._initialized = True - logger.debug(f"{self.log_prefix} 麦麦感觉到了,可以开始认真水群 ") - return True - - async def start(self): - """ - 启动 HeartFChatting 的主循环。 - 注意:调用此方法前必须确保已经成功初始化。 - """ - logger.info(f"{self.log_prefix} 开始认真水群(HFC)...") - await self._start_loop_if_needed() - - async def _start_loop_if_needed(self): - """检查是否需要启动主循环,如果未激活则启动。""" - # 如果循环已经激活,直接返回 - if self._loop_active: - return - - # 标记为活动状态,防止重复启动 - self._loop_active = True - - # 检查是否已有任务在运行(理论上不应该,因为 _loop_active=False) - if self._loop_task and not self._loop_task.done(): - logger.warning(f"{self.log_prefix} 发现之前的循环任务仍在运行(不符合预期)。取消旧任务。") - self._loop_task.cancel() - try: - # 等待旧任务确实被取消 - await asyncio.wait_for(self._loop_task, timeout=0.5) - except (asyncio.CancelledError, asyncio.TimeoutError): - pass # 忽略取消或超时错误 - self._loop_task = None # 清理旧任务引用 - - logger.debug(f"{self.log_prefix} 启动认真水群(HFC)主循环...") - # 创建新的循环任务 - self._loop_task = asyncio.create_task(self._hfc_loop()) - # 添加完成回调 - self._loop_task.add_done_callback(self._handle_loop_completion) - - def _handle_loop_completion(self, task: asyncio.Task): - """当 _hfc_loop 任务完成时执行的回调。""" - try: - exception = task.exception() - if exception: - logger.error(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天(异常): {exception}") - logger.error(traceback.format_exc()) # Log full traceback for exceptions - else: - # Loop completing normally now means it was cancelled/shutdown externally - logger.info(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天 (外部停止)") - except asyncio.CancelledError: - logger.info(f"{self.log_prefix} HeartFChatting: 麦麦脱离了聊天(任务取消)") - finally: - self._loop_active = False - self._loop_task = None - if self._processing_lock.locked(): - logger.warning(f"{self.log_prefix} HeartFChatting: 处理锁在循环结束时仍被锁定,强制释放。") - self._processing_lock.release() - - async def _hfc_loop(self): - 
"""主循环,持续进行计划并可能回复消息,直到被外部取消。""" - try: - while True: # 主循环 - logger.debug(f"{self.log_prefix} 开始第{self._cycle_counter}次循环") - # --- 在循环开始处检查关闭标志 --- - if self._shutting_down: - logger.info(f"{self.log_prefix} 检测到关闭标志,退出 HFC 循环。") - break - # -------------------------------- - - # 创建新的循环信息 - self._cycle_counter += 1 - self._current_cycle = CycleInfo(self._cycle_counter) - - # 初始化周期状态 - cycle_timers = {} - loop_cycle_start_time = time.monotonic() - - # 执行规划和处理阶段 - async with self._get_cycle_context() as acquired_lock: - if not acquired_lock: - # 如果未能获取锁(理论上不太可能,除非 shutdown 过程中释放了但又被抢了?) - # 或者也可以在这里再次检查 self._shutting_down - if self._shutting_down: - break # 再次检查,确保退出 - logger.warning(f"{self.log_prefix} 未能获取循环处理锁,跳过本次循环。") - await asyncio.sleep(0.1) # 短暂等待避免空转 - continue - - # 记录规划开始时间点 - planner_start_db_time = time.time() - - # 主循环:思考->决策->执行 - action_taken, thinking_id = await self._think_plan_execute_loop(cycle_timers, planner_start_db_time) - - # 更新循环信息 - self._current_cycle.set_thinking_id(thinking_id) - self._current_cycle.timers = cycle_timers - - # 防止循环过快消耗资源 - await _handle_cycle_delay(action_taken, loop_cycle_start_time, self.log_prefix) - - # 完成当前循环并保存历史 - self._current_cycle.complete_cycle() - self._cycle_history.append(self._current_cycle) - - # 记录循环信息和计时器结果 - timer_strings = [] - for name, elapsed in cycle_timers.items(): - formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" - timer_strings.append(f"{name}: {formatted_time}") - - logger.debug( - f"{self.log_prefix} 第 #{self._current_cycle.cycle_id}次思考完成," - f"耗时: {self._current_cycle.end_time - self._current_cycle.start_time:.2f}秒, " - f"动作: {self._current_cycle.action_type}" - + (f"\n计时器详情: {'; '.join(timer_strings)}" if timer_strings else "") - ) - - except asyncio.CancelledError: - # 设置了关闭标志位后被取消是正常流程 - if not self._shutting_down: - logger.warning(f"{self.log_prefix} HeartFChatting: 麦麦的认真水群(HFC)循环意外被取消") - else: - logger.info(f"{self.log_prefix} HeartFChatting: 麦麦的认真水群(HFC)循环已取消 (正常关闭)") - except Exception as e: - logger.error(f"{self.log_prefix} HeartFChatting: 意外错误: {e}") - logger.error(traceback.format_exc()) - - @contextlib.asynccontextmanager - async def _get_cycle_context(self): - """ - 循环周期的上下文管理器 - - 用于确保资源的正确获取和释放: - 1. 获取处理锁 - 2. 执行操作 - 3. 
释放锁 - """ - acquired = False - try: - await self._processing_lock.acquire() - acquired = True - yield acquired - finally: - if acquired and self._processing_lock.locked(): - self._processing_lock.release() - - async def _check_new_messages(self, start_time: float) -> bool: - """ - 检查从指定时间点后是否有新消息 - - 参数: - start_time: 开始检查的时间点 - - 返回: - bool: 是否有新消息 - """ - try: - new_msg_count = num_new_messages_since(self.stream_id, start_time) - if new_msg_count > 0: - logger.info(f"{self.log_prefix} 检测到{new_msg_count}条新消息") - return True - return False - except Exception as e: - logger.error(f"{self.log_prefix} 检查新消息时出错: {e}") - return False - - async def _think_plan_execute_loop(self, cycle_timers: dict, planner_start_db_time: float) -> tuple[bool, str]: - """执行规划阶段""" - try: - # think:思考 - current_mind = await self._get_submind_thinking(cycle_timers) - # 记录子思维思考内容 - if self._current_cycle: - self._current_cycle.set_response_info(sub_mind_thinking=current_mind) - - # plan:决策 - with Timer("决策", cycle_timers): - planner_result = await self._planner(current_mind, cycle_timers) - - # 效果不太好,还没处理replan导致观察时间点改变的问题 - - # action = planner_result.get("action", "error") - # reasoning = planner_result.get("reasoning", "未提供理由") - - # self._current_cycle.set_action_info(action, reasoning, False) - - # 在获取规划结果后检查新消息 - - # if await self._check_new_messages(planner_start_db_time): - # if random.random() < 0.2: - # logger.info(f"{self.log_prefix} 看到了新消息,麦麦决定重新观察和规划...") - # # 重新规划 - # with Timer("重新决策", cycle_timers): - # self._current_cycle.replanned = True - # planner_result = await self._planner(current_mind, cycle_timers, is_re_planned=True) - # logger.info(f"{self.log_prefix} 重新规划完成.") - - # 解析规划结果 - action = planner_result.get("action", "error") - reasoning = planner_result.get("reasoning", "未提供理由") - # 更新循环信息 - self._current_cycle.set_action_info(action, reasoning, True) - - # 处理LLM错误 - if planner_result.get("llm_error"): - logger.error(f"{self.log_prefix} LLM失败: {reasoning}") - return False, "" - - # execute:执行 - - # 在此处添加日志记录 - if action == "text_reply": - action_str = "回复" - elif action == "emoji_reply": - action_str = "回复表情" - else: - action_str = "不回复" - - logger.info(f"{self.log_prefix} 麦麦决定'{action_str}', 原因'{reasoning}'") - - return await self._handle_action( - action, reasoning, planner_result.get("emoji_query", ""), cycle_timers, planner_start_db_time - ) - - except PlannerError as e: - logger.error(f"{self.log_prefix} 规划错误: {e}") - # 更新循环信息 - self._current_cycle.set_action_info("error", str(e), False) - return False, "" - - async def _handle_action( - self, action: str, reasoning: str, emoji_query: str, cycle_timers: dict, planner_start_db_time: float - ) -> tuple[bool, str]: - """ - 处理规划动作 - - 参数: - action: 动作类型 - reasoning: 决策理由 - emoji_query: 表情查询 - cycle_timers: 计时器字典 - planner_start_db_time: 规划开始时间 - - 返回: - tuple[bool, str]: (是否执行了动作, 思考消息ID) - """ - action_handlers = { - "text_reply": self._handle_text_reply, - "emoji_reply": self._handle_emoji_reply, - "no_reply": self._handle_no_reply, - } - - handler = action_handlers.get(action) - if not handler: - logger.warning(f"{self.log_prefix} 未知动作: {action}, 原因: {reasoning}") - return False, "" - - try: - if action == "text_reply": - return await handler(reasoning, emoji_query, cycle_timers) - elif action == "emoji_reply": - return await handler(reasoning, emoji_query), "" - else: # no_reply - return await handler(reasoning, planner_start_db_time, cycle_timers), "" - except HeartFCError as e: - logger.error(f"{self.log_prefix} 处理{action}时出错: {e}") - 
# 出错时也重置计数器 - self._lian_xu_bu_hui_fu_ci_shu = 0 - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 - return False, "" - - async def _handle_text_reply(self, reasoning: str, emoji_query: str, cycle_timers: dict) -> tuple[bool, str]: - """ - 处理文本回复 - - 工作流程: - 1. 获取锚点消息 - 2. 创建思考消息 - 3. 生成回复 - 4. 发送消息 - - 参数: - reasoning: 回复原因 - emoji_query: 表情查询 - cycle_timers: 计时器字典 - - 返回: - tuple[bool, str]: (是否回复成功, 思考消息ID) - """ - # 重置连续不回复计数器 - self._lian_xu_bu_hui_fu_ci_shu = 0 - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 - - # 获取锚点消息 - anchor_message = await self._get_anchor_message() - if not anchor_message: - raise PlannerError("无法获取锚点消息") - - # 创建思考消息 - thinking_id = await self._create_thinking_message(anchor_message) - if not thinking_id: - raise PlannerError("无法创建思考消息") - - try: - # 生成回复 - with Timer("生成回复", cycle_timers): - reply = await self._replier_work( - anchor_message=anchor_message, - thinking_id=thinking_id, - reason=reasoning, - ) - - if not reply: - raise ReplierError("回复生成失败") - - # 发送消息 - - with Timer("发送消息", cycle_timers): - await self._sender( - thinking_id=thinking_id, - anchor_message=anchor_message, - response_set=reply, - send_emoji=emoji_query, - ) - - return True, thinking_id - - except (ReplierError, SenderError) as e: - logger.error(f"{self.log_prefix} 回复失败: {e}") - return True, thinking_id # 仍然返回thinking_id以便跟踪 - - async def _handle_emoji_reply(self, reasoning: str, emoji_query: str) -> bool: - """ - 处理表情回复 - - 工作流程: - 1. 获取锚点消息 - 2. 发送表情 - - 参数: - reasoning: 回复原因 - emoji_query: 表情查询 - - 返回: - bool: 是否发送成功 - """ - logger.info(f"{self.log_prefix} 决定回复表情({emoji_query}): {reasoning}") - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间(即使不计数也保持一致性) - - try: - anchor = await self._get_anchor_message() - if not anchor: - raise PlannerError("无法获取锚点消息") - - await self._handle_emoji(anchor, [], emoji_query) - return True - - except Exception as e: - logger.error(f"{self.log_prefix} 表情发送失败: {e}") - return False - - async def _handle_no_reply(self, reasoning: str, planner_start_db_time: float, cycle_timers: dict) -> bool: - """ - 处理不回复的情况 - - 工作流程: - 1. 等待新消息、超时或关闭信号 - 2. 根据等待结果更新连续不回复计数 - 3. 
如果达到阈值,触发回调 - - 参数: - reasoning: 不回复的原因 - planner_start_db_time: 规划开始时间 - cycle_timers: 计时器字典 - - 返回: - bool: 是否成功处理 - """ - logger.info(f"{self.log_prefix} 决定不回复: {reasoning}") - - observation = self.observations[0] if self.observations else None - - try: - with Timer("等待新消息", cycle_timers): - # 等待新消息、超时或关闭信号,并获取结果 - await self._wait_for_new_message(observation, planner_start_db_time, self.log_prefix) - # 从计时器获取实际等待时间 - current_waiting = cycle_timers.get("等待新消息", 0.0) - - if not self._shutting_down: - self._lian_xu_bu_hui_fu_ci_shu += 1 - self._lian_xu_deng_dai_shi_jian += current_waiting # 累加等待时间 - logger.debug( - f"{self.log_prefix} 连续不回复计数增加: {self._lian_xu_bu_hui_fu_ci_shu}/{CONSECUTIVE_NO_REPLY_THRESHOLD}, " - f"本次等待: {current_waiting:.2f}秒, 累计等待: {self._lian_xu_deng_dai_shi_jian:.2f}秒" - ) - - # 检查是否同时达到次数和时间阈值 - time_threshold = 0.66 * WAITING_TIME_THRESHOLD * CONSECUTIVE_NO_REPLY_THRESHOLD - if ( - self._lian_xu_bu_hui_fu_ci_shu >= CONSECUTIVE_NO_REPLY_THRESHOLD - and self._lian_xu_deng_dai_shi_jian >= time_threshold - ): - logger.info( - f"{self.log_prefix} 连续不回复达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次) " - f"且累计等待时间达到 {self._lian_xu_deng_dai_shi_jian:.2f}秒 (阈值 {time_threshold}秒)," - f"调用回调请求状态转换" - ) - # 调用回调。注意:这里不重置计数器和时间,依赖回调函数成功改变状态来隐式重置上下文。 - await self.on_consecutive_no_reply_callback() - elif self._lian_xu_bu_hui_fu_ci_shu >= CONSECUTIVE_NO_REPLY_THRESHOLD: - # 仅次数达到阈值,但时间未达到 - logger.debug( - f"{self.log_prefix} 连续不回复次数达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次) " - f"但累计等待时间 {self._lian_xu_deng_dai_shi_jian:.2f}秒 未达到时间阈值 ({time_threshold}秒),暂不调用回调" - ) - # else: 次数和时间都未达到阈值,不做处理 - - return True - - except asyncio.CancelledError: - # 如果在等待过程中任务被取消(可能是因为 shutdown) - logger.info(f"{self.log_prefix} 处理 'no_reply' 时等待被中断 (CancelledError)") - # 让异常向上传播,由 _hfc_loop 的异常处理逻辑接管 - raise - except Exception as e: # 捕获调用管理器或其他地方可能发生的错误 - logger.error(f"{self.log_prefix} 处理 'no_reply' 时发生错误: {e}") - logger.error(traceback.format_exc()) - # 发生意外错误时,可以选择是否重置计数器,这里选择不重置 - return False # 表示动作未成功 - - async def _wait_for_new_message(self, observation, planner_start_db_time: float, log_prefix: str) -> bool: - """ - 等待新消息 或 检测到关闭信号 - - 参数: - observation: 观察实例 - planner_start_db_time: 开始等待的时间 - log_prefix: 日志前缀 - - 返回: - bool: 是否检测到新消息 (如果因关闭信号退出则返回 False) - """ - wait_start_time = time.monotonic() - while True: - # --- 在每次循环开始时检查关闭标志 --- - if self._shutting_down: - logger.info(f"{log_prefix} 等待新消息时检测到关闭信号,中断等待。") - return False # 表示因为关闭而退出 - # ----------------------------------- - - # 检查新消息 - if await observation.has_new_messages_since(planner_start_db_time): - logger.info(f"{log_prefix} 检测到新消息") - return True - - # 检查超时 (放在检查新消息和关闭之后) - if time.monotonic() - wait_start_time > WAITING_TIME_THRESHOLD: - logger.warning(f"{log_prefix} 等待新消息超时({WAITING_TIME_THRESHOLD}秒)") - return False - - try: - # 短暂休眠,让其他任务有机会运行,并能更快响应取消或关闭 - await asyncio.sleep(0.5) # 缩短休眠时间 - except asyncio.CancelledError: - # 如果在休眠时被取消,再次检查关闭标志 - # 如果是正常关闭,则不需要警告 - if not self._shutting_down: - logger.warning(f"{log_prefix} _wait_for_new_message 的休眠被意外取消") - # 无论如何,重新抛出异常,让上层处理 - raise - - async def _log_cycle_timers(self, cycle_timers: dict, log_prefix: str): - """记录循环周期的计时器结果""" - if cycle_timers: - timer_strings = [] - for name, elapsed in cycle_timers.items(): - formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" - timer_strings.append(f"{name}: {formatted_time}") - - if timer_strings: - # 在记录前检查关闭标志 - if not self._shutting_down: - logger.debug(f"{log_prefix} 该次决策耗时: {'; '.join(timer_strings)}") 
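With the module constants above (`WAITING_TIME_THRESHOLD = 300`, `CONSECUTIVE_NO_REPLY_THRESHOLD = 3`), the time gate works out to 0.66 × 300 × 3 = 594 seconds, and the exit callback fires only when the consecutive no-reply count and the accumulated waiting time both clear their gates. A sketch of just that check:

```python
WAITING_TIME_THRESHOLD = 300            # seconds per wait, as defined in the module above
CONSECUTIVE_NO_REPLY_THRESHOLD = 3


def should_leave_focus(no_reply_count: int, total_wait_seconds: float) -> bool:
    """True only when both the consecutive no-reply count and the accumulated
    waiting time reach their thresholds (time gate = 0.66 * 300 * 3 = 594 s)."""
    time_gate = 0.66 * WAITING_TIME_THRESHOLD * CONSECUTIVE_NO_REPLY_THRESHOLD
    return (no_reply_count >= CONSECUTIVE_NO_REPLY_THRESHOLD
            and total_wait_seconds >= time_gate)


print(should_leave_focus(3, 400.0))   # False: count is enough, but only 400 s < 594 s waited
print(should_leave_focus(3, 650.0))   # True: both gates cleared, callback would fire
```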
- - async def _get_submind_thinking(self, cycle_timers: dict) -> str: - """ - 获取子思维的思考结果 - - 返回: - str: 思考结果,如果思考失败则返回错误信息 - """ - try: - with Timer("观察", cycle_timers): - observation = self.observations[0] - await observation.observe() - - # 获取上一个循环的信息 - # last_cycle = self._cycle_history[-1] if self._cycle_history else None - - with Timer("思考", cycle_timers): - # 获取上一个循环的动作 - # 传递上一个循环的信息给 do_thinking_before_reply - current_mind, _past_mind = await self.sub_mind.do_thinking_before_reply( - history_cycle=self._cycle_history - ) - return current_mind - except Exception as e: - logger.error(f"{self.log_prefix}子心流 思考失败: {e}") - logger.error(traceback.format_exc()) - return "[思考时出错]" - - async def _planner(self, current_mind: str, cycle_timers: dict, is_re_planned: bool = False) -> Dict[str, Any]: - """ - 规划器 (Planner): 使用LLM根据上下文决定是否和如何回复。 - 重构为:让LLM返回结构化JSON文本,然后在代码中解析。 - - 参数: - current_mind: 子思维的当前思考结果 - cycle_timers: 计时器字典 - is_re_planned: 是否为重新规划 (此重构中暂时简化,不处理 is_re_planned 的特殊逻辑) - """ - logger.info(f"{self.log_prefix}开始想要做什么") - - actions_to_remove_temporarily = [] - # --- 检查历史动作并决定临时移除动作 (逻辑保持不变) --- - lian_xu_wen_ben_hui_fu = 0 - probability_roll = random.random() - for cycle in reversed(self._cycle_history): - if cycle.action_taken: - if cycle.action_type == "text_reply": - lian_xu_wen_ben_hui_fu += 1 - else: - break - if len(self._cycle_history) > 0 and cycle.cycle_id <= self._cycle_history[0].cycle_id + ( - len(self._cycle_history) - 4 - ): - break - logger.debug(f"{self.log_prefix}[Planner] 检测到连续文本回复次数: {lian_xu_wen_ben_hui_fu}") - - if lian_xu_wen_ben_hui_fu >= 3: - logger.info(f"{self.log_prefix}[Planner] 连续回复 >= 3 次,强制移除 text_reply 和 emoji_reply") - actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) - elif lian_xu_wen_ben_hui_fu == 2: - if probability_roll < 0.8: - logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (触发)") - actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) - else: - logger.info( - f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (未触发)" - ) - elif lian_xu_wen_ben_hui_fu == 1: - if probability_roll < 0.4: - logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (触发)") - actions_to_remove_temporarily.append("text_reply") - else: - logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (未触发)") - # --- 结束检查历史动作 --- - - # 获取观察信息 - observation = self.observations[0] - # if is_re_planned: # 暂时简化,不处理重新规划 - # await observation.observe() - observed_messages = observation.talking_message - observed_messages_str = observation.talking_message_str_truncate - - # --- 使用 LLM 进行决策 (JSON 输出模式) --- # - action = "no_reply" # 默认动作 - reasoning = "规划器初始化默认" - emoji_query = "" - llm_error = False # LLM 请求或解析错误标志 - - # 获取我们将传递给 prompt 构建器和用于验证的当前可用动作 - current_available_actions = self.action_manager.get_available_actions() - - try: - # --- 应用临时动作移除 --- - if actions_to_remove_temporarily: - self.action_manager.temporarily_remove_actions(actions_to_remove_temporarily) - # 更新 current_available_actions 以反映移除后的状态 - current_available_actions = self.action_manager.get_available_actions() - logger.debug( - f"{self.log_prefix}[Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(current_available_actions.keys())}" - ) - - # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- - prompt = await prompt_builder.build_planner_prompt( - is_group_chat=self.is_group_chat, # <-- Pass HFC state - chat_target_info=self.chat_target_info, # <-- Pass HFC state - 
cycle_history=self._cycle_history, # <-- Pass HFC state - observed_messages_str=observed_messages_str, # <-- Pass local variable - current_mind=current_mind, # <-- Pass argument - structured_info=self.sub_mind.structured_info_str, # <-- Pass SubMind info - current_available_actions=current_available_actions, # <-- Pass determined actions - ) - - # --- 调用 LLM (普通文本生成) --- - llm_content = None - try: - # 假设 LLMRequest 有 generate_response 方法返回 (content, reasoning, model_name) - # 我们只需要 content - # !! 注意:这里假设 self.planner_llm 有 generate_response 方法 - # !! 如果你的 LLMRequest 类使用的是其他方法名,请相应修改 - llm_content, _, _ = await self.planner_llm.generate_response(prompt=prompt) - logger.debug(f"{self.log_prefix}[Planner] LLM 原始 JSON 响应 (预期): {llm_content}") - except Exception as req_e: - logger.error(f"{self.log_prefix}[Planner] LLM 请求执行失败: {req_e}") - reasoning = f"LLM 请求失败: {req_e}" - llm_error = True - # 直接使用默认动作返回错误结果 - action = "no_reply" # 明确设置为默认值 - emoji_query = "" # 明确设置为空 - # 不再立即返回,而是继续执行 finally 块以恢复动作 - # return { ... } - - # --- 解析 LLM 返回的 JSON (仅当 LLM 请求未出错时进行) --- - if not llm_error and llm_content: - try: - # 尝试去除可能的 markdown 代码块标记 - cleaned_content = ( - llm_content.strip().removeprefix("```json").removeprefix("```").removesuffix("```").strip() - ) - if not cleaned_content: - raise json.JSONDecodeError("Cleaned content is empty", cleaned_content, 0) - parsed_json = json.loads(cleaned_content) - - # 提取决策,提供默认值 - extracted_action = parsed_json.get("action", "no_reply") - extracted_reasoning = parsed_json.get("reasoning", "LLM未提供理由") - extracted_emoji_query = parsed_json.get("emoji_query", "") - - # 验证动作是否在当前可用列表中 - # !! 使用调用 prompt 时实际可用的动作列表进行验证 - if extracted_action not in current_available_actions: - logger.warning( - f"{self.log_prefix}[Planner] LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" - ) - action = "no_reply" - reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}" - emoji_query = "" - # 检查 no_reply 是否也恰好被移除了 (极端情况) - if "no_reply" not in current_available_actions: - logger.error( - f"{self.log_prefix}[Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。" - ) - action = "error" # 回退到错误状态 - reasoning = "无法执行任何有效动作,包括 no_reply" - llm_error = True # 标记为严重错误 - else: - llm_error = False # 视为逻辑修正而非 LLM 错误 - else: - # 动作有效且可用 - action = extracted_action - reasoning = extracted_reasoning - emoji_query = extracted_emoji_query - llm_error = False # 解析成功 - logger.debug( - f"{self.log_prefix}[要做什么]\nPrompt:\n{prompt}\n\n决策结果 (来自JSON): {action}, 理由: {reasoning}, 表情查询: '{emoji_query}'" - ) - - except json.JSONDecodeError as json_e: - logger.warning( - f"{self.log_prefix}[Planner] 解析LLM响应JSON失败: {json_e}. LLM原始输出: '{llm_content}'" - ) - reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_reply'." - action = "no_reply" # 解析失败则默认不回复 - emoji_query = "" - llm_error = True # 标记解析错误 - except Exception as parse_e: - logger.error(f"{self.log_prefix}[Planner] 处理LLM响应时发生意外错误: {parse_e}") - reasoning = f"处理LLM响应时发生意外错误: {parse_e}. 将使用默认动作 'no_reply'." - action = "no_reply" - emoji_query = "" - llm_error = True - elif not llm_error and not llm_content: - # LLM 请求成功但返回空内容 - logger.warning(f"{self.log_prefix}[Planner] LLM 返回了空内容。") - reasoning = "LLM 返回了空内容,使用默认动作 'no_reply'." 
- action = "no_reply" - emoji_query = "" - llm_error = True # 标记为空响应错误 - - # 如果 llm_error 在此阶段为 True,意味着请求成功但解析失败或返回空 - # 如果 llm_error 在请求阶段就为 True,则跳过了此解析块 - - except Exception as outer_e: - logger.error(f"{self.log_prefix}[Planner] Planner 处理过程中发生意外错误: {outer_e}") - logger.error(traceback.format_exc()) - action = "error" # 发生未知错误,标记为 error 动作 - reasoning = f"Planner 内部处理错误: {outer_e}" - emoji_query = "" - llm_error = True - finally: - # --- 确保动作恢复 --- - # 检查 self._original_actions_backup 是否有值来判断是否需要恢复 - if self.action_manager._original_actions_backup is not None: - self.action_manager.restore_actions() - logger.debug( - f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}" - ) - # --- 结束确保动作恢复 --- - - # --- 概率性忽略文本回复附带的表情 (逻辑保持不变) --- - if action == "text_reply" and emoji_query: - logger.debug(f"{self.log_prefix}[Planner] 大模型建议文字回复带表情: '{emoji_query}'") - if random.random() > EMOJI_SEND_PRO: - logger.info( - f"{self.log_prefix}但是麦麦这次不想加表情 ({1 - EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji_query}'" - ) - emoji_query = "" # 清空表情请求 - else: - logger.info(f"{self.log_prefix}好吧,加上表情 '{emoji_query}'") - # --- 结束概率性忽略 --- - - # 返回结果字典 - return { - "action": action, - "reasoning": reasoning, - "emoji_query": emoji_query, - "current_mind": current_mind, - "observed_messages": observed_messages, - "llm_error": llm_error, # 返回错误状态 - } - - async def _get_anchor_message(self) -> Optional[MessageRecv]: - """ - 重构观察到的最后一条消息作为回复的锚点, - 如果重构失败或观察为空,则创建一个占位符。 - """ - - try: - placeholder_id = f"mid_pf_{int(time.time() * 1000)}" - placeholder_user = UserInfo( - user_id="system_trigger", user_nickname="System Trigger", platform=self.chat_stream.platform - ) - placeholder_msg_info = BaseMessageInfo( - message_id=placeholder_id, - platform=self.chat_stream.platform, - group_info=self.chat_stream.group_info, - user_info=placeholder_user, - time=time.time(), - ) - placeholder_msg_dict = { - "message_info": placeholder_msg_info.to_dict(), - "processed_plain_text": "[System Trigger Context]", - "raw_message": "", - "time": placeholder_msg_info.time, - } - anchor_message = MessageRecv(placeholder_msg_dict) - anchor_message.update_chat_stream(self.chat_stream) - logger.debug(f"{self.log_prefix} 创建占位符锚点消息: ID={anchor_message.message_info.message_id}") - return anchor_message - - except Exception as e: - logger.error(f"{self.log_prefix} Error getting/creating anchor message: {e}") - logger.error(traceback.format_exc()) - return None - - # --- 发送器 (Sender) --- # - async def _sender( - self, - thinking_id: str, - anchor_message: MessageRecv, - response_set: List[str], - send_emoji: str, # Emoji query decided by planner or tools - ): - """ - 发送器 (Sender): 使用 HeartFCSender 实例发送生成的回复。 - 处理相关的操作,如发送表情和更新关系。 - """ - logger.info(f"{self.log_prefix}开始发送回复 (使用 HeartFCSender)") - - first_bot_msg: Optional[MessageSending] = None - try: - # _send_response_messages 现在将使用 self.sender 内部处理注册和发送 - # 它需要负责创建 MessageThinking 和 MessageSending 对象 - # 并调用 self.sender.register_thinking 和 self.sender.type_and_send_message - first_bot_msg = await self._send_response_messages( - anchor_message=anchor_message, response_set=response_set, thinking_id=thinking_id - ) - - if first_bot_msg: - # --- 处理关联表情(如果指定) --- # - if send_emoji: - logger.info(f"{self.log_prefix}正在发送关联表情: '{send_emoji}'") - # 优先使用 first_bot_msg 作为锚点,否则回退到原始锚点 - emoji_anchor = first_bot_msg - await self._handle_emoji(emoji_anchor, response_set, send_emoji) - else: - # 如果 _send_response_messages 返回 None,表示在发送前就失败或没有消息可发送 - logger.warning( - 
f"{self.log_prefix}[Sender-{thinking_id}] 未能发送任何回复消息 (_send_response_messages 返回 None)。" - ) - # 这里可能不需要抛出异常,取决于 _send_response_messages 的具体实现 - - except Exception as e: - # 异常现在由 type_and_send_message 内部处理日志,这里只记录发送流程失败 - logger.error(f"{self.log_prefix}[Sender-{thinking_id}] 发送回复过程中遇到错误: {e}") - # 思考状态应已在 type_and_send_message 的 finally 块中清理 - # 可以选择重新抛出或根据业务逻辑处理 - # raise RuntimeError(f"发送回复失败: {e}") from e - - async def shutdown(self): - """优雅关闭HeartFChatting实例,取消活动循环任务""" - logger.info(f"{self.log_prefix} 正在关闭HeartFChatting...") - self._shutting_down = True # <-- 在开始关闭时设置标志位 - - # 取消循环任务 - if self._loop_task and not self._loop_task.done(): - logger.info(f"{self.log_prefix} 正在取消HeartFChatting循环任务") - self._loop_task.cancel() - try: - await asyncio.wait_for(self._loop_task, timeout=1.0) - logger.info(f"{self.log_prefix} HeartFChatting循环任务已取消") - except (asyncio.CancelledError, asyncio.TimeoutError): - pass - except Exception as e: - logger.error(f"{self.log_prefix} 取消循环任务出错: {e}") - else: - logger.info(f"{self.log_prefix} 没有活动的HeartFChatting循环任务") - - # 清理状态 - self._loop_active = False - self._loop_task = None - if self._processing_lock.locked(): - self._processing_lock.release() - logger.warning(f"{self.log_prefix} 已释放处理锁") - - logger.info(f"{self.log_prefix} HeartFChatting关闭完成") - - async def _build_replan_prompt(self, action: str, reasoning: str) -> str: - """构建 Replanner LLM 的提示词""" - prompt = (await global_prompt_manager.get_prompt_async("replan_prompt")).format( - action=action, - reasoning=reasoning, - ) - - # 在记录循环日志前检查关闭标志 - if not self._shutting_down: - self._current_cycle.complete_cycle() - self._cycle_history.append(self._current_cycle) - - # 记录循环信息和计时器结果 - timer_strings = [] - for name, elapsed in self._current_cycle.timers.items(): - formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" - timer_strings.append(f"{name}: {formatted_time}") - - logger.debug( - f"{self.log_prefix} 第 #{self._current_cycle.cycle_id}次思考完成," - f"耗时: {self._current_cycle.end_time - self._current_cycle.start_time:.2f}秒, " - f"动作: {self._current_cycle.action_type}" - + (f"\n计时器详情: {'; '.join(timer_strings)}" if timer_strings else "") - ) - - return prompt - - async def _send_response_messages( - self, anchor_message: Optional[MessageRecv], response_set: List[str], thinking_id: str - ) -> Optional[MessageSending]: - """发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender""" - if not anchor_message or not anchor_message.chat_stream: - logger.error(f"{self.log_prefix} 无法发送回复,缺少有效的锚点消息或聊天流。") - return None - - chat = anchor_message.chat_stream - chat_id = chat.stream_id - stream_name = chat_manager.get_stream_name(chat_id) or chat_id # 获取流名称用于日志 - - # 检查思考过程是否仍在进行,并获取开始时间 - thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id) - - if thinking_start_time is None: - logger.warning(f"[{stream_name}] {thinking_id} 思考过程未找到或已结束,无法发送回复。") - return None - - # 记录锚点消息ID和回复文本(在发送前记录) - self._current_cycle.set_response_info( - response_text=response_set, anchor_message_id=anchor_message.message_info.message_id - ) - - mark_head = False - first_bot_msg: Optional[MessageSending] = None - reply_message_ids = [] # 记录实际发送的消息ID - bot_user_info = UserInfo( - user_id=global_config.BOT_QQ, - user_nickname=global_config.BOT_NICKNAME, - platform=anchor_message.message_info.platform, - ) - - for i, msg_text in enumerate(response_set): - # 为每个消息片段生成唯一ID - part_message_id = f"{thinking_id}_{i}" - message_segment = Seg(type="text", data=msg_text) - bot_message = 
MessageSending( - message_id=part_message_id, # 使用片段的唯一ID - chat_stream=chat, - bot_user_info=bot_user_info, - sender_info=anchor_message.message_info.user_info, - message_segment=message_segment, - reply=anchor_message, # 回复原始锚点 - is_head=not mark_head, - is_emoji=False, - thinking_start_time=thinking_start_time, # 传递原始思考开始时间 - ) - try: - if not mark_head: - mark_head = True - first_bot_msg = bot_message # 保存第一个成功发送的消息对象 - await self.heart_fc_sender.type_and_send_message(bot_message, typing=False) - else: - await self.heart_fc_sender.type_and_send_message(bot_message, typing=True) - - reply_message_ids.append(part_message_id) # 记录我们生成的ID - - except Exception as e: - logger.error( - f"{self.log_prefix}[Sender-{thinking_id}] 发送回复片段 {i} ({part_message_id}) 时失败: {e}" - ) - # 这里可以选择是继续发送下一个片段还是中止 - - # 在尝试发送完所有片段后,完成原始的 thinking_id 状态 - try: - await self.heart_fc_sender.complete_thinking(chat_id, thinking_id) - except Exception as e: - logger.error(f"{self.log_prefix}[Sender-{thinking_id}] 完成思考状态 {thinking_id} 时出错: {e}") - - self._current_cycle.set_response_info( - response_text=response_set, # 保留原始文本 - anchor_message_id=anchor_message.message_info.message_id, # 保留锚点ID - reply_message_ids=reply_message_ids, # 添加实际发送的ID列表 - ) - - return first_bot_msg # 返回第一个成功发送的消息对象 - - async def _handle_emoji(self, anchor_message: Optional[MessageRecv], response_set: List[str], send_emoji: str = ""): - """处理表情包 (尝试锚定到 anchor_message),使用 HeartFCSender""" - if not anchor_message or not anchor_message.chat_stream: - logger.error(f"{self.log_prefix} 无法处理表情包,缺少有效的锚点消息或聊天流。") - return - - chat = anchor_message.chat_stream - - emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji) - - if emoji_raw: - emoji_path, description = emoji_raw - - emoji_cq = image_path_to_base64(emoji_path) - thinking_time_point = round(time.time(), 2) # 用于唯一ID - message_segment = Seg(type="emoji", data=emoji_cq) - bot_user_info = UserInfo( - user_id=global_config.BOT_QQ, - user_nickname=global_config.BOT_NICKNAME, - platform=anchor_message.message_info.platform, - ) - bot_message = MessageSending( - message_id="me" + str(thinking_time_point), # 表情消息的唯一ID - chat_stream=chat, - bot_user_info=bot_user_info, - sender_info=anchor_message.message_info.user_info, - message_segment=message_segment, - reply=anchor_message, # 回复原始锚点 - is_head=False, # 表情通常不是头部消息 - is_emoji=True, - # 不需要 thinking_start_time - ) - - try: - await self.heart_fc_sender.send_and_store(bot_message) - except Exception as e: - logger.error(f"{self.log_prefix} 发送表情包 {bot_message.message_info.message_id} 时失败: {e}") - - def get_cycle_history(self, last_n: Optional[int] = None) -> List[Dict[str, Any]]: - """获取循环历史记录 - - 参数: - last_n: 获取最近n个循环的信息,如果为None则获取所有历史记录 - - 返回: - List[Dict[str, Any]]: 循环历史记录列表 - """ - history = list(self._cycle_history) - if last_n is not None: - history = history[-last_n:] - return [cycle.to_dict() for cycle in history] - - def get_last_cycle_info(self) -> Optional[Dict[str, Any]]: - """获取最近一个循环的信息""" - if self._cycle_history: - return self._cycle_history[-1].to_dict() - return None - - # --- 回复器 (Replier) 的定义 --- # - async def _replier_work( - self, - reason: str, - anchor_message: MessageRecv, - thinking_id: str, - ) -> Optional[List[str]]: - """ - 回复器 (Replier): 核心逻辑,负责生成回复文本。 - (已整合原 HeartFCGenerator 的功能) - """ - try: - # 1. 获取情绪影响因子并调整模型温度 - arousal_multiplier = mood_manager.get_arousal_multiplier() - current_temp = global_config.llm_normal["temp"] * arousal_multiplier - self.model_normal.temperature = current_temp # 动态调整温度 - - # 2. 
获取信息捕捉器 - info_catcher = info_catcher_manager.get_info_catcher(thinking_id) - - # --- Determine sender_name for private chat --- - sender_name_for_prompt = "某人" # Default for group or if info unavailable - if not self.is_group_chat and self.chat_target_info: - # Prioritize person_name, then nickname - sender_name_for_prompt = ( - self.chat_target_info.get("person_name") - or self.chat_target_info.get("user_nickname") - or sender_name_for_prompt - ) - # --- End determining sender_name --- - - # 3. 构建 Prompt - with Timer("构建Prompt", {}): # 内部计时器,可选保留 - prompt = await prompt_builder.build_prompt( - build_mode="focus", - chat_stream=self.chat_stream, # Pass the stream object - # Focus specific args: - reason=reason, - current_mind_info=self.sub_mind.current_mind, - structured_info=self.sub_mind.structured_info_str, - sender_name=sender_name_for_prompt, # Pass determined name - # Normal specific args (not used in focus mode): - # message_txt="", - ) - - # 4. 调用 LLM 生成回复 - content = None - reasoning_content = None - model_name = "unknown_model" - if not prompt: - logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。") - return None - - try: - with Timer("LLM生成", {}): # 内部计时器,可选保留 - content, reasoning_content, model_name = await self.model_normal.generate_response(prompt) - # logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n生成回复: {content}\n") - # 捕捉 LLM 输出信息 - info_catcher.catch_after_llm_generated( - prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name - ) - - except Exception as llm_e: - # 精简报错信息 - logger.error(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成失败: {llm_e}") - return None # LLM 调用失败则无法生成回复 - - # 5. 处理 LLM 响应 - if not content: - logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成了空内容。") - return None - - with Timer("处理响应", {}): # 内部计时器,可选保留 - processed_response = process_llm_response(content) - - if not processed_response: - logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] 处理后的回复为空。") - return None - - return processed_response - - except Exception as e: - # 更通用的错误处理,精简信息 - logger.error(f"{self.log_prefix}[Replier-{thinking_id}] 回复生成意外失败: {e}") - # logger.error(traceback.format_exc()) # 可以取消注释这行以在调试时查看完整堆栈 - return None - - # --- Methods moved from HeartFCController start --- - async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]: - """创建思考消息 (尝试锚定到 anchor_message)""" - if not anchor_message or not anchor_message.chat_stream: - logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。") - return None - - chat = anchor_message.chat_stream - messageinfo = anchor_message.message_info - bot_user_info = UserInfo( - user_id=global_config.BOT_QQ, - user_nickname=global_config.BOT_NICKNAME, - platform=messageinfo.platform, - ) - - thinking_time_point = round(time.time(), 2) - thinking_id = "mt" + str(thinking_time_point) - thinking_message = MessageThinking( - message_id=thinking_id, - chat_stream=chat, - bot_user_info=bot_user_info, - reply=anchor_message, # 回复的是锚点消息 - thinking_start_time=thinking_time_point, - ) - # Access MessageManager directly (using heart_fc_sender) - await self.heart_fc_sender.register_thinking(thinking_message) - return thinking_id diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py deleted file mode 100644 index 6bd2e587a..000000000 --- a/src/plugins/schedule/schedule_generator.py +++ /dev/null @@ -1,307 +0,0 @@ -import datetime -import os -import sys -import 
asyncio -from dateutil import tz - -# 添加项目根目录到 Python 路径 -root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")) -sys.path.append(root_path) - -from src.common.database import db # noqa: E402 -from src.common.logger import get_module_logger, SCHEDULE_STYLE_CONFIG, LogConfig # noqa: E402 -from src.plugins.models.utils_model import LLMRequest # noqa: E402 -from src.config.config import global_config # noqa: E402 - -TIME_ZONE = tz.gettz(global_config.TIME_ZONE) # 设置时区 - - -schedule_config = LogConfig( - # 使用海马体专用样式 - console_format=SCHEDULE_STYLE_CONFIG["console_format"], - file_format=SCHEDULE_STYLE_CONFIG["file_format"], -) -logger = get_module_logger("scheduler", config=schedule_config) - - -class ScheduleGenerator: - # enable_output: bool = True - - def __init__(self): - # 使用离线LLM模型 - self.enable_output = None - self.llm_scheduler_all = LLMRequest( - model=global_config.llm_reasoning, - temperature=global_config.SCHEDULE_TEMPERATURE + 0.3, - max_tokens=7000, - request_type="schedule", - ) - self.llm_scheduler_doing = LLMRequest( - model=global_config.llm_normal, - temperature=global_config.SCHEDULE_TEMPERATURE, - max_tokens=2048, - request_type="schedule", - ) - - self.today_schedule_text = "" - self.today_done_list = [] - - self.yesterday_schedule_text = "" - self.yesterday_done_list = [] - - self.name = "" - self.personality = "" - self.behavior = "" - - self.start_time = datetime.datetime.now(TIME_ZONE) - - self.schedule_doing_update_interval = 300 # 最好大于60 - - def initialize( - self, - name: str = "bot_name", - personality: str = "你是一个爱国爱党的新时代青年", - behavior: str = "你非常外向,喜欢尝试新事物和人交流", - interval: int = 60, - ): - """初始化日程系统""" - self.name = name - self.behavior = behavior - self.schedule_doing_update_interval = interval - self.personality = personality - - async def mai_schedule_start(self): - """启动日程系统,每5分钟执行一次move_doing,并在日期变化时重新检查日程""" - try: - if global_config.ENABLE_SCHEDULE_GEN: - logger.info(f"日程系统启动/刷新时间: {self.start_time.strftime('%Y-%m-%d %H:%M:%S')}") - # 初始化日程 - await self.check_and_create_today_schedule() - # self.print_schedule() - - while True: - # print(self.get_current_num_task(1, True)) - - current_time = datetime.datetime.now(TIME_ZONE) - - # 检查是否需要重新生成日程(日期变化) - if current_time.date() != self.start_time.date(): - logger.info("检测到日期变化,重新生成日程") - self.start_time = current_time - await self.check_and_create_today_schedule() - # self.print_schedule() - - # 执行当前活动 - # mind_thinking = heartflow.current_state.current_mind - - await self.move_doing() - - await asyncio.sleep(self.schedule_doing_update_interval) - else: - logger.info("日程系统未启用") - - except Exception as e: - logger.error(f"日程系统运行时出错: {str(e)}") - logger.exception("详细错误信息:") - - async def check_and_create_today_schedule(self): - """检查昨天的日程,并确保今天有日程安排 - - Returns: - tuple: (today_schedule_text, today_schedule) 今天的日程文本和解析后的日程字典 - """ - today = datetime.datetime.now(TIME_ZONE) - yesterday = today - datetime.timedelta(days=1) - - # 先检查昨天的日程 - self.yesterday_schedule_text, self.yesterday_done_list = self.load_schedule_from_db(yesterday) - if self.yesterday_schedule_text: - logger.debug(f"已加载{yesterday.strftime('%Y-%m-%d')}的日程") - - # 检查今天的日程 - self.today_schedule_text, self.today_done_list = self.load_schedule_from_db(today) - if not self.today_done_list: - self.today_done_list = [] - if not self.today_schedule_text: - logger.info(f"{today.strftime('%Y-%m-%d')}的日程不存在,准备生成新的日程") - try: - self.today_schedule_text = await self.generate_daily_schedule(target_date=today) - except Exception as 
e: - logger.error(f"生成日程时发生错误: {str(e)}") - self.today_schedule_text = "" - - self.save_today_schedule_to_db() - - def construct_daytime_prompt(self, target_date: datetime.datetime): - date_str = target_date.strftime("%Y-%m-%d") - weekday = target_date.strftime("%A") - - prompt = f"你是{self.name},{self.personality},{self.behavior}" - prompt += f"你昨天的日程是:{self.yesterday_schedule_text}\n" - prompt += f"请为你生成{date_str}({weekday}),也就是今天的日程安排,结合你的个人特点和行为习惯以及昨天的安排\n" - prompt += "推测你的日程安排,包括你一天都在做什么,从起床到睡眠,有什么发现和思考,具体一些,详细一些,需要1500字以上,精确到每半个小时,记得写明时间\n" # noqa: E501 - prompt += "直接返回你的日程,现实一点,不要浮夸,从起床到睡觉,不要输出其他内容:" - return prompt - - def construct_doing_prompt(self, time: datetime.datetime, mind_thinking: str = ""): - now_time = time.strftime("%H:%M") - previous_doings = self.get_current_num_task(5, True) - - prompt = f"你是{self.name},{self.personality},{self.behavior}" - prompt += f"你今天的日程是:{self.today_schedule_text}\n" - if previous_doings: - prompt += f"你之前做了的事情是:{previous_doings},从之前到现在已经过去了{self.schedule_doing_update_interval / 60}分钟了\n" # noqa: E501 - if mind_thinking: - prompt += f"你脑子里在想:{mind_thinking}\n" - prompt += f"现在是{now_time},结合你的个人特点和行为习惯,注意关注你今天的日程安排和想法安排你接下来做什么,现实一点,不要浮夸" - prompt += "安排你接下来做什么,具体一些,详细一些\n" - prompt += "直接返回你在做的事情,注意是当前时间,不要输出其他内容:" - return prompt - - async def generate_daily_schedule( - self, - target_date: datetime.datetime = None, - ) -> dict[str, str]: - daytime_prompt = self.construct_daytime_prompt(target_date) - daytime_response, _ = await self.llm_scheduler_all.generate_response_async(daytime_prompt) - return daytime_response - - def print_schedule(self): - """打印完整的日程安排""" - if not self.today_schedule_text: - logger.warning("今日日程有误,将在下次运行时重新生成") - db.schedule.delete_one({"date": datetime.datetime.now(TIME_ZONE).strftime("%Y-%m-%d")}) - else: - logger.info("=== 今日日程安排 ===") - logger.info(self.today_schedule_text) - logger.info("==================") - self.enable_output = False - - async def update_today_done_list(self): - # 更新数据库中的 today_done_list - today_str = datetime.datetime.now(TIME_ZONE).strftime("%Y-%m-%d") - existing_schedule = db.schedule.find_one({"date": today_str}) - - if existing_schedule: - # 更新数据库中的 today_done_list - db.schedule.update_one({"date": today_str}, {"$set": {"today_done_list": self.today_done_list}}) - logger.debug(f"已更新{today_str}的已完成活动列表") - else: - logger.warning(f"未找到{today_str}的日程记录") - - async def move_doing(self, mind_thinking: str = ""): - try: - current_time = datetime.datetime.now(TIME_ZONE) - if mind_thinking: - doing_prompt = self.construct_doing_prompt(current_time, mind_thinking) - else: - doing_prompt = self.construct_doing_prompt(current_time) - - doing_response, _ = await self.llm_scheduler_doing.generate_response_async(doing_prompt) - self.today_done_list.append((current_time, doing_response)) - - await self.update_today_done_list() - - logger.info(f"当前活动: {doing_response}") - - return doing_response - except GeneratorExit: - logger.warning("日程生成被中断") - return "日程生成被中断" - except Exception as e: - logger.error(f"生成日程时发生错误: {str(e)}") - return "生成日程时发生错误" - - async def get_task_from_time_to_time(self, start_time: str, end_time: str): - """获取指定时间范围内的任务列表 - - Args: - start_time (str): 开始时间,格式为"HH:MM" - end_time (str): 结束时间,格式为"HH:MM" - - Returns: - list: 时间范围内的任务列表 - """ - result = [] - for task in self.today_done_list: - task_time = task[0] # 获取任务的时间戳 - task_time_str = task_time.strftime("%H:%M") - - # 检查任务时间是否在指定范围内 - if self._time_diff(start_time, task_time_str) >= 0 and self._time_diff(task_time_str, 
end_time) >= 0: - result.append(task) - - return result - - def get_current_num_task(self, num=1, time_info=False): - """获取最新加入的指定数量的日程 - - Args: - num (int): 需要获取的日程数量,默认为1 - time_info (bool): 是否包含时间信息,默认为False - - Returns: - list: 最新加入的日程列表 - """ - if not self.today_done_list: - return [] - - # 确保num不超过列表长度 - num = min(num, len(self.today_done_list)) - pre_doings = "" - for doing in self.today_done_list[-num:]: - if time_info: - time_str = doing[0].strftime("%H:%M") - pre_doings += time_str + "时," + doing[1] + "\n" - else: - pre_doings += doing[1] + "\n" - - # 返回最新的num条日程 - return pre_doings - - def save_today_schedule_to_db(self): - """保存日程到数据库,同时初始化 today_done_list""" - date_str = datetime.datetime.now(TIME_ZONE).strftime("%Y-%m-%d") - schedule_data = { - "date": date_str, - "schedule": self.today_schedule_text, - "today_done_list": self.today_done_list if hasattr(self, "today_done_list") else [], - } - # 使用 upsert 操作,如果存在则更新,不存在则插入 - db.schedule.update_one({"date": date_str}, {"$set": schedule_data}, upsert=True) - logger.debug(f"已保存{date_str}的日程到数据库") - - @staticmethod - def load_schedule_from_db(date: datetime.datetime): - """从数据库加载日程,同时加载 today_done_list""" - date_str = date.strftime("%Y-%m-%d") - existing_schedule = db.schedule.find_one({"date": date_str}) - - if existing_schedule: - schedule_text = existing_schedule["schedule"] - return schedule_text, existing_schedule.get("today_done_list", []) - else: - logger.debug(f"{date_str}的日程不存在") - return None, None - - -async def main(): - # 使用示例 - scheduler = ScheduleGenerator() - scheduler.initialize( - name="麦麦", - personality="你叫麦麦,你19岁,是一个大二的女大学生,你有一头黑色短发,你会刷贴吧,你现在在学习心理学", - behavior="你比较内向,一般熬夜比较晚,然后第二天早上10点起床吃早午饭", - interval=60, - ) - await scheduler.mai_schedule_start() - - -# 当作为组件导入时使用的实例 -bot_schedule = ScheduleGenerator() - -if __name__ == "__main__": - import asyncio - - # 当直接运行此文件时执行 - asyncio.run(main()) diff --git a/src/plugins/topic_identify/topic_identifier.py b/src/plugins/topic_identify/topic_identifier.py deleted file mode 100644 index 25c290a3a..000000000 --- a/src/plugins/topic_identify/topic_identifier.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import List, Optional - - -from ..models.utils_model import LLMRequest -from ...config.config import global_config -from src.common.logger import get_module_logger, LogConfig, TOPIC_STYLE_CONFIG - -# 定义日志配置 -topic_config = LogConfig( - # 使用海马体专用样式 - console_format=TOPIC_STYLE_CONFIG["console_format"], - file_format=TOPIC_STYLE_CONFIG["file_format"], -) - -logger = get_module_logger("topic_identifier", config=topic_config) - - -class TopicIdentifier: - def __init__(self): - self.llm_topic_judge = LLMRequest(model=global_config.llm_topic_judge, request_type="topic") - - async def identify_topic_llm(self, text: str) -> Optional[List[str]]: - """识别消息主题,返回主题列表""" - - prompt = f"""判断这条消息的主题,如果没有明显主题请回复"无主题",要求: -1. 主题通常2-4个字,必须简短,要求精准概括,不要太具体。 -2. 
建议给出多个主题,之间用英文逗号分割。只输出主题本身就好,不要有前后缀。 - -消息内容:{text}""" - - # 使用 LLMRequest 类进行请求 - try: - topic, _, _ = await self.llm_topic_judge.generate_response(prompt) - except Exception as e: - logger.error(f"LLM 请求topic失败: {e}") - return None - if not topic: - logger.error("LLM 得到的topic为空") - return None - - # 直接在这里处理主题解析 - if not topic or topic == "无主题": - return None - - # 解析主题字符串为列表 - topic_list = [t.strip() for t in topic.split(",") if t.strip()] - - logger.info(f"主题: {topic_list}") - return topic_list if topic_list else None - - -topic_identifier = TopicIdentifier() diff --git a/src/plugins/willing/mode_dynamic.py b/src/plugins/willing/mode_dynamic.py deleted file mode 100644 index 029da4e0f..000000000 --- a/src/plugins/willing/mode_dynamic.py +++ /dev/null @@ -1,233 +0,0 @@ -import asyncio -import random -import time -from typing import Dict -from .willing_manager import BaseWillingManager - - -class DynamicWillingManager(BaseWillingManager): - def __init__(self): - super().__init__() - self.chat_reply_willing: Dict[str, float] = {} # 存储每个聊天流的回复意愿 - self.chat_high_willing_mode: Dict[str, bool] = {} # 存储每个聊天流是否处于高回复意愿期 - self.chat_msg_count: Dict[str, int] = {} # 存储每个聊天流接收到的消息数量 - self.chat_last_mode_change: Dict[str, float] = {} # 存储每个聊天流上次模式切换的时间 - self.chat_high_willing_duration: Dict[str, int] = {} # 高意愿期持续时间(秒) - self.chat_low_willing_duration: Dict[str, int] = {} # 低意愿期持续时间(秒) - self.chat_last_reply_time: Dict[str, float] = {} # 存储每个聊天流上次回复的时间 - self.chat_last_sender_id: Dict[str, str] = {} # 存储每个聊天流上次回复的用户ID - self.chat_conversation_context: Dict[str, bool] = {} # 标记是否处于对话上下文中 - self._decay_task = None - self._mode_switch_task = None - - async def async_task_starter(self): - if self._decay_task is None: - self._decay_task = asyncio.create_task(self._decay_reply_willing()) - if self._mode_switch_task is None: - self._mode_switch_task = asyncio.create_task(self._mode_switch_check()) - - async def _decay_reply_willing(self): - """定期衰减回复意愿""" - while True: - await asyncio.sleep(5) - for chat_id in self.chat_reply_willing: - is_high_mode = self.chat_high_willing_mode.get(chat_id, False) - if is_high_mode: - # 高回复意愿期内轻微衰减 - self.chat_reply_willing[chat_id] = max(0.5, self.chat_reply_willing[chat_id] * 0.95) - else: - # 低回复意愿期内正常衰减 - self.chat_reply_willing[chat_id] = max(0, self.chat_reply_willing[chat_id] * 0.8) - - async def _mode_switch_check(self): - """定期检查是否需要切换回复意愿模式""" - while True: - current_time = time.time() - await asyncio.sleep(10) # 每10秒检查一次 - - for chat_id in self.chat_high_willing_mode: - last_change_time = self.chat_last_mode_change.get(chat_id, 0) - is_high_mode = self.chat_high_willing_mode.get(chat_id, False) - - # 获取当前模式的持续时间 - if is_high_mode: - duration = self.chat_high_willing_duration.get(chat_id, 180) # 默认3分钟 - else: - duration = self.chat_low_willing_duration.get(chat_id, random.randint(300, 1200)) # 默认5-20分钟 - - # 检查是否需要切换模式 - if current_time - last_change_time > duration: - self._switch_willing_mode(chat_id) - elif not is_high_mode and random.random() < 0.1: - # 低回复意愿期有10%概率随机切换到高回复期 - self._switch_willing_mode(chat_id) - - # 检查对话上下文状态是否需要重置 - last_reply_time = self.chat_last_reply_time.get(chat_id, 0) - if current_time - last_reply_time > 300: # 5分钟无交互,重置对话上下文 - self.chat_conversation_context[chat_id] = False - - def _switch_willing_mode(self, chat_id: str): - """切换聊天流的回复意愿模式""" - is_high_mode = self.chat_high_willing_mode.get(chat_id, False) - - if is_high_mode: - # 从高回复期切换到低回复期 - self.chat_high_willing_mode[chat_id] = False - self.chat_reply_willing[chat_id] = 
0.1 # 设置为最低回复意愿 - self.chat_low_willing_duration[chat_id] = random.randint(600, 1200) # 10-20分钟 - self.logger.debug(f"聊天流 {chat_id} 切换到低回复意愿期,持续 {self.chat_low_willing_duration[chat_id]} 秒") - else: - # 从低回复期切换到高回复期 - self.chat_high_willing_mode[chat_id] = True - self.chat_reply_willing[chat_id] = 1.0 # 设置为较高回复意愿 - self.chat_high_willing_duration[chat_id] = random.randint(180, 240) # 3-4分钟 - self.logger.debug( - f"聊天流 {chat_id} 切换到高回复意愿期,持续 {self.chat_high_willing_duration[chat_id]} 秒" - ) - - self.chat_last_mode_change[chat_id] = time.time() - self.chat_msg_count[chat_id] = 0 # 重置消息计数 - - def _ensure_chat_initialized(self, chat_id: str): - """确保聊天流的所有数据已初始化""" - if chat_id not in self.chat_reply_willing: - self.chat_reply_willing[chat_id] = 0.1 - - if chat_id not in self.chat_high_willing_mode: - self.chat_high_willing_mode[chat_id] = False - self.chat_last_mode_change[chat_id] = time.time() - self.chat_low_willing_duration[chat_id] = random.randint(300, 1200) # 5-20分钟 - - if chat_id not in self.chat_msg_count: - self.chat_msg_count[chat_id] = 0 - - if chat_id not in self.chat_conversation_context: - self.chat_conversation_context[chat_id] = False - - async def get_reply_probability(self, message_id): - """改变指定聊天流的回复意愿并返回回复概率""" - # 获取或创建聊天流 - willing_info = self.ongoing_messages[message_id] - stream = willing_info.chat - chat_id = stream.stream_id - sender_id = str(willing_info.message.message_info.user_info.user_id) - current_time = time.time() - - self._ensure_chat_initialized(chat_id) - - # 增加消息计数 - self.chat_msg_count[chat_id] = self.chat_msg_count.get(chat_id, 0) + 1 - - current_willing = self.chat_reply_willing.get(chat_id, 0) - is_high_mode = self.chat_high_willing_mode.get(chat_id, False) - msg_count = self.chat_msg_count.get(chat_id, 0) - in_conversation_context = self.chat_conversation_context.get(chat_id, False) - - # 检查是否是对话上下文中的追问 - last_reply_time = self.chat_last_reply_time.get(chat_id, 0) - last_sender = self.chat_last_sender_id.get(chat_id, "") - - # 如果是同一个人在短时间内(2分钟内)发送消息,且消息数量较少(<=5条),视为追问 - if sender_id and sender_id == last_sender and current_time - last_reply_time < 120 and msg_count <= 5: - in_conversation_context = True - self.chat_conversation_context[chat_id] = True - self.logger.debug("检测到追问 (同一用户), 提高回复意愿") - current_willing += 0.3 - - # 特殊情况处理 - if willing_info.is_mentioned_bot: - current_willing += 0.5 - in_conversation_context = True - self.chat_conversation_context[chat_id] = True - self.logger.debug(f"被提及, 当前意愿: {current_willing}") - - if willing_info.is_emoji: - current_willing = self.global_config.emoji_response_penalty * 0.1 - self.logger.debug(f"表情包, 当前意愿: {current_willing}") - - # 根据话题兴趣度适当调整 - if willing_info.interested_rate > 0.5: - current_willing += ( - (willing_info.interested_rate - 0.5) * 0.5 * self.global_config.response_interested_rate_amplifier - ) - - # 根据当前模式计算回复概率 - if in_conversation_context: - # 在对话上下文中,降低基础回复概率 - base_probability = 0.5 if is_high_mode else 0.25 - self.logger.debug(f"处于对话上下文中,基础回复概率: {base_probability}") - elif is_high_mode: - # 高回复周期:4-8句话有50%的概率会回复一次 - base_probability = 0.50 if 4 <= msg_count <= 8 else 0.2 - else: - # 低回复周期:需要最少15句才有30%的概率会回一句 - base_probability = 0.30 if msg_count >= 15 else 0.03 * min(msg_count, 10) - - # 考虑回复意愿的影响 - reply_probability = base_probability * current_willing * self.global_config.response_willing_amplifier - - # 检查群组权限(如果是群聊) - if willing_info.group_info: - if willing_info.group_info.group_id in self.global_config.talk_frequency_down_groups: - reply_probability = reply_probability / 
self.global_config.down_frequency_rate - - # 限制最大回复概率 - reply_probability = min(reply_probability, 0.75) # 设置最大回复概率为75% - if reply_probability < 0: - reply_probability = 0 - - # 记录当前发送者ID以便后续追踪 - if sender_id: - self.chat_last_sender_id[chat_id] = sender_id - - self.chat_reply_willing[chat_id] = min(current_willing, 3.0) - - return reply_probability - - async def before_generate_reply_handle(self, message_id): - """开始思考后降低聊天流的回复意愿""" - stream = self.ongoing_messages[message_id].chat - if stream: - chat_id = stream.stream_id - self._ensure_chat_initialized(chat_id) - current_willing = self.chat_reply_willing.get(chat_id, 0) - - # 回复后减少回复意愿 - self.chat_reply_willing[chat_id] = max(0.0, current_willing - 0.3) - - # 标记为对话上下文中 - self.chat_conversation_context[chat_id] = True - - # 记录最后回复时间 - self.chat_last_reply_time[chat_id] = time.time() - - # 重置消息计数 - self.chat_msg_count[chat_id] = 0 - - async def not_reply_handle(self, message_id): - """决定不回复后提高聊天流的回复意愿""" - stream = self.ongoing_messages[message_id].chat - if stream: - chat_id = stream.stream_id - self._ensure_chat_initialized(chat_id) - is_high_mode = self.chat_high_willing_mode.get(chat_id, False) - current_willing = self.chat_reply_willing.get(chat_id, 0) - in_conversation_context = self.chat_conversation_context.get(chat_id, False) - - # 根据当前模式调整不回复后的意愿增加 - if is_high_mode: - willing_increase = 0.1 - elif in_conversation_context: - # 在对话上下文中但决定不回复,小幅增加回复意愿 - willing_increase = 0.15 - else: - willing_increase = random.uniform(0.05, 0.1) - - self.chat_reply_willing[chat_id] = min(2.0, current_willing + willing_increase) - - async def bombing_buffer_message_handle(self, message_id): - return await super().bombing_buffer_message_handle(message_id) - - async def after_generate_reply_handle(self, message_id): - return await super().after_generate_reply_handle(message_id) diff --git a/src/plugins/willing/mode_llmcheck.py b/src/plugins/willing/mode_llmcheck.py deleted file mode 100644 index 697621b11..000000000 --- a/src/plugins/willing/mode_llmcheck.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -llmcheck 模式: -此模式的一些参数不会在配置文件中显示,要修改请在可变参数下修改 -此模式的特点: -1.在群聊内的连续对话场景下,使用大语言模型来判断回复概率 -2.非连续对话场景,使用mxp模式的意愿管理器(可另外配置) -3.默认配置的是model_v3,当前参数适用于deepseek-v3-0324 - -继承自其他模式,实质上仅重写get_reply_probability方法,未来可能重构成一个插件,可方便地组装到其他意愿模式上。 -目前的使用方式是拓展到其他意愿管理模式 - -""" - -import time -from loguru import logger -from ..models.utils_model import LLMRequest -from ...config.config import global_config - -# from ..chat.chat_stream import ChatStream -from ..chat.utils import get_recent_group_detailed_plain_text - -# from .willing_manager import BaseWillingManager -from .mode_mxp import MxpWillingManager -import re -from functools import wraps - - -def is_continuous_chat(self, message_id: str): - # 判断是否是连续对话,出于成本考虑,默认限制5条 - willing_info = self.ongoing_messages[message_id] - chat_id = willing_info.chat_id - group_info = willing_info.group_info - config = self.global_config - length = 5 - if chat_id: - chat_talking_text = get_recent_group_detailed_plain_text(chat_id, limit=length, combine=True) - if group_info: - if str(config.BOT_QQ) in chat_talking_text: - return True - else: - return False - return False - - -def llmcheck_decorator(trigger_condition_func): - def decorator(func): - @wraps(func) - def wrapper(self, message_id: str): - if trigger_condition_func(self, message_id): - # 满足条件,走llm流程 - return self.get_llmreply_probability(message_id) - else: - # 不满足条件,走默认流程 - return func(self, message_id) - - return wrapper - - return decorator - - -class 
LlmcheckWillingManager(MxpWillingManager): - def __init__(self): - super().__init__() - self.model_v3 = LLMRequest(model=global_config.llm_normal, temperature=0.3) - - async def get_llmreply_probability(self, message_id: str): - message_info = self.ongoing_messages[message_id] - chat_id = message_info.chat_id - config = self.global_config - # 获取信息的长度 - length = 5 - if message_info.group_info and config: - if message_info.group_info.group_id not in config.talk_allowed_groups: - reply_probability = 0 - return reply_probability - - current_date = time.strftime("%Y-%m-%d", time.localtime()) - current_time = time.strftime("%H:%M:%S", time.localtime()) - chat_talking_prompt = get_recent_group_detailed_plain_text(chat_id, limit=length, combine=True) - if not chat_id: - return 0 - - # if is_mentioned_bot: - # return 1.0 - prompt = f""" - 假设你正在查看一个群聊,你在这个群聊里的网名叫{global_config.BOT_NICKNAME},你还有很多别名: {"/".join(global_config.BOT_ALIAS_NAMES)}, - 现在群里聊天的内容是{chat_talking_prompt}, - 今天是{current_date},现在是{current_time}。 - 综合群内的氛围和你自己之前的发言,给出你认为**最新的消息**需要你回复的概率,数值在0到1之间。请注意,群聊内容杂乱,很多时候对话连续,但很可能不是在和你说话。 - 如果最新的消息和你之前的发言在内容上连续,或者提到了你的名字或者称谓,将其视作明确指向你的互动,给出高于0.8的概率。如果现在是睡眠时间,直接概率为0。如果话题内容与你之前不是紧密相关,请不要给出高于0.1的概率。 - 请注意是判断概率,而不是编写回复内容, - 仅输出在0到1区间内的概率值,不要给出你的判断依据。 - """ - - content_check, reasoning_check, _ = await self.model_v3.generate_response(prompt) - # logger.info(f"{prompt}") - logger.info(f"{content_check} {reasoning_check}") - probability = self.extract_marked_probability(content_check) - # 兴趣系数修正 无关激活效率太高,暂时停用,待新记忆系统上线后调整 - probability += message_info.interested_rate * 0.25 - probability = min(1.0, probability) - if probability <= 0.1: - probability = min(0.03, probability) - if probability >= 0.8: - probability = max(probability, 0.90) - - # 当前表情包理解能力较差,少说就少错 - if message_info.is_emoji: - probability *= global_config.emoji_response_penalty - - return probability - - @staticmethod - def extract_marked_probability(text): - """提取带标记的概率值 该方法主要用于测试微调prompt阶段""" - text = text.strip() - pattern = r"##PROBABILITY_START##(.*?)##PROBABILITY_END##" - match = re.search(pattern, text, re.DOTALL) - if match: - prob_str = match.group(1).strip() - # 处理百分比(65% → 0.65) - if "%" in prob_str: - return float(prob_str.replace("%", "")) / 100 - # 处理分数(2/3 → 0.666...) 
- elif "/" in prob_str: - numerator, denominator = map(float, prob_str.split("/")) - return numerator / denominator - # 直接处理小数 - else: - return float(prob_str) - - percent_match = re.search(r"(\d{1,3})%", text) # 65% - decimal_match = re.search(r"(0\.\d+|1\.0+)", text) # 0.65 - fraction_match = re.search(r"(\d+)/(\d+)", text) # 2/3 - try: - if percent_match: - prob = float(percent_match.group(1)) / 100 - elif decimal_match: - prob = float(decimal_match.group(0)) - elif fraction_match: - numerator, denominator = map(float, fraction_match.groups()) - prob = numerator / denominator - else: - return 0 # 无匹配格式 - - # 验证范围是否合法 - if 0 <= prob <= 1: - return prob - return 0 - except (ValueError, ZeroDivisionError): - return 0 - - @llmcheck_decorator(is_continuous_chat) - def get_reply_probability(self, message_id): - return super().get_reply_probability(message_id) diff --git a/src/do_tool/not_used/change_mood.py b/src/tools/not_used/change_mood.py similarity index 97% rename from src/do_tool/not_used/change_mood.py rename to src/tools/not_used/change_mood.py index 5d1e7f7a6..c34bebb93 100644 --- a/src/do_tool/not_used/change_mood.py +++ b/src/tools/not_used/change_mood.py @@ -2,7 +2,7 @@ from typing import Any from src.common.logger_manager import get_logger from src.config.config import global_config -from src.do_tool.tool_can_use.base_tool import BaseTool +from src.tools.tool_can_use.base_tool import BaseTool from src.manager.mood_manager import mood_manager logger = get_logger("change_mood_tool") diff --git a/src/do_tool/not_used/change_relationship.py b/src/tools/not_used/change_relationship.py similarity index 96% rename from src/do_tool/not_used/change_relationship.py rename to src/tools/not_used/change_relationship.py index 96f512e56..b038a3e62 100644 --- a/src/do_tool/not_used/change_relationship.py +++ b/src/tools/not_used/change_relationship.py @@ -1,6 +1,6 @@ from typing import Any from src.common.logger_manager import get_logger -from src.do_tool.tool_can_use.base_tool import BaseTool +from src.tools.tool_can_use.base_tool import BaseTool logger = get_logger("relationship_tool") diff --git a/src/do_tool/not_used/get_memory.py b/src/tools/not_used/get_memory.py similarity index 94% rename from src/do_tool/not_used/get_memory.py rename to src/tools/not_used/get_memory.py index 481942daf..2f40d3813 100644 --- a/src/do_tool/not_used/get_memory.py +++ b/src/tools/not_used/get_memory.py @@ -1,5 +1,5 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool -from src.plugins.memory_system.Hippocampus import HippocampusManager +from src.tools.tool_can_use.base_tool import BaseTool +from src.chat.memory_system.Hippocampus import HippocampusManager from src.common.logger import get_module_logger from typing import Dict, Any diff --git a/src/do_tool/not_used/mid_chat_mem.py b/src/tools/not_used/mid_chat_mem.py similarity index 95% rename from src/do_tool/not_used/mid_chat_mem.py rename to src/tools/not_used/mid_chat_mem.py index 0340df139..fc64ab299 100644 --- a/src/do_tool/not_used/mid_chat_mem.py +++ b/src/tools/not_used/mid_chat_mem.py @@ -1,4 +1,4 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool +from src.tools.tool_can_use.base_tool import BaseTool from src.common.logger import get_module_logger from typing import Any diff --git a/src/do_tool/not_used/send_emoji.py b/src/tools/not_used/send_emoji.py similarity index 93% rename from src/do_tool/not_used/send_emoji.py rename to src/tools/not_used/send_emoji.py index d2d00a92e..698ba2a75 100644 --- 
a/src/do_tool/not_used/send_emoji.py +++ b/src/tools/not_used/send_emoji.py @@ -1,4 +1,4 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool +from src.tools.tool_can_use.base_tool import BaseTool from src.common.logger import get_module_logger from typing import Any diff --git a/src/do_tool/tool_can_use/README.md b/src/tools/tool_can_use/README.md similarity index 96% rename from src/do_tool/tool_can_use/README.md rename to src/tools/tool_can_use/README.md index 0b746b4ec..ef6760b5b 100644 --- a/src/do_tool/tool_can_use/README.md +++ b/src/tools/tool_can_use/README.md @@ -9,7 +9,7 @@ 每个工具应该继承 `BaseTool` 基类并实现必要的属性和方法: ```python -from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool +from src.tools.tool_can_use.base_tool import BaseTool, register_tool class MyNewTool(BaseTool): # 工具名称,必须唯一 @@ -86,7 +86,7 @@ register_tool(MyNewTool) ## 使用示例 ```python -from src.do_tool.tool_use import ToolUser +from src.tools.tool_use import ToolUser # 创建工具用户 tool_user = ToolUser() diff --git a/src/do_tool/tool_can_use/__init__.py b/src/tools/tool_can_use/__init__.py similarity index 86% rename from src/do_tool/tool_can_use/__init__.py rename to src/tools/tool_can_use/__init__.py index a7ea17ab7..14bae04c0 100644 --- a/src/do_tool/tool_can_use/__init__.py +++ b/src/tools/tool_can_use/__init__.py @@ -1,4 +1,4 @@ -from src.do_tool.tool_can_use.base_tool import ( +from src.tools.tool_can_use.base_tool import ( BaseTool, register_tool, discover_tools, diff --git a/src/do_tool/tool_can_use/base_tool.py b/src/tools/tool_can_use/base_tool.py similarity index 97% rename from src/do_tool/tool_can_use/base_tool.py rename to src/tools/tool_can_use/base_tool.py index b0f04ffe8..62697168f 100644 --- a/src/do_tool/tool_can_use/base_tool.py +++ b/src/tools/tool_can_use/base_tool.py @@ -81,7 +81,7 @@ def discover_tools(): continue # 导入模块 - module = importlib.import_module(f"src.do_tool.{package_name}.{module_name}") + module = importlib.import_module(f"src.tools.{package_name}.{module_name}") # 查找模块中的工具类 for _, obj in inspect.getmembers(module): diff --git a/src/do_tool/tool_can_use/compare_numbers_tool.py b/src/tools/tool_can_use/compare_numbers_tool.py similarity index 96% rename from src/do_tool/tool_can_use/compare_numbers_tool.py rename to src/tools/tool_can_use/compare_numbers_tool.py index 2bb292a1a..72c7d7d15 100644 --- a/src/do_tool/tool_can_use/compare_numbers_tool.py +++ b/src/tools/tool_can_use/compare_numbers_tool.py @@ -1,4 +1,4 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool +from src.tools.tool_can_use.base_tool import BaseTool from src.common.logger import get_module_logger from typing import Any diff --git a/src/do_tool/tool_can_use/get_knowledge.py b/src/tools/tool_can_use/get_knowledge.py similarity index 97% rename from src/do_tool/tool_can_use/get_knowledge.py rename to src/tools/tool_can_use/get_knowledge.py index 90a446550..65acd55c0 100644 --- a/src/do_tool/tool_can_use/get_knowledge.py +++ b/src/tools/tool_can_use/get_knowledge.py @@ -1,5 +1,5 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool -from src.plugins.chat.utils import get_embedding +from src.tools.tool_can_use.base_tool import BaseTool +from src.chat.utils.utils import get_embedding from src.common.database import db from src.common.logger_manager import get_logger from typing import Any, Union diff --git a/src/do_tool/tool_can_use/get_time_date.py b/src/tools/tool_can_use/get_time_date.py similarity index 95% rename from src/do_tool/tool_can_use/get_time_date.py rename to 
src/tools/tool_can_use/get_time_date.py index 1cb23fdbd..8b0986743 100644 --- a/src/do_tool/tool_can_use/get_time_date.py +++ b/src/tools/tool_can_use/get_time_date.py @@ -1,4 +1,4 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool +from src.tools.tool_can_use.base_tool import BaseTool from src.common.logger_manager import get_logger from typing import Dict, Any from datetime import datetime diff --git a/src/do_tool/tool_can_use/lpmm_get_knowledge.py b/src/tools/tool_can_use/lpmm_get_knowledge.py similarity index 97% rename from src/do_tool/tool_can_use/lpmm_get_knowledge.py rename to src/tools/tool_can_use/lpmm_get_knowledge.py index a4ded910f..f7c0bd753 100644 --- a/src/do_tool/tool_can_use/lpmm_get_knowledge.py +++ b/src/tools/tool_can_use/lpmm_get_knowledge.py @@ -1,10 +1,10 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool -from src.plugins.chat.utils import get_embedding +from src.tools.tool_can_use.base_tool import BaseTool +from src.chat.utils.utils import get_embedding # from src.common.database import db from src.common.logger_manager import get_logger from typing import Dict, Any -from src.plugins.knowledge.knowledge_lib import qa_manager +from src.chat.knowledge.knowledge_lib import qa_manager logger = get_logger("lpmm_get_knowledge_tool") diff --git a/src/do_tool/tool_can_use/rename_person_tool.py b/src/tools/tool_can_use/rename_person_tool.py similarity index 97% rename from src/do_tool/tool_can_use/rename_person_tool.py rename to src/tools/tool_can_use/rename_person_tool.py index 3b95bc43a..c9914a4e4 100644 --- a/src/do_tool/tool_can_use/rename_person_tool.py +++ b/src/tools/tool_can_use/rename_person_tool.py @@ -1,5 +1,5 @@ -from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool -from src.plugins.person_info.person_info import person_info_manager +from src.tools.tool_can_use.base_tool import BaseTool, register_tool +from src.chat.person_info.person_info import person_info_manager from src.common.logger_manager import get_logger import time diff --git a/src/do_tool/tool_use.py b/src/tools/tool_use.py similarity index 94% rename from src/do_tool/tool_use.py rename to src/tools/tool_use.py index b2f59cc8b..c55170b88 100644 --- a/src/do_tool/tool_use.py +++ b/src/tools/tool_use.py @@ -1,13 +1,13 @@ -from src.plugins.models.utils_model import LLMRequest +from src.chat.models.utils_model import LLMRequest from src.config.config import global_config import json from src.common.logger_manager import get_logger -from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance +from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance import traceback -from src.plugins.person_info.relationship_manager import relationship_manager -from src.plugins.chat.utils import parse_text_timestamps -from src.plugins.chat.chat_stream import ChatStream -from src.heart_flow.observation import ChattingObservation +from src.chat.person_info.relationship_manager import relationship_manager +from src.chat.utils.utils import parse_text_timestamps +from src.chat.message_receive.chat_stream import ChatStream +from src.chat.heart_flow.observation.chatting_observation import ChattingObservation logger = get_logger("tool_use") diff --git a/template/bot_config_meta.toml b/template/bot_config_meta.toml index 459b70268..c3541baad 100644 --- a/template/bot_config_meta.toml +++ b/template/bot_config_meta.toml @@ -63,36 +63,6 @@ describe = "外貌特征描述,该选项还在调试中,暂时未生效" important = false can_edit = true -[schedule.enable_schedule_gen] -describe = 
"是否启用日程表" -important = false -can_edit = true - -[schedule.enable_schedule_interaction] -describe = "日程表是否影响回复模式" -important = false -can_edit = true - -[schedule.prompt_schedule_gen] -describe = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表" -important = false -can_edit = true - -[schedule.schedule_doing_update_interval] -describe = "日程表更新间隔,单位秒" -important = false -can_edit = true - -[schedule.schedule_temperature] -describe = "日程表温度,建议0.1-0.5" -important = false -can_edit = true - -[schedule.time_zone] -describe = "时区设置,可以解决运行电脑时区和国内时区不同的情况,或者模拟国外留学生日程" -important = false -can_edit = true - [platforms.nonebot-qq] describe = "nonebot-qq适配器提供的链接" important = true diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 8eab299cb..931afe2ed 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "1.6.1" +version = "1.7.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -43,6 +43,10 @@ personality_sides = [ "用一句话或几句话描述人格的一些细节", ]# 条数任意,不能为0, 该选项还在调试中,可能未完全生效 +# 表达方式 +expression_style = "描述麦麦说话的表达风格,表达习惯" + + [identity] #アイデンティティがない 生まれないらららら # 兴趣爱好 未完善,有些条目未使用 identity_detail = [ @@ -54,23 +58,15 @@ age = 20 # 年龄 单位岁 gender = "男" # 性别 appearance = "用几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效 -[schedule] -enable_schedule_gen = true # 是否启用日程表 -enable_schedule_interaction = true # 日程表是否影响回复模式 -prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表" -schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒 -schedule_temperature = 0.1 # 日程表温度,建议0.1-0.5 -time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运行电脑时区和国内时区不同的情况,或者模拟国外留学生日程 - [platforms] # 必填项目,填写每个平台适配器提供的链接 -nonebot-qq="http://127.0.0.1:18002/api/message" +qq="http://127.0.0.1:18002/api/message" [chat] #麦麦的聊天通用设置 -allow_focus_mode = true # 是否允许专注聊天状态 +allow_focus_mode = false # 是否允许专注聊天状态 # 是否启用heart_flowC(HFC)模式 # 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间),进行主动的观察和回复,并给出回复,比较消耗token -base_normal_chat_num = 8 # 最多允许多少个群进行普通聊天 -base_focused_chat_num = 5 # 最多允许多少个群进行专注聊天 +base_normal_chat_num = 999 # 最多允许多少个群进行普通聊天 +base_focused_chat_num = 4 # 最多允许多少个群进行专注聊天 observation_context_size = 15 # 观察到的最长上下文大小,建议15,太短太长都会导致脑袋尖尖 message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟 @@ -95,7 +91,7 @@ model_normal_probability = 0.3 # 麦麦回答时选择一般模型 模型的概 emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率,设置为1让麦麦自己决定发不发 thinking_timeout = 100 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢) -willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,动态模式:dynamic,mxp模式:mxp,自定义模式:custom(需要你自己实现) +willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,mxp模式:mxp,自定义模式:custom(需要你自己实现) response_willing_amplifier = 1 # 麦麦回复意愿放大系数,一般为1 response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数,听到记忆里的内容时放大系数 down_frequency_rate = 3 # 降低回复频率的群组回复意愿降低系数 除法 diff --git a/template/template.env b/template/template.env index c1a6dd0dc..6165a0df9 100644 --- a/template/template.env +++ b/template/template.env @@ -1,9 +1,6 @@ HOST=127.0.0.1 PORT=8000 -# 插件配置 -PLUGINS=["src2.plugins.chat"] - # 默认配置 # 如果工作在Docker下,请改成 MONGODB_HOST=mongodb MONGODB_HOST=127.0.0.1