From c7a804e28631a5dd06fc71e0f848cffb3f1413a1 Mon Sep 17 00:00:00 2001 From: Todysheep Date: Mon, 23 Jun 2025 16:09:14 +0800 Subject: [PATCH 01/63] =?UTF-8?q?feat:=20=E6=9B=B4=E6=96=B0=20LLMRequest?= =?UTF-8?q?=20=E7=B1=BB=E4=BB=A5=E6=94=AF=E6=8C=81=E8=87=AA=E5=AE=9A?= =?UTF-8?q?=E4=B9=89=E5=8F=82=E6=95=B0=EF=BC=8C=E6=9B=B4=E6=96=B0=20payloa?= =?UTF-8?q?d=20=E9=94=AE=E5=80=BC=E6=B7=BB=E5=8A=A0=E9=80=BB=E8=BE=91?= =?UTF-8?q?=EF=BC=8C=E5=85=BC=E5=AE=B9=E4=B8=8D=E6=94=AF=E6=8C=81=E6=9F=90?= =?UTF-8?q?=E4=BA=9B=E9=94=AE=E5=80=BC=E7=9A=84api?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/llm_models/utils_model.py | 38 ++++++++--------------------------- 1 file changed, 8 insertions(+), 30 deletions(-) diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py index 377fd3813..12f396758 100644 --- a/src/llm_models/utils_model.py +++ b/src/llm_models/utils_model.py @@ -122,14 +122,16 @@ class LLMRequest: self.model_name: str = model["name"] self.params = kwargs - self.enable_thinking = model.get("enable_thinking", False) + self.enable_thinking = model.get("enable_thinking", None) self.temp = model.get("temp", 0.7) - self.thinking_budget = model.get("thinking_budget", 4096) + self.thinking_budget = model.get("thinking_budget", None) self.stream = model.get("stream", False) self.pri_in = model.get("pri_in", 0) self.pri_out = model.get("pri_out", 0) self.max_tokens = model.get("max_tokens", global_config.model.model_max_output_length) # print(f"max_tokens: {self.max_tokens}") + self.custom_params = model.get("custom_params", "{}") + self.custom_params = json.loads(self.custom_params) # 获取数据库实例 self._init_database() @@ -247,28 +249,6 @@ class LLMRequest: elif payload is None: payload = await self._build_payload(prompt) - if stream_mode: - payload["stream"] = stream_mode - - if self.temp != 0.7: - payload["temperature"] = self.temp - - # 添加enable_thinking参数(如果不是默认值False) - if not self.enable_thinking: - payload["enable_thinking"] = False - - if self.thinking_budget != 4096: - payload["thinking_budget"] = self.thinking_budget - - if self.max_tokens: - payload["max_tokens"] = self.max_tokens - - # if "max_tokens" not in payload and "max_completion_tokens" not in payload: - # payload["max_tokens"] = global_config.model.model_max_output_length - # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查 - if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload: - payload["max_completion_tokens"] = payload.pop("max_tokens") - return { "policy": policy, "payload": payload, @@ -668,18 +648,16 @@ class LLMRequest: if self.temp != 0.7: payload["temperature"] = self.temp - # 添加enable_thinking参数(如果不是默认值False) - if not self.enable_thinking: - payload["enable_thinking"] = False + # 仅当配置文件中存在参数时,添加对应参数 + if self.enable_thinking is not None: + payload["enable_thinking"] = self.enable_thinking - if self.thinking_budget != 4096: + if self.thinking_budget is not None: payload["thinking_budget"] = self.thinking_budget if self.max_tokens: payload["max_tokens"] = self.max_tokens - # if "max_tokens" not in payload and "max_completion_tokens" not in payload: - # payload["max_tokens"] = global_config.model.model_max_output_length # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查 if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload: payload["max_completion_tokens"] = payload.pop("max_tokens") From 7961a1f04c08553befcb95fe4e97d97c6c0fb50d Mon Sep 17 00:00:00 2001 From: Todysheep 
<97968466+Todysheep@users.noreply.github.com> Date: Mon, 23 Jun 2025 16:30:25 +0800 Subject: [PATCH 02/63] Update src/llm_models/utils_model.py Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- src/llm_models/utils_model.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py index 12f396758..52f5f2139 100644 --- a/src/llm_models/utils_model.py +++ b/src/llm_models/utils_model.py @@ -130,8 +130,13 @@ class LLMRequest: self.pri_out = model.get("pri_out", 0) self.max_tokens = model.get("max_tokens", global_config.model.model_max_output_length) # print(f"max_tokens: {self.max_tokens}") - self.custom_params = model.get("custom_params", "{}") - self.custom_params = json.loads(self.custom_params) + custom_params_str = model.get("custom_params", "{}") + try: + self.custom_params = json.loads(custom_params_str) + except json.JSONDecodeError as e: + logger.error(f"Invalid JSON in custom_params for model '{self.model_name}': {custom_params_str}") + self.custom_params = {} + # 获取数据库实例 self._init_database() From 289a92293b0194d5662ddf15218a3947e1a61ccf Mon Sep 17 00:00:00 2001 From: tcmofashi Date: Wed, 2 Jul 2025 13:05:57 +0800 Subject: [PATCH 03/63] =?UTF-8?q?feat:=20=E5=A2=9E=E5=8A=A0=E6=B6=88?= =?UTF-8?q?=E6=81=AF=E6=AE=B5mention=5Fbot=E7=9A=84=E8=A7=A3=E6=9E=90?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/message_receive/message.py | 8 ++------ src/chat/normal_chat/normal_chat.py | 6 +++++- src/chat/utils/utils.py | 3 ++- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/chat/message_receive/message.py b/src/chat/message_receive/message.py index 1c8f7789e..6f2c3f784 100644 --- a/src/chat/message_receive/message.py +++ b/src/chat/message_receive/message.py @@ -108,7 +108,7 @@ class MessageRecv(Message): self.detailed_plain_text = message_dict.get("detailed_plain_text", "") self.is_emoji = False self.is_picid = False - self.is_mentioned = 0.0 + self.is_mentioned = None self.priority_mode = "interest" self.priority_info = None @@ -152,14 +152,10 @@ class MessageRecv(Message): elif segment.type == "mention_bot": self.is_mentioned = float(segment.data) return "" - elif segment.type == "set_priority_mode": - # 处理设置优先级模式的消息段 - if isinstance(segment.data, str): - self.priority_mode = segment.data - return "" elif segment.type == "priority_info": if isinstance(segment.data, dict): # 处理优先级信息 + self.priority_mode = "priority" self.priority_info = segment.data """ { diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 6c285f21d..04958b607 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -494,7 +494,11 @@ class NormalChat: # 检查是否有用户满足关系构建条件 asyncio.create_task(self._check_relation_building_conditions()) - await self.reply_one_message(message) + do_reply = await self.reply_one_message(message) + response_set = do_reply if do_reply else [] + factor = 0.5 + cnt = sum([len(r) for r in response_set]) + await asyncio.sleep(max(1, factor * cnt - 3)) # 等待tts # 等待一段时间再检查队列 await asyncio.sleep(1) diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py index a147846ca..edfb9f31c 100644 --- a/src/chat/utils/utils.py +++ b/src/chat/utils/utils.py @@ -47,7 +47,8 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]: reply_probability = 0.0 is_at = False is_mentioned = False - + if 
message.is_mentioned is not None: + return bool(message.is_mentioned), message.is_mentioned if ( message.message_info.additional_config is not None and message.message_info.additional_config.get("is_mentioned") is not None From 482a171710f0a91344cf5d6a7fe0632bb7715497 Mon Sep 17 00:00:00 2001 From: tcmofashi Date: Wed, 2 Jul 2025 13:25:05 +0800 Subject: [PATCH 04/63] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E7=BC=A9?= =?UTF-8?q?=E8=BF=9B=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/normal_chat/normal_chat.py | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index be13c0605..4d37ff08f 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -808,7 +808,6 @@ class NormalChat: # 回复前处理 thinking_id = await self._create_thinking_message(message) - # 如果启用planner,预先修改可用actions(避免在并行任务中重复调用) available_actions = None if self.enable_planner: @@ -821,19 +820,17 @@ class NormalChat: logger.warning(f"[{self.stream_name}] 获取available_actions失败: {e}") available_actions = None - # 定义并行执行的任务 - async def generate_normal_response(): - """生成普通回复""" - try: - return await self.gpt.generate_response( - message=message, - available_actions=available_actions, - ) - except Exception as e: - logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}") - return None - - + # 定义并行执行的任务 + async def generate_normal_response(): + """生成普通回复""" + try: + return await self.gpt.generate_response( + message=message, + available_actions=available_actions, + ) + except Exception as e: + logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}") + return None async def plan_and_execute_actions(): """规划和执行额外动作""" From 46ad6fd808ee5f015ba334d271b5c13df112233e Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 3 Jul 2025 18:15:21 +0800 Subject: [PATCH 05/63] =?UTF-8?q?fix=EF=BC=9A=E7=A7=81=E8=81=8A=E7=88=86?= =?UTF-8?q?=E7=82=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/replyer/default_generator.py | 12 ++++++------ src/mais4u/mais4u_chat/s4u_chat.py | 9 ++++++--- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index cae4e3e10..41eb8a584 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -617,13 +617,13 @@ class DefaultReplyer: chat_target_name = ( self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方" ) - chat_target_1 = await global_prompt_manager.get_prompt_async( + chat_target_1 = await global_prompt_manager.format_prompt( "chat_target_private1", sender_name=chat_target_name ) - chat_target_2 = await global_prompt_manager.get_prompt_async( + chat_target_2 = await global_prompt_manager.format_prompt( "chat_target_private2", sender_name=chat_target_name ) - + prompt = await global_prompt_manager.format_prompt( template_name, expression_habits_block=expression_habits_block, @@ -747,13 +747,13 @@ class DefaultReplyer: chat_target_name = ( self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方" ) - chat_target_1 = await global_prompt_manager.get_prompt_async( + chat_target_1 = await global_prompt_manager.format_prompt( "chat_target_private1", sender_name=chat_target_name 
) - chat_target_2 = await global_prompt_manager.get_prompt_async( + chat_target_2 = await global_prompt_manager.format_prompt( "chat_target_private2", sender_name=chat_target_name ) - + template_name = "default_expressor_prompt" prompt = await global_prompt_manager.format_prompt( diff --git a/src/mais4u/mais4u_chat/s4u_chat.py b/src/mais4u/mais4u_chat/s4u_chat.py index dac652a98..28c19ab74 100644 --- a/src/mais4u/mais4u_chat/s4u_chat.py +++ b/src/mais4u/mais4u_chat/s4u_chat.py @@ -165,6 +165,9 @@ class S4UChat: self._is_replying = False self.gpt = S4UStreamGenerator() + self.interest_dict: Dict[str, float] = {} # 用户兴趣分 + self.at_bot_priority_bonus = 100.0 # @机器人的优先级加成 + self.normal_queue_max_size = 50 # 普通队列最大容量 logger.info(f"[{self.stream_name}] S4UChat with two-queue system initialized.") def _is_vip(self, message: MessageRecv) -> bool: @@ -196,7 +199,7 @@ class S4UChat: async def add_message(self, message: MessageRecv) -> None: """根据VIP状态和中断逻辑将消息放入相应队列。""" is_vip = self._is_vip(message) - self._get_message_priority(message) + new_priority_score = self._calculate_base_priority_score(message) should_interrupt = False if self._current_generation_task and not self._current_generation_task.done(): @@ -218,11 +221,11 @@ class S4UChat: new_sender_id = message.message_info.user_info.user_id current_sender_id = current_msg.message_info.user_info.user_id # 新消息优先级更高 - if new_priority_score > current_priority_score: + if new_priority_score > current_priority: should_interrupt = True logger.info(f"[{self.stream_name}] New normal message has higher priority, interrupting.") # 同用户,新消息的优先级不能更低 - elif new_sender_id == current_sender_id and new_priority_score >= current_priority_score: + elif new_sender_id == current_sender_id and new_priority_score >= current_priority: should_interrupt = True logger.info(f"[{self.stream_name}] Same user sent new message, interrupting.") From 42a6ddedb985231b0f163623c549df42149d4a17 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 3 Jul 2025 10:15:40 +0000 Subject: [PATCH 06/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/replyer/default_generator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 41eb8a584..4aa275d6f 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -623,7 +623,7 @@ class DefaultReplyer: chat_target_2 = await global_prompt_manager.format_prompt( "chat_target_private2", sender_name=chat_target_name ) - + prompt = await global_prompt_manager.format_prompt( template_name, expression_habits_block=expression_habits_block, @@ -753,7 +753,7 @@ class DefaultReplyer: chat_target_2 = await global_prompt_manager.format_prompt( "chat_target_private2", sender_name=chat_target_name ) - + template_name = "default_expressor_prompt" prompt = await global_prompt_manager.format_prompt( From 77473b58175fdfbc654a4acbfb4c28b2ea230ac5 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 3 Jul 2025 21:18:15 +0800 Subject: [PATCH 07/63] =?UTF-8?q?fix=EF=BC=9A=E7=A7=BB=E9=99=A4=E5=A4=9A?= =?UTF-8?q?=E4=BD=99=E9=A1=B9=E7=9B=AE=EF=BC=8C=E4=BF=AE=E6=94=B9=E6=8F=92?= =?UTF-8?q?=E4=BB=B6=E9=85=8D=E7=BD=AE=E6=96=87=E4=BB=B6=E4=BD=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- src/main.py | 3 + src/plugin_system/base/base_plugin.py | 146 ++++++++++---------- src/plugins/built_in/core_actions/plugin.py | 2 +- template/bot_config_template.toml | 1 - 4 files changed, 75 insertions(+), 77 deletions(-) diff --git a/src/main.py b/src/main.py index 768913c4b..e814a86b7 100644 --- a/src/main.py +++ b/src/main.py @@ -42,6 +42,9 @@ willing_manager = get_willing_manager() logger = get_logger("main") +from src.manager.local_store_manager import local_storage +from src.manager.mood_manager import MoodUpdateTask, MoodPrintTask + class MainSystem: def __init__(self): diff --git a/src/plugin_system/base/base_plugin.py b/src/plugin_system/base/base_plugin.py index 5c7edd23b..5afb06d78 100644 --- a/src/plugin_system/base/base_plugin.py +++ b/src/plugin_system/base/base_plugin.py @@ -292,15 +292,15 @@ class BasePlugin(ABC): if "plugin" in self.config_schema and isinstance(self.config_schema["plugin"], dict): config_version_field = self.config_schema["plugin"].get("config_version") if isinstance(config_version_field, ConfigField): - return config_version_field.default - return "1.0.0" + return str(config_version_field.default) + return "" def _get_current_config_version(self, config: Dict[str, Any]) -> str: - """从配置文件中获取当前版本号""" - if "plugin" in config and "config_version" in config["plugin"]: - return str(config["plugin"]["config_version"]) - # 如果没有config_version字段,视为最早的版本 - return "0.0.0" + """从已加载的配置中获取当前版本号""" + # 兼容旧版,尝试从'plugin'或'Plugin'节获取 + if "plugin" in config and isinstance(config.get("plugin"), dict): + return str(config["plugin"].get("config_version", "")) + return "" # 返回空字符串表示未找到 def _backup_config_file(self, config_file_path: str) -> str: """备份配置文件""" @@ -383,6 +383,23 @@ class BasePlugin(ABC): return migrated_config + def _ensure_config_completeness(self, existing_config: Dict[str, Any]) -> Dict[str, Any]: + """确保现有配置的完整性,用schema中的默认值填充缺失的键""" + if not self.config_schema: + return existing_config + + # 创建一个基于schema的完整配置作为参考 + full_config = self._generate_config_from_schema() + migrated_config = self._migrate_config_values(existing_config, full_config) + + # 检查是否有任何值被修改过(即,有缺失的键被填充) + if migrated_config != existing_config: + logger.info(f"{self.log_prefix} 检测到配置文件中缺少部分字段,已使用默认值补全。") + # 注意:这里可以选择是否要自动写回文件,目前只在内存中更新 + # self._save_config_to_file(migrated_config, config_file_path) + + return migrated_config + def _generate_config_from_schema(self) -> Dict[str, Any]: """根据schema生成配置数据结构(不写入文件)""" if not self.config_schema: @@ -474,86 +491,65 @@ class BasePlugin(ABC): logger.error(f"{self.log_prefix} 保存配置文件失败: {e}", exc_info=True) def _load_plugin_config(self): - """加载插件配置文件,支持版本检查和自动迁移""" + """加载插件配置文件,并处理版本迁移""" if not self.config_file_name: - logger.debug(f"{self.log_prefix} 未指定配置文件,跳过加载") + logger.debug(f"{self.log_prefix} 插件未指定配置文件,跳过加载") return - # 优先使用传入的插件目录路径 - if self.plugin_dir: - plugin_dir = self.plugin_dir - else: - # fallback:尝试从类的模块信息获取路径 - try: - plugin_module_path = inspect.getfile(self.__class__) - plugin_dir = os.path.dirname(plugin_module_path) - except (TypeError, OSError): - # 最后的fallback:从模块的__file__属性获取 - module = inspect.getmodule(self.__class__) - if module and hasattr(module, "__file__") and module.__file__: - plugin_dir = os.path.dirname(module.__file__) - else: - logger.warning(f"{self.log_prefix} 无法获取插件目录路径,跳过配置加载") - return + config_dir = os.path.join("config", "plugins", self.plugin_name) + os.makedirs(config_dir, exist_ok=True) + config_file_path = os.path.join(config_dir, self.config_file_name) - 
config_file_path = os.path.join(plugin_dir, self.config_file_name) - - # 如果配置文件不存在,生成默认配置 + # 1. 配置文件不存在 if not os.path.exists(config_file_path): - logger.info(f"{self.log_prefix} 配置文件 {config_file_path} 不存在,将生成默认配置。") - self._generate_and_save_default_config(config_file_path) - - if not os.path.exists(config_file_path): - logger.warning(f"{self.log_prefix} 配置文件 {config_file_path} 不存在且无法生成。") + logger.info(f"{self.log_prefix} 未找到配置文件,将创建默认配置: {config_file_path}") + self.config = self._generate_config_from_schema() + self._save_config_to_file(self.config, config_file_path) return - file_ext = os.path.splitext(self.config_file_name)[1].lower() - - if file_ext == ".toml": - # 加载现有配置 + # 2. 配置文件存在,加载并检查版本 + try: with open(config_file_path, "r", encoding="utf-8") as f: - existing_config = toml.load(f) or {} + loaded_config = toml.load(f) + except Exception as e: + logger.error(f"{self.log_prefix} 加载配置文件失败: {e},将使用默认配置") + self.config = self._generate_config_from_schema() + return - # 检查配置版本 - current_version = self._get_current_config_version(existing_config) + expected_version = self._get_expected_config_version() + current_version = self._get_current_config_version(loaded_config) - # 如果配置文件没有版本信息,跳过版本检查 - if current_version == "0.0.0": - logger.debug(f"{self.log_prefix} 配置文件无版本信息,跳过版本检查") - self.config = existing_config - else: - expected_version = self._get_expected_config_version() + # 3. 版本匹配,直接加载 + # 如果版本匹配,或者没有可预期的版本(例如插件未定义),则直接加载 + if not expected_version or (current_version and expected_version == current_version): + logger.debug(f"{self.log_prefix} 配置文件版本匹配 (v{current_version}),直接加载") + self.config = self._ensure_config_completeness(loaded_config) + return - if current_version != expected_version: - logger.info( - f"{self.log_prefix} 检测到配置版本需要更新: 当前=v{current_version}, 期望=v{expected_version}" - ) - - # 生成新的默认配置结构 - new_config_structure = self._generate_config_from_schema() - - # 迁移旧配置值到新结构 - migrated_config = self._migrate_config_values(existing_config, new_config_structure) - - # 保存迁移后的配置 - self._save_config_to_file(migrated_config, config_file_path) - - logger.info(f"{self.log_prefix} 配置文件已从 v{current_version} 更新到 v{expected_version}") - - self.config = migrated_config - else: - logger.debug(f"{self.log_prefix} 配置版本匹配 (v{current_version}),直接加载") - self.config = existing_config - - logger.debug(f"{self.log_prefix} 配置已从 {config_file_path} 加载") - - # 从配置中更新 enable_plugin - if "plugin" in self.config and "enabled" in self.config["plugin"]: - self.enable_plugin = self.config["plugin"]["enabled"] - logger.debug(f"{self.log_prefix} 从配置更新插件启用状态: {self.enable_plugin}") + # 4. 版本不匹配或当前版本未知,执行迁移 + if current_version: + logger.info( + f"{self.log_prefix} 配置文件版本不匹配 (v{current_version} -> v{expected_version}),开始迁移..." 
+ ) else: - logger.warning(f"{self.log_prefix} 不支持的配置文件格式: {file_ext},仅支持 .toml") - self.config = {} + # 如果配置文件中没有版本信息,也触发更新 + logger.info(f"{self.log_prefix} 未在配置文件中找到版本信息,将执行更新...") + + # 备份旧文件 + backup_path = self._backup_config_file(config_file_path) + logger.info(f"{self.log_prefix} 已备份旧配置文件到: {backup_path}") + + # 生成新的配置结构 + new_config = self._generate_config_from_schema() + + # 迁移旧的配置值 + migrated_config = self._migrate_config_values(loaded_config, new_config) + + # 保存新的配置文件 + self._save_config_to_file(migrated_config, config_file_path) + logger.info(f"{self.log_prefix} 配置文件更新完成!") + + self.config = migrated_config @abstractmethod def get_plugin_components(self) -> List[tuple[ComponentInfo, Type]]: diff --git a/src/plugins/built_in/core_actions/plugin.py b/src/plugins/built_in/core_actions/plugin.py index c34adbfd2..cb469ae87 100644 --- a/src/plugins/built_in/core_actions/plugin.py +++ b/src/plugins/built_in/core_actions/plugin.py @@ -141,7 +141,7 @@ class CoreActionsPlugin(BasePlugin): config_schema = { "plugin": { "enabled": ConfigField(type=bool, default=True, description="是否启用插件"), - "config_version": ConfigField(type=str, default="0.2.0", description="配置文件版本"), + "config_version": ConfigField(type=str, default="0.3.1", description="配置文件版本"), }, "components": { "enable_reply": ConfigField(type=bool, default=True, description="是否启用'回复'动作"), diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 365d6db4f..84bca3718 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -116,7 +116,6 @@ ban_msgs_regex = [ [normal_chat] #普通聊天 #一般回复参数 -replyer_random_probability = 0.5 # 麦麦回答时选择首要模型的概率(与之相对的,次要模型的概率为1 - replyer_random_probability) emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率,设置为1让麦麦自己决定发不发 thinking_timeout = 120 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢) From 0a2001294a60cfadfcaeec1862ad8fb0daf69110 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 3 Jul 2025 13:18:41 +0000 Subject: [PATCH 08/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/main.py | 3 --- src/plugin_system/base/base_plugin.py | 1 - 2 files changed, 4 deletions(-) diff --git a/src/main.py b/src/main.py index e814a86b7..768913c4b 100644 --- a/src/main.py +++ b/src/main.py @@ -42,9 +42,6 @@ willing_manager = get_willing_manager() logger = get_logger("main") -from src.manager.local_store_manager import local_storage -from src.manager.mood_manager import MoodUpdateTask, MoodPrintTask - class MainSystem: def __init__(self): diff --git a/src/plugin_system/base/base_plugin.py b/src/plugin_system/base/base_plugin.py index 5afb06d78..70d08f8ae 100644 --- a/src/plugin_system/base/base_plugin.py +++ b/src/plugin_system/base/base_plugin.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod from typing import Dict, List, Type, Optional, Any, Union import os -import inspect import toml import json from src.common.logger import get_logger From 011032c876e09fd7db2fd8d919d6563835028cad Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 3 Jul 2025 21:26:16 +0800 Subject: [PATCH 09/63] Update base_plugin.py --- src/plugin_system/base/base_plugin.py | 28 +++++---------------------- 1 file changed, 5 insertions(+), 23 deletions(-) diff --git a/src/plugin_system/base/base_plugin.py b/src/plugin_system/base/base_plugin.py index 5afb06d78..a2e616224 100644 --- 
a/src/plugin_system/base/base_plugin.py +++ b/src/plugin_system/base/base_plugin.py @@ -302,22 +302,6 @@ class BasePlugin(ABC): return str(config["plugin"].get("config_version", "")) return "" # 返回空字符串表示未找到 - def _backup_config_file(self, config_file_path: str) -> str: - """备份配置文件""" - import shutil - import datetime - - timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") - backup_path = f"{config_file_path}.backup_{timestamp}" - - try: - shutil.copy2(config_file_path, backup_path) - logger.info(f"{self.log_prefix} 配置文件已备份到: {backup_path}") - return backup_path - except Exception as e: - logger.error(f"{self.log_prefix} 备份配置文件失败: {e}") - return "" - def _migrate_config_values(self, old_config: Dict[str, Any], new_config: Dict[str, Any]) -> Dict[str, Any]: """将旧配置值迁移到新配置结构中 @@ -496,9 +480,11 @@ class BasePlugin(ABC): logger.debug(f"{self.log_prefix} 插件未指定配置文件,跳过加载") return - config_dir = os.path.join("config", "plugins", self.plugin_name) - os.makedirs(config_dir, exist_ok=True) - config_file_path = os.path.join(config_dir, self.config_file_name) + if not self.plugin_dir: + logger.warning(f"{self.log_prefix} 插件目录未设置,无法加载配置文件") + return + + config_file_path = os.path.join(self.plugin_dir, self.config_file_name) # 1. 配置文件不存在 if not os.path.exists(config_file_path): @@ -535,10 +521,6 @@ class BasePlugin(ABC): # 如果配置文件中没有版本信息,也触发更新 logger.info(f"{self.log_prefix} 未在配置文件中找到版本信息,将执行更新...") - # 备份旧文件 - backup_path = self._backup_config_file(config_file_path) - logger.info(f"{self.log_prefix} 已备份旧配置文件到: {backup_path}") - # 生成新的配置结构 new_config = self._generate_config_from_schema() From a15f3a63d2c7b4bde82de8838a452deb4e8c44bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A2=A8=E6=A2=93=E6=9F=92?= <1787882683@qq.com> Date: Thu, 3 Jul 2025 21:51:21 +0800 Subject: [PATCH 10/63] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c2b9461a1..69c36e595 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,7 @@ ## 🔥 更新和安装 -**最新版本: v0.7.0** ([更新日志](changelogs/changelog.md)) +**最新版本: v0.8.0** ([更新日志](changelogs/changelog.md)) 可前往 [Release](https://github.com/MaiM-with-u/MaiBot/releases/) 页面下载最新版本 可前往 [启动器发布页面](https://github.com/MaiM-with-u/mailauncher/releases/tag/v0.1.0)下载最新启动器 **GitHub 分支说明:** From 3e51c4fdf39d4a91140670e642dba32d917fa59c Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Thu, 3 Jul 2025 21:58:04 +0800 Subject: [PATCH 11/63] Update base_plugin.py --- src/plugin_system/base/base_plugin.py | 171 +++++++++++++++----------- 1 file changed, 97 insertions(+), 74 deletions(-) diff --git a/src/plugin_system/base/base_plugin.py b/src/plugin_system/base/base_plugin.py index 3807a8f5f..1f7941b10 100644 --- a/src/plugin_system/base/base_plugin.py +++ b/src/plugin_system/base/base_plugin.py @@ -1,6 +1,7 @@ from abc import ABC, abstractmethod from typing import Dict, List, Type, Optional, Any, Union import os +import inspect import toml import json from src.common.logger import get_logger @@ -291,15 +292,31 @@ class BasePlugin(ABC): if "plugin" in self.config_schema and isinstance(self.config_schema["plugin"], dict): config_version_field = self.config_schema["plugin"].get("config_version") if isinstance(config_version_field, ConfigField): - return str(config_version_field.default) - return "" + return config_version_field.default + return "1.0.0" def _get_current_config_version(self, config: Dict[str, Any]) -> str: - """从已加载的配置中获取当前版本号""" - # 兼容旧版,尝试从'plugin'或'Plugin'节获取 - if "plugin" in config and 
isinstance(config.get("plugin"), dict): - return str(config["plugin"].get("config_version", "")) - return "" # 返回空字符串表示未找到 + """从配置文件中获取当前版本号""" + if "plugin" in config and "config_version" in config["plugin"]: + return str(config["plugin"]["config_version"]) + # 如果没有config_version字段,视为最早的版本 + return "0.0.0" + + def _backup_config_file(self, config_file_path: str) -> str: + """备份配置文件""" + import shutil + import datetime + + timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + backup_path = f"{config_file_path}.backup_{timestamp}" + + try: + shutil.copy2(config_file_path, backup_path) + logger.info(f"{self.log_prefix} 配置文件已备份到: {backup_path}") + return backup_path + except Exception as e: + logger.error(f"{self.log_prefix} 备份配置文件失败: {e}") + return "" def _migrate_config_values(self, old_config: Dict[str, Any], new_config: Dict[str, Any]) -> Dict[str, Any]: """将旧配置值迁移到新配置结构中 @@ -366,23 +383,6 @@ class BasePlugin(ABC): return migrated_config - def _ensure_config_completeness(self, existing_config: Dict[str, Any]) -> Dict[str, Any]: - """确保现有配置的完整性,用schema中的默认值填充缺失的键""" - if not self.config_schema: - return existing_config - - # 创建一个基于schema的完整配置作为参考 - full_config = self._generate_config_from_schema() - migrated_config = self._migrate_config_values(existing_config, full_config) - - # 检查是否有任何值被修改过(即,有缺失的键被填充) - if migrated_config != existing_config: - logger.info(f"{self.log_prefix} 检测到配置文件中缺少部分字段,已使用默认值补全。") - # 注意:这里可以选择是否要自动写回文件,目前只在内存中更新 - # self._save_config_to_file(migrated_config, config_file_path) - - return migrated_config - def _generate_config_from_schema(self) -> Dict[str, Any]: """根据schema生成配置数据结构(不写入文件)""" if not self.config_schema: @@ -474,63 +474,86 @@ class BasePlugin(ABC): logger.error(f"{self.log_prefix} 保存配置文件失败: {e}", exc_info=True) def _load_plugin_config(self): - """加载插件配置文件,并处理版本迁移""" + """加载插件配置文件,支持版本检查和自动迁移""" if not self.config_file_name: - logger.debug(f"{self.log_prefix} 插件未指定配置文件,跳过加载") + logger.debug(f"{self.log_prefix} 未指定配置文件,跳过加载") return - if not self.plugin_dir: - logger.warning(f"{self.log_prefix} 插件目录未设置,无法加载配置文件") - return - - config_file_path = os.path.join(self.plugin_dir, self.config_file_name) - - # 1. 配置文件不存在 - if not os.path.exists(config_file_path): - logger.info(f"{self.log_prefix} 未找到配置文件,将创建默认配置: {config_file_path}") - self.config = self._generate_config_from_schema() - self._save_config_to_file(self.config, config_file_path) - return - - # 2. 配置文件存在,加载并检查版本 - try: - with open(config_file_path, "r", encoding="utf-8") as f: - loaded_config = toml.load(f) - except Exception as e: - logger.error(f"{self.log_prefix} 加载配置文件失败: {e},将使用默认配置") - self.config = self._generate_config_from_schema() - return - - expected_version = self._get_expected_config_version() - current_version = self._get_current_config_version(loaded_config) - - # 3. 版本匹配,直接加载 - # 如果版本匹配,或者没有可预期的版本(例如插件未定义),则直接加载 - if not expected_version or (current_version and expected_version == current_version): - logger.debug(f"{self.log_prefix} 配置文件版本匹配 (v{current_version}),直接加载") - self.config = self._ensure_config_completeness(loaded_config) - return - - # 4. 版本不匹配或当前版本未知,执行迁移 - if current_version: - logger.info( - f"{self.log_prefix} 配置文件版本不匹配 (v{current_version} -> v{expected_version}),开始迁移..." 
- ) + # 优先使用传入的插件目录路径 + if self.plugin_dir: + plugin_dir = self.plugin_dir else: - # 如果配置文件中没有版本信息,也触发更新 - logger.info(f"{self.log_prefix} 未在配置文件中找到版本信息,将执行更新...") + # fallback:尝试从类的模块信息获取路径 + try: + plugin_module_path = inspect.getfile(self.__class__) + plugin_dir = os.path.dirname(plugin_module_path) + except (TypeError, OSError): + # 最后的fallback:从模块的__file__属性获取 + module = inspect.getmodule(self.__class__) + if module and hasattr(module, "__file__") and module.__file__: + plugin_dir = os.path.dirname(module.__file__) + else: + logger.warning(f"{self.log_prefix} 无法获取插件目录路径,跳过配置加载") + return - # 生成新的配置结构 - new_config = self._generate_config_from_schema() + config_file_path = os.path.join(plugin_dir, self.config_file_name) - # 迁移旧的配置值 - migrated_config = self._migrate_config_values(loaded_config, new_config) + # 如果配置文件不存在,生成默认配置 + if not os.path.exists(config_file_path): + logger.info(f"{self.log_prefix} 配置文件 {config_file_path} 不存在,将生成默认配置。") + self._generate_and_save_default_config(config_file_path) - # 保存新的配置文件 - self._save_config_to_file(migrated_config, config_file_path) - logger.info(f"{self.log_prefix} 配置文件更新完成!") + if not os.path.exists(config_file_path): + logger.warning(f"{self.log_prefix} 配置文件 {config_file_path} 不存在且无法生成。") + return - self.config = migrated_config + file_ext = os.path.splitext(self.config_file_name)[1].lower() + + if file_ext == ".toml": + # 加载现有配置 + with open(config_file_path, "r", encoding="utf-8") as f: + existing_config = toml.load(f) or {} + + # 检查配置版本 + current_version = self._get_current_config_version(existing_config) + + # 如果配置文件没有版本信息,跳过版本检查 + if current_version == "0.0.0": + logger.debug(f"{self.log_prefix} 配置文件无版本信息,跳过版本检查") + self.config = existing_config + else: + expected_version = self._get_expected_config_version() + + if current_version != expected_version: + logger.info( + f"{self.log_prefix} 检测到配置版本需要更新: 当前=v{current_version}, 期望=v{expected_version}" + ) + + # 生成新的默认配置结构 + new_config_structure = self._generate_config_from_schema() + + # 迁移旧配置值到新结构 + migrated_config = self._migrate_config_values(existing_config, new_config_structure) + + # 保存迁移后的配置 + self._save_config_to_file(migrated_config, config_file_path) + + logger.info(f"{self.log_prefix} 配置文件已从 v{current_version} 更新到 v{expected_version}") + + self.config = migrated_config + else: + logger.debug(f"{self.log_prefix} 配置版本匹配 (v{current_version}),直接加载") + self.config = existing_config + + logger.debug(f"{self.log_prefix} 配置已从 {config_file_path} 加载") + + # 从配置中更新 enable_plugin + if "plugin" in self.config and "enabled" in self.config["plugin"]: + self.enable_plugin = self.config["plugin"]["enabled"] + logger.debug(f"{self.log_prefix} 从配置更新插件启用状态: {self.enable_plugin}") + else: + logger.warning(f"{self.log_prefix} 不支持的配置文件格式: {file_ext},仅支持 .toml") + self.config = {} @abstractmethod def get_plugin_components(self) -> List[tuple[ComponentInfo, Type]]: @@ -657,4 +680,4 @@ def instantiate_and_register_plugin(plugin_class: Type["BasePlugin"], plugin_dir import traceback logger.error(traceback.format_exc()) - return False + return False \ No newline at end of file From 81156bf40345bfe0fdb25ea601d44992b9c09e43 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 3 Jul 2025 14:01:23 +0000 Subject: [PATCH 12/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugin_system/base/base_plugin.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/src/plugin_system/base/base_plugin.py b/src/plugin_system/base/base_plugin.py index 1f7941b10..5c7edd23b 100644 --- a/src/plugin_system/base/base_plugin.py +++ b/src/plugin_system/base/base_plugin.py @@ -680,4 +680,4 @@ def instantiate_and_register_plugin(plugin_class: Type["BasePlugin"], plugin_dir import traceback logger.error(traceback.format_exc()) - return False \ No newline at end of file + return False From 2e1ecb41b7a16e7a1ae787c9bb35e19d7602991a Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 01:18:50 +0800 Subject: [PATCH 13/63] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=E7=A6=81?= =?UTF-8?q?=E8=A8=80=E6=8F=92=E4=BB=B6=E6=89=BE=E4=B8=8D=E5=88=B0user=5Fid?= =?UTF-8?q?=E4=BB=8D=E7=84=B6=E5=8F=91=E9=80=81=E6=8C=87=E4=BB=A4=E7=9A=84?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/built_in/mute_plugin/plugin.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/plugins/built_in/mute_plugin/plugin.py b/src/plugins/built_in/mute_plugin/plugin.py index 394d38f5d..9df166a53 100644 --- a/src/plugins/built_in/mute_plugin/plugin.py +++ b/src/plugins/built_in/mute_plugin/plugin.py @@ -369,10 +369,10 @@ class MuteCommand(BaseCommand): # 获取用户ID person_id = person_api.get_person_id_by_name(target) - user_id = person_api.get_person_value(person_id, "user_id") - if not user_id: - error_msg = f"未找到用户 {target} 的ID" - await self.send_text(f"❌ 找不到用户: {target}") + user_id = await person_api.get_person_value(person_id, "user_id") + if not user_id or user_id == "unknown": + error_msg = f"未找到用户 {target} 的ID,请输入person_name进行禁言" + await self.send_text(f"❌ 找不到用户 {target} 的ID,请输入person_name进行禁言,而不是qq号或者昵称") logger.error(f"{self.log_prefix} {error_msg}") return False, error_msg From a221f8c5abe6943e46aaa72fe4684eeb0b40fb75 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 01:20:18 +0800 Subject: [PATCH 14/63] Update plugin.py --- src/plugins/built_in/mute_plugin/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/built_in/mute_plugin/plugin.py b/src/plugins/built_in/mute_plugin/plugin.py index 9df166a53..a4fa7d17a 100644 --- a/src/plugins/built_in/mute_plugin/plugin.py +++ b/src/plugins/built_in/mute_plugin/plugin.py @@ -475,7 +475,7 @@ class MutePlugin(BasePlugin): }, "components": { "enable_smart_mute": ConfigField(type=bool, default=True, description="是否启用智能禁言Action"), - "enable_mute_command": ConfigField(type=bool, default=False, description="是否启用禁言命令Command"), + "enable_mute_command": ConfigField(type=bool, default=False, description="是否启用禁言命令Command(调试用)"), }, "permissions": { "allowed_users": ConfigField( From 2683e0b24ade5b281c46f90a90e02c1597089ffe Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 4 Jul 2025 17:20:30 +0000 Subject: [PATCH 15/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/built_in/mute_plugin/plugin.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/plugins/built_in/mute_plugin/plugin.py b/src/plugins/built_in/mute_plugin/plugin.py index a4fa7d17a..43f5f81c4 100644 --- a/src/plugins/built_in/mute_plugin/plugin.py +++ b/src/plugins/built_in/mute_plugin/plugin.py @@ -475,7 +475,9 @@ class MutePlugin(BasePlugin): 
}, "components": { "enable_smart_mute": ConfigField(type=bool, default=True, description="是否启用智能禁言Action"), - "enable_mute_command": ConfigField(type=bool, default=False, description="是否启用禁言命令Command(调试用)"), + "enable_mute_command": ConfigField( + type=bool, default=False, description="是否启用禁言命令Command(调试用)" + ), }, "permissions": { "allowed_users": ConfigField( From 621b706d415d7d0f3d7d96cddfead2c7c4617848 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 01:37:54 +0800 Subject: [PATCH 16/63] =?UTF-8?q?fix=E4=BF=AE=E5=A4=8Dfocus=E5=86=B7?= =?UTF-8?q?=E5=8D=B4=E6=97=B6=E9=97=B4=E5=AF=BC=E8=87=B4=E7=9A=84=E5=9B=BA?= =?UTF-8?q?=E5=AE=9A=E6=B2=89=E9=BB=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 13 +++++++++++++ src/chat/heart_flow/sub_heartflow.py | 8 ++++++-- src/chat/normal_chat/normal_chat.py | 12 +++++++----- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/changelogs/changelog.md b/changelogs/changelog.md index 92d59d18c..8fe8c5e32 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -2,8 +2,21 @@ ## [0.8.1] - 2025-6-27 +功能更新: + +- normal现在和focus一样支持tool +- focus现在和normal一样每次调用lpmm +- 移除人格表达 + +优化和修复: + - 修复表情包配置无效问题 - 合并normal和focus的prompt构建 +- 非TTY环境禁用console_input_loop +- 修复过滤消息仍被存储至数据库的问题 +- 私聊强制开启focus模式 +- 支持解析reply_to和at +- 修复focus冷却时间导致的固定沉默 diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py index 03bb71c62..d255061fb 100644 --- a/src/chat/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -137,27 +137,31 @@ class SubHeartflow: self.normal_chat_instance = None # 启动/初始化失败,清理实例 return False - async def _handle_switch_to_focus_request(self) -> None: + async def _handle_switch_to_focus_request(self) -> bool: """ 处理来自NormalChat的切换到focus模式的请求 Args: stream_id: 请求切换的stream_id + Returns: + bool: 切换成功返回True,失败返回False """ logger.info(f"{self.log_prefix} 收到NormalChat请求切换到focus模式") # 检查是否在focus冷却期内 if self.is_in_focus_cooldown(): logger.info(f"{self.log_prefix} 正在focus冷却期内,忽略切换到focus模式的请求") - return + return False # 切换到focus模式 current_state = self.chat_state.chat_status if current_state == ChatState.NORMAL: await self.change_chat_state(ChatState.FOCUSED) logger.info(f"{self.log_prefix} 已根据NormalChat请求从NORMAL切换到FOCUSED状态") + return True else: logger.warning(f"{self.log_prefix} 当前状态为{current_state.value},无法切换到FOCUSED状态") + return False async def _handle_stop_focus_chat_request(self) -> None: """ diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index c7edbff3b..a53a3d185 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -124,8 +124,6 @@ class NormalChat: self._chat_task: Optional[asyncio.Task] = None self._disabled = False # 停用标志 - self.on_switch_to_focus_callback = on_switch_to_focus_callback - # 新增:回复模式和优先级管理器 self.reply_mode = self.chat_stream.context.get_priority_mode() if self.reply_mode == "priority": @@ -729,10 +727,14 @@ class NormalChat: # 新增:在auto模式下检查是否需要直接切换到focus模式 if global_config.chat.chat_mode == "auto": if await self._check_should_switch_to_focus(): - logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件,直接执行切换") + logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件,尝试执行切换") if self.on_switch_to_focus_callback: - await self.on_switch_to_focus_callback() - return + switched_successfully = await self.on_switch_to_focus_callback() + if switched_successfully: + logger.info(f"[{self.stream_name}] 
成功切换到focus模式,中止NormalChat处理") + return + else: + logger.info(f"[{self.stream_name}] 切换到focus模式失败(可能在冷却中),继续NormalChat处理") else: logger.warning(f"[{self.stream_name}] 没有设置切换到focus聊天模式的回调函数,无法执行切换") From 40109b2e66409b3aabac126c4ff505f2b7be96c9 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 19:31:34 +0800 Subject: [PATCH 17/63] =?UTF-8?q?feat=EF=BC=9A=E7=96=B2=E5=8A=B3=E6=97=B6?= =?UTF-8?q?=E9=99=8D=E4=BD=8E=E5=9B=9E=E5=A4=8D=E9=A2=91=E7=8E=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/sub_heartflow.py | 24 +++++++++++++++++ src/chat/normal_chat/normal_chat.py | 40 +++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py index d255061fb..cecacec2e 100644 --- a/src/chat/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -126,6 +126,7 @@ class SubHeartflow: chat_stream=chat_stream, interest_dict=self.interest_dict, on_switch_to_focus_callback=self._handle_switch_to_focus_request, + get_cooldown_progress_callback=self.get_cooldown_progress, ) logger.info(f"{log_prefix} 开始普通聊天,随便水群...") @@ -443,3 +444,26 @@ class SubHeartflow: ) return is_cooling + + def get_cooldown_progress(self) -> float: + """获取冷却进度,返回0-1之间的值 + + Returns: + float: 0表示刚开始冷却,1表示冷却完成 + """ + if self.last_focus_exit_time == 0: + return 1.0 # 没有冷却,返回1表示完全恢复 + + # 基础冷却时间10分钟,受auto_focus_threshold调控 + base_cooldown = 10 * 60 # 10分钟转换为秒 + cooldown_duration = base_cooldown / global_config.chat.auto_focus_threshold + + current_time = time.time() + elapsed_since_exit = current_time - self.last_focus_exit_time + + if elapsed_since_exit >= cooldown_duration: + return 1.0 # 冷却完成 + + # 计算进度:0表示刚开始冷却,1表示冷却完成 + progress = elapsed_since_exit / cooldown_duration + return progress diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index a53a3d185..edaf39003 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -54,7 +54,7 @@ class NormalChat: 每个聊天(私聊或群聊)都会有一个独立的NormalChat实例。 """ - def __init__(self, chat_stream: ChatStream, interest_dict: dict = None, on_switch_to_focus_callback=None): + def __init__(self, chat_stream: ChatStream, interest_dict: dict = None, on_switch_to_focus_callback=None, get_cooldown_progress_callback=None): """ 初始化NormalChat实例。 @@ -109,6 +109,9 @@ class NormalChat: # 添加回调函数,用于在满足条件时通知切换到focus_chat模式 self.on_switch_to_focus_callback = on_switch_to_focus_callback + + # 添加回调函数,用于获取冷却进度 + self.get_cooldown_progress_callback = get_cooldown_progress_callback self._disabled = False # 增加停用标志 @@ -767,6 +770,17 @@ class NormalChat: reply_probability += message.message_info.additional_config["maimcore_reply_probability_gain"] reply_probability = min(max(reply_probability, 0), 1) # 确保概率在 0-1 之间 + # 应用疲劳期回复频率调整 + fatigue_multiplier = self._get_fatigue_reply_multiplier() + original_probability = reply_probability + reply_probability *= fatigue_multiplier + + # 如果应用了疲劳调整,记录日志 + if fatigue_multiplier < 1.0: + logger.info( + f"[{self.stream_name}] 疲劳期回复频率调整: {original_probability * 100:.1f}% -> {reply_probability * 100:.1f}% (系数: {fatigue_multiplier:.2f})" + ) + # 打印消息信息 mes_name = self.chat_stream.group_info.group_name if self.chat_stream.group_info else "私聊" # current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time)) @@ -1323,6 +1337,30 @@ class NormalChat: logger.error(f"[{self.stream_name}] 为 
{person_id} 更新印象时发生错误: {e}") logger.error(traceback.format_exc()) + def _get_fatigue_reply_multiplier(self) -> float: + """获取疲劳期回复频率调整系数 + + Returns: + float: 回复频率调整系数,范围0.5-1.0 + """ + if not self.get_cooldown_progress_callback: + return 1.0 # 没有冷却进度回调,返回正常系数 + + try: + cooldown_progress = self.get_cooldown_progress_callback() + + if cooldown_progress >= 1.0: + return 1.0 # 冷却完成,正常回复频率 + + # 疲劳期间:从0.5逐渐恢复到1.0 + # progress=0时系数为0.5,progress=1时系数为1.0 + multiplier = 0.2 + (0.8 * cooldown_progress) + + return multiplier + except Exception as e: + logger.warning(f"[{self.stream_name}] 获取疲劳调整系数时出错: {e}") + return 1.0 # 出错时返回正常系数 + async def _check_should_switch_to_focus(self) -> bool: """ 检查是否满足切换到focus模式的条件 From 324b4b1b3f42102ba5ea8216cf41e422b8971b54 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 5 Jul 2025 11:31:56 +0000 Subject: [PATCH 18/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/sub_heartflow.py | 10 +++++----- src/chat/normal_chat/normal_chat.py | 22 ++++++++++++++-------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py index cecacec2e..98bc07337 100644 --- a/src/chat/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -447,23 +447,23 @@ class SubHeartflow: def get_cooldown_progress(self) -> float: """获取冷却进度,返回0-1之间的值 - + Returns: float: 0表示刚开始冷却,1表示冷却完成 """ if self.last_focus_exit_time == 0: return 1.0 # 没有冷却,返回1表示完全恢复 - + # 基础冷却时间10分钟,受auto_focus_threshold调控 base_cooldown = 10 * 60 # 10分钟转换为秒 cooldown_duration = base_cooldown / global_config.chat.auto_focus_threshold - + current_time = time.time() elapsed_since_exit = current_time - self.last_focus_exit_time - + if elapsed_since_exit >= cooldown_duration: return 1.0 # 冷却完成 - + # 计算进度:0表示刚开始冷却,1表示冷却完成 progress = elapsed_since_exit / cooldown_duration return progress diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index edaf39003..d88b9167f 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -54,7 +54,13 @@ class NormalChat: 每个聊天(私聊或群聊)都会有一个独立的NormalChat实例。 """ - def __init__(self, chat_stream: ChatStream, interest_dict: dict = None, on_switch_to_focus_callback=None, get_cooldown_progress_callback=None): + def __init__( + self, + chat_stream: ChatStream, + interest_dict: dict = None, + on_switch_to_focus_callback=None, + get_cooldown_progress_callback=None, + ): """ 初始化NormalChat实例。 @@ -109,7 +115,7 @@ class NormalChat: # 添加回调函数,用于在满足条件时通知切换到focus_chat模式 self.on_switch_to_focus_callback = on_switch_to_focus_callback - + # 添加回调函数,用于获取冷却进度 self.get_cooldown_progress_callback = get_cooldown_progress_callback @@ -774,7 +780,7 @@ class NormalChat: fatigue_multiplier = self._get_fatigue_reply_multiplier() original_probability = reply_probability reply_probability *= fatigue_multiplier - + # 如果应用了疲劳调整,记录日志 if fatigue_multiplier < 1.0: logger.info( @@ -1339,23 +1345,23 @@ class NormalChat: def _get_fatigue_reply_multiplier(self) -> float: """获取疲劳期回复频率调整系数 - + Returns: float: 回复频率调整系数,范围0.5-1.0 """ if not self.get_cooldown_progress_callback: return 1.0 # 没有冷却进度回调,返回正常系数 - + try: cooldown_progress = self.get_cooldown_progress_callback() - + if cooldown_progress >= 1.0: return 1.0 # 冷却完成,正常回复频率 - + # 疲劳期间:从0.5逐渐恢复到1.0 # 
progress=0时系数为0.5,progress=1时系数为1.0 multiplier = 0.2 + (0.8 * cooldown_progress) - + return multiplier except Exception as e: logger.warning(f"[{self.stream_name}] 获取疲劳调整系数时出错: {e}") From c33b8f67bdf206c3fb0f636459cdc97edde9cda0 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 21:10:06 +0800 Subject: [PATCH 19/63] =?UTF-8?q?remove:=E7=A7=BB=E9=99=A4=E8=B1=86?= =?UTF-8?q?=E5=8C=85=E7=94=BB=E5=9B=BE=E6=8F=92=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/normal_chat/normal_chat_generator.py | 53 +- .../built_in/doubao_pic_plugin/_manifest.json | 45 -- .../built_in/doubao_pic_plugin/plugin.py | 477 ------------------ 3 files changed, 1 insertion(+), 574 deletions(-) delete mode 100644 src/plugins/built_in/doubao_pic_plugin/_manifest.json delete mode 100644 src/plugins/built_in/doubao_pic_plugin/plugin.py diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py index f140bacbc..41e8eecbe 100644 --- a/src/chat/normal_chat/normal_chat_generator.py +++ b/src/chat/normal_chat/normal_chat_generator.py @@ -69,55 +69,4 @@ class NormalChatGenerator: except Exception: logger.exception("生成回复时出错") - return None - - return content - - async def _get_emotion_tags(self, content: str, processed_plain_text: str): - """提取情感标签,结合立场和情绪""" - try: - # 构建提示词,结合回复内容、被回复的内容以及立场分析 - prompt = f""" - 请严格根据以下对话内容,完成以下任务: - 1. 判断回复者对被回复者观点的直接立场: - - "支持":明确同意或强化被回复者观点 - - "反对":明确反驳或否定被回复者观点 - - "中立":不表达明确立场或无关回应 - 2. 从"开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签 - 3. 按照"立场-情绪"的格式直接输出结果,例如:"反对-愤怒" - 4. 考虑回复者的人格设定为{global_config.personality.personality_core} - - 对话示例: - 被回复:「A就是笨」 - 回复:「A明明很聪明」 → 反对-愤怒 - - 当前对话: - 被回复:「{processed_plain_text}」 - 回复:「{content}」 - - 输出要求: - - 只需输出"立场-情绪"结果,不要解释 - - 严格基于文字直接表达的对立关系判断 - """ - - # 调用模型生成结果 - result, (reasoning_content, model_name) = await self.model_sum.generate_response_async(prompt) - result = result.strip() - - # 解析模型输出的结果 - if "-" in result: - stance, emotion = result.split("-", 1) - valid_stances = ["支持", "反对", "中立"] - valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"] - if stance in valid_stances and emotion in valid_emotions: - return stance, emotion # 返回有效的立场-情绪组合 - else: - logger.debug(f"无效立场-情感组合:{result}") - return "中立", "平静" # 默认返回中立-平静 - else: - logger.debug(f"立场-情感格式错误:{result}") - return "中立", "平静" # 格式错误时返回默认值 - - except Exception as e: - logger.debug(f"获取情感标签时出错: {e}") - return "中立", "平静" # 出错时返回默认值 + return None \ No newline at end of file diff --git a/src/plugins/built_in/doubao_pic_plugin/_manifest.json b/src/plugins/built_in/doubao_pic_plugin/_manifest.json deleted file mode 100644 index eeedcb3fc..000000000 --- a/src/plugins/built_in/doubao_pic_plugin/_manifest.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "manifest_version": 1, - "name": "豆包图片生成插件 (Doubao Image Generator)", - "version": "2.0.0", - "description": "基于火山引擎豆包模型的AI图片生成插件,支持智能LLM判定、高质量图片生成、结果缓存和多尺寸支持。", - "author": { - "name": "MaiBot团队", - "url": "https://github.com/MaiM-with-u" - }, - "license": "GPL-v3.0-or-later", - - "host_application": { - "min_version": "0.8.0", - "max_version": "0.8.10" - }, - "homepage_url": "https://github.com/MaiM-with-u/maibot", - "repository_url": "https://github.com/MaiM-with-u/maibot", - "keywords": ["ai", "image", "generation", "doubao", "volcengine", "art"], - "categories": ["AI Tools", "Image Processing", "Content Generation"], - - "default_locale": "zh-CN", - "locales_path": "_locales", - - 
"plugin_info": { - "is_built_in": true, - "plugin_type": "content_generator", - "api_dependencies": ["volcengine"], - "components": [ - { - "type": "action", - "name": "doubao_image_generation", - "description": "根据描述使用火山引擎豆包API生成高质量图片", - "activation_modes": ["llm_judge", "keyword"], - "keywords": ["画", "图片", "生成", "画画", "绘制"] - } - ], - "features": [ - "智能LLM判定生成时机", - "高质量AI图片生成", - "结果缓存机制", - "多种图片尺寸支持", - "完整的错误处理" - ] - } -} diff --git a/src/plugins/built_in/doubao_pic_plugin/plugin.py b/src/plugins/built_in/doubao_pic_plugin/plugin.py deleted file mode 100644 index 28d37e88f..000000000 --- a/src/plugins/built_in/doubao_pic_plugin/plugin.py +++ /dev/null @@ -1,477 +0,0 @@ -""" -豆包图片生成插件 - -基于火山引擎豆包模型的AI图片生成插件。 - -功能特性: -- 智能LLM判定:根据聊天内容智能判断是否需要生成图片 -- 高质量图片生成:使用豆包Seed Dream模型生成图片 -- 结果缓存:避免重复生成相同内容的图片 -- 配置验证:自动验证和修复配置文件 -- 参数验证:完整的输入参数验证和错误处理 -- 多尺寸支持:支持多种图片尺寸生成 - -包含组件: -- 图片生成Action - 根据描述使用火山引擎API生成图片 -""" - -import asyncio -import json -import urllib.request -import urllib.error -import base64 -import traceback -from typing import List, Tuple, Type, Optional - -# 导入新插件系统 -from src.plugin_system.base.base_plugin import BasePlugin -from src.plugin_system.base.base_plugin import register_plugin -from src.plugin_system.base.base_action import BaseAction -from src.plugin_system.base.component_types import ComponentInfo, ActionActivationType, ChatMode -from src.plugin_system.base.config_types import ConfigField -from src.common.logger import get_logger - -logger = get_logger("doubao_pic_plugin") - - -# ===== Action组件 ===== - - -class DoubaoImageGenerationAction(BaseAction): - """豆包图片生成Action - 根据描述使用火山引擎API生成图片""" - - # 激活设置 - focus_activation_type = ActionActivationType.LLM_JUDGE # Focus模式使用LLM判定,精确理解需求 - normal_activation_type = ActionActivationType.KEYWORD # Normal模式使用关键词激活,快速响应 - mode_enable = ChatMode.ALL - parallel_action = True - - # 动作基本信息 - action_name = "doubao_image_generation" - action_description = ( - "可以根据特定的描述,生成并发送一张图片,如果没提供描述,就根据聊天内容生成,你可以立刻画好,不用等待" - ) - - # 关键词设置(用于Normal模式) - activation_keywords = ["画", "绘制", "生成图片", "画图", "draw", "paint", "图片生成"] - keyword_case_sensitive = False - - # LLM判定提示词(用于Focus模式) - llm_judge_prompt = """ -判定是否需要使用图片生成动作的条件: -1. 用户明确要求画图、生成图片或创作图像 -2. 用户描述了想要看到的画面或场景 -3. 对话中提到需要视觉化展示某些概念 -4. 用户想要创意图片或艺术作品 - -适合使用的情况: -- "画一张..."、"画个..."、"生成图片" -- "我想看看...的样子" -- "能画出...吗" -- "创作一幅..." - -绝对不要使用的情况: -1. 纯文字聊天和问答 -2. 只是提到"图片"、"画"等词但不是要求生成 -3. 谈论已存在的图片或照片 -4. 技术讨论中提到绘图概念但无生成需求 -5. 
用户明确表示不需要图片时 -""" - - # 动作参数定义 - action_parameters = { - "description": "图片描述,输入你想要生成并发送的图片的描述,必填", - "size": "图片尺寸,例如 '1024x1024' (可选, 默认从配置或 '1024x1024')", - } - - # 动作使用场景 - action_require = [ - "当有人让你画东西时使用,你可以立刻画好,不用等待", - "当有人要求你生成并发送一张图片时使用", - "当有人让你画一张图时使用", - ] - - # 关联类型 - associated_types = ["image", "text"] - - # 简单的请求缓存,避免短时间内重复请求 - _request_cache = {} - _cache_max_size = 10 - - async def execute(self) -> Tuple[bool, Optional[str]]: - """执行图片生成动作""" - logger.info(f"{self.log_prefix} 执行豆包图片生成动作") - - # 配置验证 - http_base_url = self.api.get_config("api.base_url") - http_api_key = self.api.get_config("api.volcano_generate_api_key") - - if not (http_base_url and http_api_key): - error_msg = "抱歉,图片生成功能所需的HTTP配置(如API地址或密钥)不完整,无法提供服务。" - await self.send_text(error_msg) - logger.error(f"{self.log_prefix} HTTP调用配置缺失: base_url 或 volcano_generate_api_key.") - return False, "HTTP配置不完整" - - # API密钥验证 - if http_api_key == "YOUR_DOUBAO_API_KEY_HERE": - error_msg = "图片生成功能尚未配置,请设置正确的API密钥。" - await self.send_text(error_msg) - logger.error(f"{self.log_prefix} API密钥未配置") - return False, "API密钥未配置" - - # 参数验证 - description = self.action_data.get("description") - if not description or not description.strip(): - logger.warning(f"{self.log_prefix} 图片描述为空,无法生成图片。") - await self.send_text("你需要告诉我想要画什么样的图片哦~ 比如说'画一只可爱的小猫'") - return False, "图片描述为空" - - # 清理和验证描述 - description = description.strip() - if len(description) > 1000: # 限制描述长度 - description = description[:1000] - logger.info(f"{self.log_prefix} 图片描述过长,已截断") - - # 获取配置 - default_model = self.api.get_config("generation.default_model", "doubao-seedream-3-0-t2i-250415") - image_size = self.action_data.get("size", self.api.get_config("generation.default_size", "1024x1024")) - - # 验证图片尺寸格式 - if not self._validate_image_size(image_size): - logger.warning(f"{self.log_prefix} 无效的图片尺寸: {image_size},使用默认值") - image_size = "1024x1024" - - # 检查缓存 - cache_key = self._get_cache_key(description, default_model, image_size) - if cache_key in self._request_cache: - cached_result = self._request_cache[cache_key] - logger.info(f"{self.log_prefix} 使用缓存的图片结果") - await self.send_text("我之前画过类似的图片,用之前的结果~") - - # 直接发送缓存的结果 - send_success = await self._send_image(cached_result) - if send_success: - await self.send_text("图片已发送!") - return True, "图片已发送(缓存)" - else: - # 缓存失败,清除这个缓存项并继续正常流程 - del self._request_cache[cache_key] - - # 获取其他配置参数 - guidance_scale_val = self._get_guidance_scale() - seed_val = self._get_seed() - watermark_val = self._get_watermark() - - await self.send_text( - f"收到!正在为您生成关于 '{description}' 的图片,请稍候...(模型: {default_model}, 尺寸: {image_size})" - ) - - try: - success, result = await asyncio.to_thread( - self._make_http_image_request, - prompt=description, - model=default_model, - size=image_size, - seed=seed_val, - guidance_scale=guidance_scale_val, - watermark=watermark_val, - ) - except Exception as e: - logger.error(f"{self.log_prefix} (HTTP) 异步请求执行失败: {e!r}", exc_info=True) - traceback.print_exc() - success = False - result = f"图片生成服务遇到意外问题: {str(e)[:100]}" - - if success: - image_url = result - # print(f"image_url: {image_url}") - # print(f"result: {result}") - logger.info(f"{self.log_prefix} 图片URL获取成功: {image_url[:70]}... 
下载并编码.") - - try: - encode_success, encode_result = await asyncio.to_thread(self._download_and_encode_base64, image_url) - except Exception as e: - logger.error(f"{self.log_prefix} (B64) 异步下载/编码失败: {e!r}", exc_info=True) - traceback.print_exc() - encode_success = False - encode_result = f"图片下载或编码时发生内部错误: {str(e)[:100]}" - - if encode_success: - base64_image_string = encode_result - send_success = await self._send_image(base64_image_string) - if send_success: - # 缓存成功的结果 - self._request_cache[cache_key] = base64_image_string - self._cleanup_cache() - - await self.send_message_by_expressor("图片已发送!") - return True, "图片已成功生成并发送" - else: - print(f"send_success: {send_success}") - await self.send_message_by_expressor("图片已处理为Base64,但发送失败了。") - return False, "图片发送失败 (Base64)" - else: - await self.send_message_by_expressor(f"获取到图片URL,但在处理图片时失败了:{encode_result}") - return False, f"图片处理失败(Base64): {encode_result}" - else: - error_message = result - await self.send_message_by_expressor(f"哎呀,生成图片时遇到问题:{error_message}") - return False, f"图片生成失败: {error_message}" - - def _get_guidance_scale(self) -> float: - """获取guidance_scale配置值""" - guidance_scale_input = self.api.get_config("generation.default_guidance_scale", 2.5) - try: - return float(guidance_scale_input) - except (ValueError, TypeError): - logger.warning(f"{self.log_prefix} default_guidance_scale 值无效,使用默认值 2.5") - return 2.5 - - def _get_seed(self) -> int: - """获取seed配置值""" - seed_config_value = self.api.get_config("generation.default_seed") - if seed_config_value is not None: - try: - return int(seed_config_value) - except (ValueError, TypeError): - logger.warning(f"{self.log_prefix} default_seed 值无效,使用默认值 42") - return 42 - - def _get_watermark(self) -> bool: - """获取watermark配置值""" - watermark_source = self.api.get_config("generation.default_watermark", True) - if isinstance(watermark_source, bool): - return watermark_source - elif isinstance(watermark_source, str): - return watermark_source.lower() == "true" - else: - logger.warning(f"{self.log_prefix} default_watermark 值无效,使用默认值 True") - return True - - async def _send_image(self, base64_image: str) -> bool: - """发送图片""" - try: - # 使用聊天流信息确定发送目标 - chat_stream = self.api.get_service("chat_stream") - if not chat_stream: - logger.error(f"{self.log_prefix} 没有可用的聊天流发送图片") - return False - - if chat_stream.group_info: - # 群聊 - return await self.api.send_message_to_target( - message_type="image", - content=base64_image, - platform=chat_stream.platform, - target_id=str(chat_stream.group_info.group_id), - is_group=True, - display_message="发送生成的图片", - ) - else: - # 私聊 - return await self.api.send_message_to_target( - message_type="image", - content=base64_image, - platform=chat_stream.platform, - target_id=str(chat_stream.user_info.user_id), - is_group=False, - display_message="发送生成的图片", - ) - except Exception as e: - logger.error(f"{self.log_prefix} 发送图片时出错: {e}") - return False - - @classmethod - def _get_cache_key(cls, description: str, model: str, size: str) -> str: - """生成缓存键""" - return f"{description[:100]}|{model}|{size}" - - @classmethod - def _cleanup_cache(cls): - """清理缓存,保持大小在限制内""" - if len(cls._request_cache) > cls._cache_max_size: - keys_to_remove = list(cls._request_cache.keys())[: -cls._cache_max_size // 2] - for key in keys_to_remove: - del cls._request_cache[key] - - def _validate_image_size(self, image_size: str) -> bool: - """验证图片尺寸格式""" - try: - width, height = map(int, image_size.split("x")) - return 100 <= width <= 10000 and 100 <= height <= 10000 - except (ValueError, 
TypeError): - return False - - def _download_and_encode_base64(self, image_url: str) -> Tuple[bool, str]: - """下载图片并将其编码为Base64字符串""" - logger.info(f"{self.log_prefix} (B64) 下载并编码图片: {image_url[:70]}...") - try: - with urllib.request.urlopen(image_url, timeout=30) as response: - if response.status == 200: - image_bytes = response.read() - base64_encoded_image = base64.b64encode(image_bytes).decode("utf-8") - logger.info(f"{self.log_prefix} (B64) 图片下载编码完成. Base64长度: {len(base64_encoded_image)}") - return True, base64_encoded_image - else: - error_msg = f"下载图片失败 (状态: {response.status})" - logger.error(f"{self.log_prefix} (B64) {error_msg} URL: {image_url}") - return False, error_msg - except Exception as e: - logger.error(f"{self.log_prefix} (B64) 下载或编码时错误: {e!r}", exc_info=True) - traceback.print_exc() - return False, f"下载或编码图片时发生错误: {str(e)[:100]}" - - def _make_http_image_request( - self, prompt: str, model: str, size: str, seed: int, guidance_scale: float, watermark: bool - ) -> Tuple[bool, str]: - """发送HTTP请求生成图片""" - base_url = self.api.get_config("api.base_url") - generate_api_key = self.api.get_config("api.volcano_generate_api_key") - - endpoint = f"{base_url.rstrip('/')}/images/generations" - - payload_dict = { - "model": model, - "prompt": prompt, - "response_format": "url", - "size": size, - "guidance_scale": guidance_scale, - "watermark": watermark, - "seed": seed, - "api-key": generate_api_key, - } - - data = json.dumps(payload_dict).encode("utf-8") - headers = { - "Content-Type": "application/json", - "Accept": "application/json", - "Authorization": f"Bearer {generate_api_key}", - } - - logger.info(f"{self.log_prefix} (HTTP) 发起图片请求: {model}, Prompt: {prompt[:30]}... To: {endpoint}") - - req = urllib.request.Request(endpoint, data=data, headers=headers, method="POST") - - try: - with urllib.request.urlopen(req, timeout=60) as response: - response_status = response.status - response_body_bytes = response.read() - response_body_str = response_body_bytes.decode("utf-8") - - logger.info(f"{self.log_prefix} (HTTP) 响应: {response_status}. Preview: {response_body_str[:150]}...") - - if 200 <= response_status < 300: - response_data = json.loads(response_body_str) - image_url = None - if ( - isinstance(response_data.get("data"), list) - and response_data["data"] - and isinstance(response_data["data"][0], dict) - ): - image_url = response_data["data"][0].get("url") - elif response_data.get("url"): - image_url = response_data.get("url") - - if image_url: - logger.info(f"{self.log_prefix} (HTTP) 图片生成成功,URL: {image_url[:70]}...") - return True, image_url - else: - logger.error(f"{self.log_prefix} (HTTP) API成功但无图片URL") - return False, "图片生成API响应成功但未找到图片URL" - else: - logger.error(f"{self.log_prefix} (HTTP) API请求失败. 
状态: {response.status}") - return False, f"图片API请求失败(状态码 {response.status})" - except Exception as e: - logger.error(f"{self.log_prefix} (HTTP) 图片生成时意外错误: {e!r}", exc_info=True) - traceback.print_exc() - return False, f"图片生成HTTP请求时发生意外错误: {str(e)[:100]}" - - -# ===== 插件主类 ===== - - -@register_plugin -class DoubaoImagePlugin(BasePlugin): - """豆包图片生成插件 - - 基于火山引擎豆包模型的AI图片生成插件: - - 图片生成Action:根据描述使用火山引擎API生成图片 - """ - - # 插件基本信息 - plugin_name = "doubao_pic_plugin" # 内部标识符 - enable_plugin = True - config_file_name = "config.toml" - - # 配置节描述 - config_section_descriptions = { - "plugin": "插件基本信息配置", - "api": "API相关配置,包含火山引擎API的访问信息", - "generation": "图片生成参数配置,控制生成图片的各种参数", - "cache": "结果缓存配置", - "components": "组件启用配置", - } - - # 配置Schema定义 - config_schema = { - "plugin": { - "name": ConfigField(type=str, default="doubao_pic_plugin", description="插件名称", required=True), - "version": ConfigField(type=str, default="2.0.0", description="插件版本号"), - "enabled": ConfigField(type=bool, default=False, description="是否启用插件"), - "description": ConfigField( - type=str, default="基于火山引擎豆包模型的AI图片生成插件", description="插件描述", required=True - ), - }, - "api": { - "base_url": ConfigField( - type=str, - default="https://ark.cn-beijing.volces.com/api/v3", - description="API基础URL", - example="https://api.example.com/v1", - ), - "volcano_generate_api_key": ConfigField( - type=str, default="YOUR_DOUBAO_API_KEY_HERE", description="火山引擎豆包API密钥", required=True - ), - }, - "generation": { - "default_model": ConfigField( - type=str, - default="doubao-seedream-3-0-t2i-250415", - description="默认使用的文生图模型", - choices=["doubao-seedream-3-0-t2i-250415", "doubao-seedream-2-0-t2i"], - ), - "default_size": ConfigField( - type=str, - default="1024x1024", - description="默认图片尺寸", - example="1024x1024", - choices=["1024x1024", "1024x1280", "1280x1024", "1024x1536", "1536x1024"], - ), - "default_watermark": ConfigField(type=bool, default=True, description="是否默认添加水印"), - "default_guidance_scale": ConfigField( - type=float, default=2.5, description="模型指导强度,影响图片与提示的关联性", example="2.0" - ), - "default_seed": ConfigField(type=int, default=42, description="随机种子,用于复现图片"), - }, - "cache": { - "enabled": ConfigField(type=bool, default=True, description="是否启用请求缓存"), - "max_size": ConfigField(type=int, default=10, description="最大缓存数量"), - }, - "components": { - "enable_image_generation": ConfigField(type=bool, default=True, description="是否启用图片生成Action") - }, - } - - def get_plugin_components(self) -> List[Tuple[ComponentInfo, Type]]: - """返回插件包含的组件列表""" - - # 从配置获取组件启用状态 - enable_image_generation = self.get_config("components.enable_image_generation", True) - - components = [] - - # 添加图片生成Action - if enable_image_generation: - components.append((DoubaoImageGenerationAction.get_action_info(), DoubaoImageGenerationAction)) - - return components From 71e749ce976d0d1fbd58158da713bc75b7b0a793 Mon Sep 17 00:00:00 2001 From: A0000Xz <122650088+A0000Xz@users.noreply.github.com> Date: Sat, 5 Jul 2025 21:59:55 +0800 Subject: [PATCH 20/63] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=AF=B9=E4=BA=8E?= =?UTF-8?q?=E5=8F=A4=E6=97=A9=E7=BC=BA=E5=A4=B1=E5=AD=97=E6=AE=B5=E7=9A=84?= =?UTF-8?q?=E5=9B=BE=E7=89=87=E5=8F=8D=E5=A4=8D=E7=94=9F=E6=88=90=E6=96=B0?= =?UTF-8?q?picid=E8=AE=B0=E5=BD=95=E7=9A=84BUG?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/utils/utils_image.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py index 
e87f4bf91..293bdaf38 100644 --- a/src/chat/utils/utils_image.py +++ b/src/chat/utils/utils_image.py @@ -403,7 +403,16 @@ class ImageManager: or existing_image.vlm_processed is None ): logger.debug(f"图片记录缺少必要字段,补全旧记录: {image_hash}") - image_id = str(uuid.uuid4()) + if not existing_image.image_id: + existing_image.image_id = str(uuid.uuid4()) + if existing_image.count is None: + existing_image.count = 0 + if existing_image.vlm_processed is None: + existing_image.vlm_processed = False + + existing_image.count += 1 + existing_image.save() + return existing_image.image_id, f"[picid:{existing_image.image_id}]" else: # print(f"图片已存在: {existing_image.image_id}") # print(f"图片描述: {existing_image.description}") From 6230920d315be1fe667411412e7eb3f42fd36535 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 5 Jul 2025 14:10:08 +0000 Subject: [PATCH 21/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/utils/utils_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py index 293bdaf38..25b753bab 100644 --- a/src/chat/utils/utils_image.py +++ b/src/chat/utils/utils_image.py @@ -409,7 +409,7 @@ class ImageManager: existing_image.count = 0 if existing_image.vlm_processed is None: existing_image.vlm_processed = False - + existing_image.count += 1 existing_image.save() return existing_image.image_id, f"[picid:{existing_image.image_id}]" From cad3f881967a9062d6de624da3a3c0430fbd98fe Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 22:29:55 +0800 Subject: [PATCH 22/63] Update default_generator.py --- src/chat/replyer/default_generator.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 4aa275d6f..da9d9a584 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -255,8 +255,6 @@ class DefaultReplyer: with Timer("构建Prompt", {}): # 内部计时器,可选保留 prompt = await self.build_prompt_rewrite_context( - raw_reply=raw_reply, - reason=reason, reply_data=reply_data, ) @@ -652,8 +650,6 @@ class DefaultReplyer: async def build_prompt_rewrite_context( self, reply_data: Dict[str, Any], - raw_reply: str = "", - reason: str = "", ) -> str: chat_stream = self.chat_stream chat_id = chat_stream.stream_id @@ -662,6 +658,8 @@ class DefaultReplyer: is_group_chat = bool(chat_stream.group_info) reply_to = reply_data.get("reply_to", "none") + raw_reply = reply_data.get("raw_reply", "") + reason = reply_data.get("reason", "") sender, target = self._parse_reply_target(reply_to) message_list_before_now_half = get_raw_msg_before_timestamp_with_chat( From 56c3d5bd8a1c34c45bea493b31fa91202602b38a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 5 Jul 2025 14:32:27 +0000 Subject: [PATCH 23/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/normal_chat/normal_chat_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py index 41e8eecbe..df7cc6876 100644 --- a/src/chat/normal_chat/normal_chat_generator.py 
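# A note on the utils_image.py hunk above: instead of minting a new uuid (and therefore a
# new picid) every time an old record is missing columns, the fix backfills only the absent
# fields on the existing Peewee row and reuses its image_id. A minimal sketch of that
# get-or-backfill pattern — the field names come from the hunk, the helper itself is
# illustrative only:
#
#     import uuid
#
#     def backfill_image_row(row):
#         if not row.image_id:
#             row.image_id = str(uuid.uuid4())
#         if row.count is None:
#             row.count = 0
#         if row.vlm_processed is None:
#             row.vlm_processed = False
#         row.count += 1
#         row.save()
#         return row.image_id, f"[picid:{row.image_id}]"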
+++ b/src/chat/normal_chat/normal_chat_generator.py @@ -69,4 +69,4 @@ class NormalChatGenerator: except Exception: logger.exception("生成回复时出错") - return None \ No newline at end of file + return None From 0077bfa77fb24062bdcf9d0ebd1ba4658fc39261 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 22:55:57 +0800 Subject: [PATCH 24/63] =?UTF-8?q?-=20=E7=A7=BB=E9=99=A4=E8=B1=86=E5=8C=85?= =?UTF-8?q?=E7=94=BB=E5=9B=BE=E6=8F=92=E4=BB=B6=EF=BC=8C=E6=AD=A4=E6=8F=92?= =?UTF-8?q?=E4=BB=B6=E7=8E=B0=E5=9C=A8=E6=8F=92=E4=BB=B6=E5=B9=BF=E5=9C=BA?= =?UTF-8?q?=E6=8F=90=E4=BE=9B=20-=20=E4=BF=AE=E5=A4=8D=E8=A1=A8=E8=BE=BE?= =?UTF-8?q?=E5=99=A8=E6=97=A0=E6=B3=95=E8=AF=BB=E5=8F=96=E5=8E=9F=E5=A7=8B?= =?UTF-8?q?=E6=96=87=E6=9C=AC=20-=20=E4=BF=AE=E5=A4=8Dnormal=20planner?= =?UTF-8?q?=E6=B2=A1=E6=9C=89=E8=B6=85=E6=97=B6=E9=80=80=E5=87=BA=E9=97=AE?= =?UTF-8?q?=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 3 + src/chat/heart_flow/sub_heartflow.py | 10 - src/chat/message_receive/message_sender.py | 33 --- src/chat/normal_chat/normal_chat.py | 134 +++++---- src/chat/normal_chat/normal_chat_expressor.py | 262 ------------------ src/chat/normal_chat/normal_chat_planner.py | 2 +- .../normal_chat/willing/mode_classical.py | 20 -- src/chat/normal_chat/willing/mode_custom.py | 3 - src/chat/normal_chat/willing/mode_mxp.py | 15 +- .../normal_chat/willing/willing_manager.py | 5 - src/config/official_configs.py | 11 +- template/bot_config_template.toml | 24 +- 12 files changed, 100 insertions(+), 422 deletions(-) delete mode 100644 src/chat/normal_chat/normal_chat_expressor.py diff --git a/changelogs/changelog.md b/changelogs/changelog.md index 8fe8c5e32..bef8ab146 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -17,6 +17,9 @@ - 私聊强制开启focus模式 - 支持解析reply_to和at - 修复focus冷却时间导致的固定沉默 +- 移除豆包画图插件,此插件现在插件广场提供 +- 修复表达器无法读取原始文本 +- 修复normal planner没有超时退出问题 diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py index 98bc07337..cd417f872 100644 --- a/src/chat/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -368,16 +368,6 @@ class SubHeartflow: return self.normal_chat_instance.get_action_manager() return None - def set_normal_chat_planner_enabled(self, enabled: bool): - """设置NormalChat的planner是否启用 - - Args: - enabled: 是否启用planner - """ - if self.normal_chat_instance: - self.normal_chat_instance.set_planner_enabled(enabled) - else: - logger.warning(f"{self.log_prefix} NormalChat实例不存在,无法设置planner状态") async def get_full_state(self) -> dict: """获取子心流的完整状态,包括兴趣、思维和聊天状态。""" diff --git a/src/chat/message_receive/message_sender.py b/src/chat/message_receive/message_sender.py index 6cb256d32..e54f37d12 100644 --- a/src/chat/message_receive/message_sender.py +++ b/src/chat/message_receive/message_sender.py @@ -9,7 +9,6 @@ from src.common.message.api import get_global_api from .message import MessageSending, MessageThinking, MessageSet from src.chat.message_receive.storage import MessageStorage -from ...config.config import global_config from ..utils.utils import truncate_message, calculate_typing_time, count_messages_between from src.common.logger import get_logger @@ -192,19 +191,6 @@ class MessageManager: container = await self.get_container(chat_stream.stream_id) container.add_message(message) - def check_if_sending_message_exist(self, chat_id, thinking_id): - """检查指定聊天流的容器中是否存在具有特定 thinking_id 的 MessageSending 消息 或 emoji 消息""" - # 
这个方法现在是非异步的,因为它只读取数据 - container = self.containers.get(chat_id) # 直接 get,因为读取不需要锁 - if container and container.has_messages(): - for message in container.get_all_messages(): - if isinstance(message, MessageSending): - msg_id = getattr(message.message_info, "message_id", None) - # 检查 message_id 是否匹配 thinking_id 或以 "me" 开头 (emoji) - if msg_id == thinking_id or (msg_id and msg_id.startswith("me")): - # logger.debug(f"检查到存在相同thinking_id或emoji的消息: {msg_id} for {thinking_id}") - return True - return False async def _handle_sending_message(self, container: MessageContainer, message: MessageSending): """处理单个 MessageSending 消息 (包含 set_reply 逻辑)""" @@ -216,12 +202,7 @@ class MessageManager: thinking_messages_count, thinking_messages_length = count_messages_between( start_time=thinking_start_time, end_time=now_time, stream_id=message.chat_stream.stream_id ) - # print(f"message.reply:{message.reply}") - # --- 条件应用 set_reply 逻辑 --- - # logger.debug( - # f"[message.apply_set_reply_logic:{message.apply_set_reply_logic},message.is_head:{message.is_head},thinking_messages_count:{thinking_messages_count},thinking_messages_length:{thinking_messages_length},message.is_private_message():{message.is_private_message()}]" - # ) if ( message.is_head and (thinking_messages_count > 3 or thinking_messages_length > 200) @@ -277,14 +258,6 @@ class MessageManager: flush=True, ) - # 检查是否超时 - if thinking_time > global_config.normal_chat.thinking_timeout: - logger.warning( - f"[{chat_id}] 消息思考超时 ({thinking_time:.1f}秒),移除消息 {message_earliest.message_info.message_id}" - ) - container.remove_message(message_earliest) - print() # 超时后换行,避免覆盖下一条日志 - elif isinstance(message_earliest, MessageSending): # --- 处理发送消息 --- await self._handle_sending_message(container, message_earliest) @@ -301,12 +274,6 @@ class MessageManager: logger.info(f"[{chat_id}] 处理超时发送消息: {msg.message_info.message_id}") await self._handle_sending_message(container, msg) # 复用处理逻辑 - # 清理空容器 (可选) - # async with self._container_lock: - # if not container.has_messages() and chat_id in self.containers: - # logger.debug(f"[{chat_id}] 容器已空,准备移除。") - # del self.containers[chat_id] - async def _start_processor_loop(self): """消息处理器主循环""" while self._running: diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index d88b9167f..128ce94d7 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -28,7 +28,6 @@ from .priority_manager import PriorityManager import traceback from .normal_chat_generator import NormalChatGenerator -from src.chat.normal_chat.normal_chat_expressor import NormalChatExpressor from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier @@ -72,9 +71,6 @@ class NormalChat: self.stream_name = get_chat_manager().get_stream_name(self.stream_id) or self.stream_id - # 初始化Normal Chat专用表达器 - self.expressor = NormalChatExpressor(self.chat_stream) - # Interest dict self.interest_dict = interest_dict @@ -120,6 +116,8 @@ class NormalChat: self.get_cooldown_progress_callback = get_cooldown_progress_callback self._disabled = False # 增加停用标志 + + self.timeout_count = 0 # 加载持久化的缓存 self._load_cache() @@ -490,14 +488,10 @@ class NormalChat: logger.info( f"[{self.stream_name}] 从队列中取出消息进行处理: User {message.message_info.user_info.user_id}, Time: {time.strftime('%H:%M:%S', time.localtime(message.message_info.time))}" ) - # 执行定期清理 - self._cleanup_old_segments() - # 更新消息段信息 - 
self._update_user_message_segments(message) # 检查是否有用户满足关系构建条件 - asyncio.create_task(self._check_relation_building_conditions()) + asyncio.create_task(self._check_relation_building_conditions(message)) await self.reply_one_message(message) @@ -722,18 +716,9 @@ class NormalChat: if self.priority_manager: self.priority_manager.add_message(message) return - - # --- 以下为原有的 "兴趣" 模式逻辑 --- - await self._process_message(message, is_mentioned, interested_rate) - - async def _process_message(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None: - """ - 实际处理单条消息的逻辑,包括意愿判断、回复生成、动作执行等。 - """ - if self._disabled: - return - - # 新增:在auto模式下检查是否需要直接切换到focus模式 + + + # 新增:在auto模式下检查是否需要直接切换到focus模式 if global_config.chat.chat_mode == "auto": if await self._check_should_switch_to_focus(): logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件,尝试执行切换") @@ -747,14 +732,20 @@ class NormalChat: else: logger.warning(f"[{self.stream_name}] 没有设置切换到focus聊天模式的回调函数,无法执行切换") - # 执行定期清理 - self._cleanup_old_segments() + # --- 以下为原有的 "兴趣" 模式逻辑 --- + await self._process_message(message, is_mentioned, interested_rate) + + async def _process_message(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None: + """ + 实际处理单条消息的逻辑,包括意愿判断、回复生成、动作执行等。 + """ + if self._disabled: + return + - # 更新消息段信息 - self._update_user_message_segments(message) # 检查是否有用户满足关系构建条件 - asyncio.create_task(self._check_relation_building_conditions()) + asyncio.create_task(self._check_relation_building_conditions(message)) timing_results = {} reply_probability = ( @@ -776,6 +767,10 @@ class NormalChat: reply_probability += message.message_info.additional_config["maimcore_reply_probability_gain"] reply_probability = min(max(reply_probability, 0), 1) # 确保概率在 0-1 之间 + # 处理表情包 + if message.is_emoji or message.is_picid: + reply_probability = 0 + # 应用疲劳期回复频率调整 fatigue_multiplier = self._get_fatigue_reply_multiplier() original_probability = reply_probability @@ -804,6 +799,8 @@ class NormalChat: await willing_manager.before_generate_reply_handle(message.message_info.message_id) do_reply = await self.reply_one_message(message) response_set = do_reply if do_reply else None + + # 输出性能计时结果 if do_reply and response_set: # 确保 response_set 不是 None timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()]) @@ -855,8 +852,6 @@ class NormalChat: return None try: - # 获取发送者名称(动作修改已在并行执行前完成) - sender_name = self._get_sender_name(message) no_action = { "action_result": { @@ -876,7 +871,7 @@ class NormalChat: return no_action # 执行规划 - plan_result = await self.planner.plan(message, sender_name) + plan_result = await self.planner.plan(message) action_type = plan_result["action_result"]["action_type"] action_data = plan_result["action_result"]["action_data"] reasoning = plan_result["action_result"]["reasoning"] @@ -914,9 +909,35 @@ class NormalChat: # 并行执行回复生成和动作规划 self.action_type = None # 初始化动作类型 self.is_parallel_action = False # 初始化并行动作标志 - response_set, plan_result = await asyncio.gather( - generate_normal_response(), plan_and_execute_actions(), return_exceptions=True - ) + + gen_task = asyncio.create_task(generate_normal_response()) + plan_task = asyncio.create_task(plan_and_execute_actions()) + + try: + gather_timeout = global_config.normal_chat.thinking_timeout + results = await asyncio.wait_for( + asyncio.gather(gen_task, plan_task, return_exceptions=True), + timeout=gather_timeout, + ) + response_set, plan_result = results + except asyncio.TimeoutError: + 
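# The added lines above wrap the parallel generator/planner run in
# asyncio.wait_for(asyncio.gather(...)) with thinking_timeout as the budget, then fall into
# this except block to count the timeout and cancel the leftovers. A self-contained sketch
# of the same pattern (function and variable names are illustrative, not from the patch):
#
#     import asyncio
#
#     async def run_both_with_timeout(gen_coro, plan_coro, timeout: float):
#         gen_task = asyncio.create_task(gen_coro)
#         plan_task = asyncio.create_task(plan_coro)
#         try:
#             return await asyncio.wait_for(
#                 asyncio.gather(gen_task, plan_task, return_exceptions=True),
#                 timeout=timeout,
#             )
#         except asyncio.TimeoutError:
#             for task in (gen_task, plan_task):
#                 if not task.done():
#                     task.cancel()
#             return None
#
# return_exceptions=True keeps one failing task from tearing down the other; cancelling
# explicitly on timeout mirrors the cleanup performed below.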
logger.warning(f"[{self.stream_name}] 并行执行回复生成和动作规划超时 ({gather_timeout}秒),正在取消相关任务...") + self.timeout_count += 1 + if self.timeout_count > 5: + logger.error(f"[{self.stream_name}] 连续回复超时,{global_config.normal_chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。") + return False + + # 取消未完成的任务 + if not gen_task.done(): + gen_task.cancel() + if not plan_task.done(): + plan_task.cancel() + + # 清理思考消息 + await self._cleanup_thinking_message_by_id(thinking_id) + + response_set = None + plan_result = None # 处理生成回复的结果 if isinstance(response_set, Exception): @@ -937,14 +958,7 @@ class NormalChat: elif self.enable_planner and self.action_type not in ["no_action"] and not self.is_parallel_action: logger.info(f"[{self.stream_name}] 模型选择其他动作(非并行动作)") # 如果模型未生成回复,移除思考消息 - container = await message_manager.get_container(self.stream_id) # 使用 self.stream_id - for msg in container.messages[:]: - if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id: - container.messages.remove(msg) - logger.debug(f"[{self.stream_name}] 已移除未产生回复的思考消息 {thinking_id}") - break - # 需要在此处也调用 not_reply_handle 和 delete 吗? - # 如果是因为模型没回复,也算是一种 "未回复" + await self._cleanup_thinking_message_by_id(thinking_id) return False # logger.info(f"[{self.stream_name}] 回复内容: {response_set}") @@ -969,9 +983,7 @@ class NormalChat: "user_nickname": message.message_info.user_info.user_nickname, }, "response": response_set, - # "is_mentioned": is_mentioned, "is_reference_reply": message.reply is not None, # 判断是否为引用回复 - # "timing": {k: round(v, 2) for k, v in timing_results.items()}, } self.recent_replies.append(reply_info) # 保持最近回复历史在限定数量内 @@ -1198,18 +1210,6 @@ class NormalChat: f"意愿放大器更新为: {self.willing_amplifier:.2f}" ) - def _get_sender_name(self, message: MessageRecv) -> str: - """获取发送者名称,用于planner""" - if message.chat_stream.user_info: - user_info = message.chat_stream.user_info - if user_info.user_cardname and user_info.user_nickname: - return f"[{user_info.user_nickname}][群昵称:{user_info.user_cardname}]" - elif user_info.user_nickname: - return f"[{user_info.user_nickname}]" - else: - return f"用户({user_info.user_id})" - return "某人" - async def _execute_action( self, action_type: str, action_data: dict, message: MessageRecv, thinking_id: str ) -> Optional[bool]: @@ -1246,17 +1246,18 @@ class NormalChat: return False - def set_planner_enabled(self, enabled: bool): - """设置是否启用planner""" - self.enable_planner = enabled - logger.info(f"[{self.stream_name}] Planner {'启用' if enabled else '禁用'}") - def get_action_manager(self) -> ActionManager: """获取动作管理器实例""" return self.action_manager - async def _check_relation_building_conditions(self): + async def _check_relation_building_conditions(self, message: MessageRecv): """检查person_engaged_cache中是否有满足关系构建条件的用户""" + # 执行定期清理 + self._cleanup_old_segments() + + # 更新消息段信息 + self._update_user_message_segments(message) + users_to_build_relationship = [] for person_id, segments in list(self.person_engaged_cache.items()): @@ -1401,3 +1402,16 @@ class NormalChat: ) return should_switch + + async def _cleanup_thinking_message_by_id(self, thinking_id: str): + """根据ID清理思考消息""" + try: + container = await message_manager.get_container(self.stream_id) + if container: + for msg in container.messages[:]: + if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id: + container.messages.remove(msg) + logger.info(f"[{self.stream_name}] 已清理思考消息 {thinking_id}") + break + except Exception as e: + logger.error(f"[{self.stream_name}] 清理思考消息 
{thinking_id} 时出错: {e}") diff --git a/src/chat/normal_chat/normal_chat_expressor.py b/src/chat/normal_chat/normal_chat_expressor.py deleted file mode 100644 index c89ad8534..000000000 --- a/src/chat/normal_chat/normal_chat_expressor.py +++ /dev/null @@ -1,262 +0,0 @@ -""" -Normal Chat Expressor - -为Normal Chat专门设计的表达器,不需要经过LLM风格化处理, -直接发送消息,主要用于插件动作中需要发送消息的场景。 -""" - -import time -from typing import List, Optional, Tuple, Dict, Any -from src.chat.message_receive.message import MessageRecv, MessageSending, MessageThinking, Seg -from src.chat.message_receive.message import UserInfo -from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager -from src.chat.message_receive.message_sender import message_manager -from src.config.config import global_config -from src.common.logger import get_logger - -logger = get_logger("normal_chat_expressor") - - -class NormalChatExpressor: - """Normal Chat专用表达器 - - 特点: - 1. 不经过LLM风格化,直接发送消息 - 2. 支持文本和表情包发送 - 3. 为插件动作提供简化的消息发送接口 - 4. 保持与focus_chat expressor相似的API,但去掉复杂的风格化流程 - """ - - def __init__(self, chat_stream: ChatStream): - """初始化Normal Chat表达器 - - Args: - chat_stream: 聊天流对象 - stream_name: 流名称 - """ - self.chat_stream = chat_stream - self.stream_name = get_chat_manager().get_stream_name(self.chat_stream.stream_id) or self.chat_stream.stream_id - self.log_prefix = f"[{self.stream_name}]Normal表达器" - - logger.debug(f"{self.log_prefix} 初始化完成") - - async def create_thinking_message( - self, anchor_message: Optional[MessageRecv], thinking_id: str - ) -> Optional[MessageThinking]: - """创建思考消息 - - Args: - anchor_message: 锚点消息 - thinking_id: 思考ID - - Returns: - MessageThinking: 创建的思考消息,如果失败返回None - """ - if not anchor_message or not anchor_message.chat_stream: - logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流") - return None - - messageinfo = anchor_message.message_info - thinking_time_point = time.time() - - bot_user_info = UserInfo( - user_id=global_config.bot.qq_account, - user_nickname=global_config.bot.nickname, - platform=messageinfo.platform, - ) - - thinking_message = MessageThinking( - message_id=thinking_id, - chat_stream=self.chat_stream, - bot_user_info=bot_user_info, - reply=anchor_message, - thinking_start_time=thinking_time_point, - ) - - await message_manager.add_message(thinking_message) - logger.debug(f"{self.log_prefix} 创建思考消息: {thinking_id}") - return thinking_message - - async def send_response_messages( - self, - anchor_message: Optional[MessageRecv], - response_set: List[Tuple[str, str]], - thinking_id: str = "", - display_message: str = "", - ) -> Optional[MessageSending]: - """发送回复消息 - - Args: - anchor_message: 锚点消息 - response_set: 回复内容集合,格式为 [(type, content), ...] 
- thinking_id: 思考ID - display_message: 显示消息 - - Returns: - MessageSending: 发送的第一条消息,如果失败返回None - """ - try: - if not response_set: - logger.warning(f"{self.log_prefix} 回复内容为空") - return None - - # 如果没有thinking_id,生成一个 - if not thinking_id: - thinking_time_point = round(time.time(), 2) - thinking_id = "mt" + str(thinking_time_point) - - # 创建思考消息 - if anchor_message: - await self.create_thinking_message(anchor_message, thinking_id) - - # 创建消息集 - - mark_head = False - is_emoji = False - if len(response_set) == 0: - return None - message_id = f"{thinking_id}_{len(response_set)}" - response_type, content = response_set[0] - if len(response_set) > 1: - message_segment = Seg(type="seglist", data=[Seg(type=t, data=c) for t, c in response_set]) - else: - message_segment = Seg(type=response_type, data=content) - if response_type == "emoji": - is_emoji = True - - bot_msg = await self._build_sending_message( - message_id=message_id, - message_segment=message_segment, - thinking_id=thinking_id, - anchor_message=anchor_message, - thinking_start_time=time.time(), - reply_to=mark_head, - is_emoji=is_emoji, - display_message=display_message, - ) - logger.debug(f"{self.log_prefix} 添加{response_type}类型消息: {content}") - - # 提交消息集 - if bot_msg: - await message_manager.add_message(bot_msg) - logger.info( - f"{self.log_prefix} 成功发送 {response_type}类型消息: {str(content)[:200] + '...' if len(str(content)) > 200 else content}" - ) - container = await message_manager.get_container(self.chat_stream.stream_id) # 使用 self.stream_id - for msg in container.messages[:]: - if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id: - container.messages.remove(msg) - logger.debug(f"[{self.stream_name}] 已移除未产生回复的思考消息 {thinking_id}") - break - return bot_msg - else: - logger.warning(f"{self.log_prefix} 没有有效的消息被创建") - return None - - except Exception as e: - logger.error(f"{self.log_prefix} 发送消息失败: {e}") - import traceback - - traceback.print_exc() - return None - - async def _build_sending_message( - self, - message_id: str, - message_segment: Seg, - thinking_id: str, - anchor_message: Optional[MessageRecv], - thinking_start_time: float, - reply_to: bool = False, - is_emoji: bool = False, - display_message: str = "", - ) -> MessageSending: - """构建发送消息 - - Args: - message_id: 消息ID - message_segment: 消息段 - thinking_id: 思考ID - anchor_message: 锚点消息 - thinking_start_time: 思考开始时间 - reply_to: 是否回复 - is_emoji: 是否为表情包 - - Returns: - MessageSending: 构建的发送消息 - """ - bot_user_info = UserInfo( - user_id=global_config.bot.qq_account, - user_nickname=global_config.bot.nickname, - platform=anchor_message.message_info.platform if anchor_message else "unknown", - ) - - message_sending = MessageSending( - message_id=message_id, - chat_stream=self.chat_stream, - bot_user_info=bot_user_info, - message_segment=message_segment, - sender_info=self.chat_stream.user_info, - reply=anchor_message if reply_to else None, - thinking_start_time=thinking_start_time, - is_emoji=is_emoji, - display_message=display_message, - ) - - return message_sending - - async def deal_reply( - self, - cycle_timers: dict, - action_data: Dict[str, Any], - reasoning: str, - anchor_message: MessageRecv, - thinking_id: str, - ) -> Tuple[bool, Optional[str]]: - """处理回复动作 - 兼容focus_chat expressor API - - Args: - cycle_timers: 周期计时器(normal_chat中不使用) - action_data: 动作数据,包含text、target、emojis等 - reasoning: 推理说明 - anchor_message: 锚点消息 - thinking_id: 思考ID - - Returns: - Tuple[bool, Optional[str]]: (是否成功, 回复文本) - """ - try: - response_set = [] - - # 处理文本内容 - 
text_content = action_data.get("text", "") - if text_content: - response_set.append(("text", text_content)) - - # 处理表情包 - emoji_content = action_data.get("emojis", "") - if emoji_content: - response_set.append(("emoji", emoji_content)) - - if not response_set: - logger.warning(f"{self.log_prefix} deal_reply: 没有有效的回复内容") - return False, None - - # 发送消息 - result = await self.send_response_messages( - anchor_message=anchor_message, - response_set=response_set, - thinking_id=thinking_id, - ) - - if result: - return True, text_content if text_content else "发送成功" - else: - return False, None - - except Exception as e: - logger.error(f"{self.log_prefix} deal_reply执行失败: {e}") - import traceback - - traceback.print_exc() - return False, None diff --git a/src/chat/normal_chat/normal_chat_planner.py b/src/chat/normal_chat/normal_chat_planner.py index d3f1e8abc..9c4e08433 100644 --- a/src/chat/normal_chat/normal_chat_planner.py +++ b/src/chat/normal_chat/normal_chat_planner.py @@ -72,7 +72,7 @@ class NormalChatPlanner: self.action_manager = action_manager - async def plan(self, message: MessageThinking, sender_name: str = "某人") -> Dict[str, Any]: + async def plan(self, message: MessageThinking) -> Dict[str, Any]: """ Normal Chat 规划器: 使用LLM根据上下文决定做出什么动作。 diff --git a/src/chat/normal_chat/willing/mode_classical.py b/src/chat/normal_chat/willing/mode_classical.py index 3ffe23c46..a6929338c 100644 --- a/src/chat/normal_chat/willing/mode_classical.py +++ b/src/chat/normal_chat/willing/mode_classical.py @@ -33,28 +33,10 @@ class ClassicalWillingManager(BaseWillingManager): if willing_info.is_mentioned_bot: current_willing += 1 if current_willing < 1.0 else 0.05 - is_emoji_not_reply = False - if willing_info.is_emoji: - if global_config.normal_chat.emoji_response_penalty != 0: - current_willing *= global_config.normal_chat.emoji_response_penalty - else: - is_emoji_not_reply = True - - # 处理picid格式消息,直接不回复 - is_picid_not_reply = False - if willing_info.is_picid: - is_picid_not_reply = True - self.chat_reply_willing[chat_id] = min(current_willing, 3.0) reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1) - if is_emoji_not_reply: - reply_probability = 0 - - if is_picid_not_reply: - reply_probability = 0 - return reply_probability async def before_generate_reply_handle(self, message_id): @@ -71,8 +53,6 @@ class ClassicalWillingManager(BaseWillingManager): if current_willing < 1: self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.4) - async def bombing_buffer_message_handle(self, message_id): - return await super().bombing_buffer_message_handle(message_id) async def not_reply_handle(self, message_id): return await super().not_reply_handle(message_id) diff --git a/src/chat/normal_chat/willing/mode_custom.py b/src/chat/normal_chat/willing/mode_custom.py index 4b2e8f3c3..36334df43 100644 --- a/src/chat/normal_chat/willing/mode_custom.py +++ b/src/chat/normal_chat/willing/mode_custom.py @@ -17,8 +17,5 @@ class CustomWillingManager(BaseWillingManager): async def get_reply_probability(self, message_id: str): pass - async def bombing_buffer_message_handle(self, message_id: str): - pass - def __init__(self): super().__init__() diff --git a/src/chat/normal_chat/willing/mode_mxp.py b/src/chat/normal_chat/willing/mode_mxp.py index 03651d080..89f6b4757 100644 --- a/src/chat/normal_chat/willing/mode_mxp.py +++ b/src/chat/normal_chat/willing/mode_mxp.py @@ -19,7 +19,6 @@ Mxp 模式:梦溪畔独家赞助 下下策是询问一个菜鸟(@梦溪畔) """ -from src.config.config import global_config from .willing_manager import BaseWillingManager 
from typing import Dict import asyncio @@ -172,23 +171,11 @@ class MxpWillingManager(BaseWillingManager): self.logger.debug("进行中消息惩罚:归0") probability = self._willing_to_probability(current_willing) - - if w_info.is_emoji: - probability *= global_config.normal_chat.emoji_response_penalty - - if w_info.is_picid: - probability = 0 # picid格式消息直接不回复 - + self.temporary_willing = current_willing return probability - async def bombing_buffer_message_handle(self, message_id: str): - """炸飞消息处理""" - async with self.lock: - w_info = self.ongoing_messages[message_id] - self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += 0.1 - async def _return_to_basic_willing(self): """使每个人的意愿恢复到chat基础意愿""" while True: diff --git a/src/chat/normal_chat/willing/willing_manager.py b/src/chat/normal_chat/willing/willing_manager.py index 47c6bfd0f..9a7ce4857 100644 --- a/src/chat/normal_chat/willing/willing_manager.py +++ b/src/chat/normal_chat/willing/willing_manager.py @@ -20,7 +20,6 @@ before_generate_reply_handle 确定要回复后,在生成回复前的处理 after_generate_reply_handle 确定要回复后,在生成回复后的处理 not_reply_handle 确定不回复后的处理 get_reply_probability 获取回复概率 -bombing_buffer_message_handle 缓冲器炸飞消息后的处理 get_variable_parameters 暂不确定 set_variable_parameters 暂不确定 以下2个方法根据你的实现可以做调整: @@ -137,10 +136,6 @@ class BaseWillingManager(ABC): """抽象方法:获取回复概率""" raise NotImplementedError - @abstractmethod - async def bombing_buffer_message_handle(self, message_id: str): - """抽象方法:炸飞消息处理""" - pass async def get_willing(self, chat_id: str): """获取指定聊天流的回复意愿""" diff --git a/src/config/official_configs.py b/src/config/official_configs.py index f56f0e3d6..3cfeecf51 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -285,8 +285,6 @@ class NormalChatConfig(ConfigBase): response_interested_rate_amplifier: float = 1.0 """回复兴趣度放大系数""" - emoji_response_penalty: float = 0.0 - """表情包回复惩罚系数""" mentioned_bot_inevitable_reply: bool = False """提及 bot 必然回复""" @@ -297,6 +295,15 @@ class NormalChatConfig(ConfigBase): enable_planner: bool = False """是否启用动作规划器""" + gather_timeout: int = 110 # planner和generator的并行执行超时时间 + """planner和generator的并行执行超时时间""" + + auto_focus_threshold: float = 1.0 # 自动切换到专注模式的阈值,值越大越难触发 + """自动切换到专注模式的阈值,值越大越难触发""" + + fatigue_talk_frequency: float = 0.2 # 疲劳模式下的基础对话频率 (条/分钟) + """疲劳模式下的基础对话频率 (条/分钟)""" + @dataclass class FocusChatConfig(ConfigBase): diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 84bca3718..c4ddd21d8 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "3.1.0" +version = "3.2.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -61,7 +61,7 @@ enable_relationship = true # 是否启用关系系统 relation_frequency = 1 # 关系频率,麦麦构建关系的速度,仅在normal_chat模式下有效 [chat] #麦麦的聊天通用设置 -chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,在普通模式和专注模式之间自动切换 +chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,自动auto:在普通模式和专注模式之间自动切换 # chat_mode = "focus" # chat_mode = "auto" @@ -116,18 +116,17 @@ ban_msgs_regex = [ [normal_chat] #普通聊天 #一般回复参数 -emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率,设置为1让麦麦自己决定发不发 -thinking_timeout = 120 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢) +emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率 +thinking_timeout = 30 # 麦麦最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢) willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,mxp模式:mxp,自定义模式:custom(需要你自己实现) response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数 -emoji_response_penalty = 0 # 对其他人发的表情包回复惩罚系数,设为0为不回复单个表情包,减少单独回复表情包的概率 mentioned_bot_inevitable_reply = 
true # 提及 bot 必然回复 at_bot_inevitable_reply = true # @bot 必然回复(包含提及) -enable_planner = false # 是否启用动作规划器(与focus_chat共享actions) +enable_planner = true # 是否启用动作规划器(与focus_chat共享actions) [focus_chat] #专注聊天 @@ -168,7 +167,7 @@ consolidation_check_percentage = 0.05 # 检查节点比例 #不希望记忆的词,已经记忆的不会受到影响,需要手动清理 memory_ban_words = [ "表情包", "图片", "回复", "聊天记录" ] -[mood] # 仅在 普通聊天 有效 +[mood] # 暂时不再有效,请不要使用 enable_mood = false # 是否启用情绪系统 mood_update_interval = 1.0 # 情绪更新间隔 单位秒 mood_decay_rate = 0.95 # 情绪衰减率 @@ -242,7 +241,7 @@ library_log_levels = { "aiohttp" = "WARNING"} # 设置特定库的日志级别 # thinking_budget = : 用于指定模型思考最长长度 [model] -model_max_output_length = 800 # 模型单次返回的最大token数 +model_max_output_length = 1000 # 模型单次返回的最大token数 #------------必填:组件模型------------ @@ -272,11 +271,12 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗) temp = 0.2 #模型的温度,新V3建议0.1-0.3 [model.replyer_2] # 次要回复模型 -name = "Pro/deepseek-ai/DeepSeek-R1" +name = "Pro/deepseek-ai/DeepSeek-V3" provider = "SILICONFLOW" -pri_in = 4.0 #模型的输入价格(非必填,可以记录消耗) -pri_out = 16.0 #模型的输出价格(非必填,可以记录消耗) -temp = 0.7 +pri_in = 2 #模型的输入价格(非必填,可以记录消耗) +pri_out = 8 #模型的输出价格(非必填,可以记录消耗) +#默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数 +temp = 0.2 #模型的温度,新V3建议0.1-0.3 [model.memory_summary] # 记忆的概括模型 From 90c705c16a04f3da5eedf78a810f21dbd5aa2248 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 5 Jul 2025 14:57:17 +0000 Subject: [PATCH 25/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/sub_heartflow.py | 1 - src/chat/message_receive/message_sender.py | 1 - src/chat/normal_chat/normal_chat.py | 32 +++++++++---------- .../normal_chat/willing/mode_classical.py | 1 - src/chat/normal_chat/willing/mode_mxp.py | 2 +- .../normal_chat/willing/willing_manager.py | 1 - src/config/official_configs.py | 1 - 7 files changed, 16 insertions(+), 23 deletions(-) diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py index cd417f872..206c00364 100644 --- a/src/chat/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -368,7 +368,6 @@ class SubHeartflow: return self.normal_chat_instance.get_action_manager() return None - async def get_full_state(self) -> dict: """获取子心流的完整状态,包括兴趣、思维和聊天状态。""" return { diff --git a/src/chat/message_receive/message_sender.py b/src/chat/message_receive/message_sender.py index e54f37d12..aa6721db3 100644 --- a/src/chat/message_receive/message_sender.py +++ b/src/chat/message_receive/message_sender.py @@ -191,7 +191,6 @@ class MessageManager: container = await self.get_container(chat_stream.stream_id) container.add_message(message) - async def _handle_sending_message(self, container: MessageContainer, message: MessageSending): """处理单个 MessageSending 消息 (包含 set_reply 逻辑)""" try: diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 128ce94d7..a737d5bec 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -116,7 +116,7 @@ class NormalChat: self.get_cooldown_progress_callback = get_cooldown_progress_callback self._disabled = False # 增加停用标志 - + self.timeout_count = 0 # 加载持久化的缓存 @@ -489,7 +489,6 @@ class NormalChat: f"[{self.stream_name}] 从队列中取出消息进行处理: User {message.message_info.user_info.user_id}, Time: {time.strftime('%H:%M:%S', time.localtime(message.message_info.time))}" ) - # 检查是否有用户满足关系构建条件 
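# The create_task call below dispatches the relation-building check as a fire-and-forget
# background task, so it does not block the reply path. As a general asyncio usage note:
# the event loop keeps only a weak reference to such tasks, so code that must guarantee
# they run to completion typically stores a reference, e.g. (illustrative names only):
#
#     background_tasks = set()
#     task = asyncio.create_task(check_relations(message))
#     background_tasks.add(task)
#     task.add_done_callback(background_tasks.discard)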
asyncio.create_task(self._check_relation_building_conditions(message)) @@ -716,9 +715,8 @@ class NormalChat: if self.priority_manager: self.priority_manager.add_message(message) return - - - # 新增:在auto模式下检查是否需要直接切换到focus模式 + + # 新增:在auto模式下检查是否需要直接切换到focus模式 if global_config.chat.chat_mode == "auto": if await self._check_should_switch_to_focus(): logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件,尝试执行切换") @@ -742,8 +740,6 @@ class NormalChat: if self._disabled: return - - # 检查是否有用户满足关系构建条件 asyncio.create_task(self._check_relation_building_conditions(message)) @@ -769,7 +765,7 @@ class NormalChat: # 处理表情包 if message.is_emoji or message.is_picid: - reply_probability = 0 + reply_probability = 0 # 应用疲劳期回复频率调整 fatigue_multiplier = self._get_fatigue_reply_multiplier() @@ -799,8 +795,7 @@ class NormalChat: await willing_manager.before_generate_reply_handle(message.message_info.message_id) do_reply = await self.reply_one_message(message) response_set = do_reply if do_reply else None - - + # 输出性能计时结果 if do_reply and response_set: # 确保 response_set 不是 None timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()]) @@ -852,7 +847,6 @@ class NormalChat: return None try: - no_action = { "action_result": { "action_type": "no_action", @@ -921,12 +915,16 @@ class NormalChat: ) response_set, plan_result = results except asyncio.TimeoutError: - logger.warning(f"[{self.stream_name}] 并行执行回复生成和动作规划超时 ({gather_timeout}秒),正在取消相关任务...") + logger.warning( + f"[{self.stream_name}] 并行执行回复生成和动作规划超时 ({gather_timeout}秒),正在取消相关任务..." + ) self.timeout_count += 1 if self.timeout_count > 5: - logger.error(f"[{self.stream_name}] 连续回复超时,{global_config.normal_chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。") + logger.error( + f"[{self.stream_name}] 连续回复超时,{global_config.normal_chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。" + ) return False - + # 取消未完成的任务 if not gen_task.done(): gen_task.cancel() @@ -935,7 +933,7 @@ class NormalChat: # 清理思考消息 await self._cleanup_thinking_message_by_id(thinking_id) - + response_set = None plan_result = None @@ -1252,12 +1250,12 @@ class NormalChat: async def _check_relation_building_conditions(self, message: MessageRecv): """检查person_engaged_cache中是否有满足关系构建条件的用户""" - # 执行定期清理 + # 执行定期清理 self._cleanup_old_segments() # 更新消息段信息 self._update_user_message_segments(message) - + users_to_build_relationship = [] for person_id, segments in list(self.person_engaged_cache.items()): diff --git a/src/chat/normal_chat/willing/mode_classical.py b/src/chat/normal_chat/willing/mode_classical.py index a6929338c..0b296bbf4 100644 --- a/src/chat/normal_chat/willing/mode_classical.py +++ b/src/chat/normal_chat/willing/mode_classical.py @@ -53,6 +53,5 @@ class ClassicalWillingManager(BaseWillingManager): if current_willing < 1: self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.4) - async def not_reply_handle(self, message_id): return await super().not_reply_handle(message_id) diff --git a/src/chat/normal_chat/willing/mode_mxp.py b/src/chat/normal_chat/willing/mode_mxp.py index 89f6b4757..7b9e55568 100644 --- a/src/chat/normal_chat/willing/mode_mxp.py +++ b/src/chat/normal_chat/willing/mode_mxp.py @@ -171,7 +171,7 @@ class MxpWillingManager(BaseWillingManager): self.logger.debug("进行中消息惩罚:归0") probability = self._willing_to_probability(current_willing) - + self.temporary_willing = current_willing return probability diff --git a/src/chat/normal_chat/willing/willing_manager.py 
b/src/chat/normal_chat/willing/willing_manager.py index 9a7ce4857..0fa701f94 100644 --- a/src/chat/normal_chat/willing/willing_manager.py +++ b/src/chat/normal_chat/willing/willing_manager.py @@ -136,7 +136,6 @@ class BaseWillingManager(ABC): """抽象方法:获取回复概率""" raise NotImplementedError - async def get_willing(self, chat_id: str): """获取指定聊天流的回复意愿""" async with self.lock: diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 3cfeecf51..7dc63089b 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -285,7 +285,6 @@ class NormalChatConfig(ConfigBase): response_interested_rate_amplifier: float = 1.0 """回复兴趣度放大系数""" - mentioned_bot_inevitable_reply: bool = False """提及 bot 必然回复""" From e0ce27f745ce4f3c3ad1ea75777a0bd44d15a344 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 23:19:36 +0800 Subject: [PATCH 26/63] =?UTF-8?q?=E6=9B=B4=E6=96=B0=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E6=96=87=E4=BB=B6=E7=89=88=E6=9C=AC=E5=8F=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 5 +- .../observation/chatting_observation.py | 37 ++---- src/config/config.py | 2 +- src/plugins/built_in/core_actions/plugin.py | 124 ------------------ 4 files changed, 14 insertions(+), 154 deletions(-) diff --git a/changelogs/changelog.md b/changelogs/changelog.md index bef8ab146..ce411dd16 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -1,6 +1,6 @@ # Changelog -## [0.8.1] - 2025-6-27 +## [0.8.1] - 2025-7-5 功能更新: @@ -21,9 +21,6 @@ - 修复表达器无法读取原始文本 - 修复normal planner没有超时退出问题 - - - ## [0.8.0] - 2025-6-27 MaiBot 0.8.0 现已推出! diff --git a/src/chat/heart_flow/observation/chatting_observation.py b/src/chat/heart_flow/observation/chatting_observation.py index 1a41ede1f..d96d62649 100644 --- a/src/chat/heart_flow/observation/chatting_observation.py +++ b/src/chat/heart_flow/observation/chatting_observation.py @@ -16,7 +16,7 @@ logger = get_logger("observation") # 定义提示模板 Prompt( - """这是qq群聊的聊天记录,请总结以下聊天记录的主题: + """这是{chat_type_description},请总结以下聊天记录的主题: {chat_logs} 请概括这段聊天记录的主题和主要内容 主题:简短的概括,包括时间,人物和事件,不要超过20个字 @@ -28,22 +28,7 @@ Prompt( "content": "内容,可以是对聊天记录的概括,也可以是聊天记录的详细内容" }} """, - "chat_summary_group_prompt", # Template for group chat -) - -Prompt( - """这是你和{chat_target}的私聊记录,请总结以下聊天记录的主题: -{chat_logs} -请用一句话概括,包括事件,时间,和主要信息,不要分点。 -主题:简短的介绍,不要超过10个字 -内容:包括人物、事件和主要信息,不要分点。 - -请用json格式返回,格式如下: -{{ - "theme": "主题", - "content": "内容" -}}""", - "chat_summary_private_prompt", # Template for private chat + "chat_summary_prompt", ) @@ -132,11 +117,10 @@ class ChattingObservation(Observation): ) # 根据聊天类型选择提示模板 + prompt_template_name = "chat_summary_prompt" if self.is_group_chat: - prompt_template_name = "chat_summary_group_prompt" - prompt = await global_prompt_manager.format_prompt(prompt_template_name, chat_logs=oldest_messages_str) + chat_type_description = "qq群聊的聊天记录" else: - prompt_template_name = "chat_summary_private_prompt" chat_target_name = "对方" if self.chat_target_info: chat_target_name = ( @@ -144,11 +128,14 @@ class ChattingObservation(Observation): or self.chat_target_info.get("user_nickname") or chat_target_name ) - prompt = await global_prompt_manager.format_prompt( - prompt_template_name, - chat_target=chat_target_name, - chat_logs=oldest_messages_str, - ) + chat_type_description = f"你和{chat_target_name}的私聊记录" + + prompt = await global_prompt_manager.format_prompt( + prompt_template_name, + chat_type_description=chat_type_description, + 
chat_logs=oldest_messages_str, + ) + self.compressor_prompt = prompt diff --git a/src/config/config.py b/src/config/config.py index 9beeed6ba..33561c48f 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -50,7 +50,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template") # 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 # 对该字段的更新,请严格参照语义化版本规范:https://semver.org/lang/zh-CN/ -MMC_VERSION = "0.8.1-snapshot.1" +MMC_VERSION = "0.8.1" def update_config(): diff --git a/src/plugins/built_in/core_actions/plugin.py b/src/plugins/built_in/core_actions/plugin.py index cb469ae87..217405c0e 100644 --- a/src/plugins/built_in/core_actions/plugin.py +++ b/src/plugins/built_in/core_actions/plugin.py @@ -206,127 +206,3 @@ class CoreActionsPlugin(BasePlugin): # components.append((DeepReplyAction.get_action_info(), DeepReplyAction)) return components - - -# class DeepReplyAction(BaseAction): -# """回复动作 - 参与聊天回复""" - -# # 激活设置 -# focus_activation_type = ActionActivationType.ALWAYS -# normal_activation_type = ActionActivationType.NEVER -# mode_enable = ChatMode.FOCUS -# parallel_action = False - -# # 动作基本信息 -# action_name = "deep_reply" -# action_description = "参与聊天回复,关注某个话题,对聊天内容进行深度思考,给出回复" - -# # 动作参数定义 -# action_parameters = { -# "topic": "想要思考的话题" -# } - -# # 动作使用场景 -# action_require = ["有些问题需要深度思考", "某个问题可能涉及多个方面", "某个问题涉及专业领域或者需要专业知识","这个问题讨论的很激烈,需要深度思考"] - -# # 关联类型 -# associated_types = ["text"] - -# async def execute(self) -> Tuple[bool, str]: -# """执行回复动作""" -# logger.info(f"{self.log_prefix} 决定深度思考") - -# try: -# # 获取聊天观察 -# chatting_observation = self._get_chatting_observation() -# if not chatting_observation: -# return False, "未找到聊天观察" - -# talking_message_str = chatting_observation.talking_message_str - -# # 处理回复目标 -# chat_stream = self.api.get_service("chat_stream") -# anchor_message = await create_empty_anchor_message(chat_stream.platform, chat_stream.group_info, chat_stream) - - -# llm_model = self.api.get_available_models().replyer_1 - -# prompt = f""" -# {talking_message_str} - -# 在上面的聊天中,你对{self.action_data.get("topic", "")}感兴趣,形成深刻观点,请你思考,总结成一份学术论文,APA标准格式 -# """ - -# success, response, reasoning, model_name = await self.api.generate_with_model(prompt, llm_model) - -# print(prompt) -# print(f"DeepReplyAction: {response}") - -# # prompt = f""" -# # {talking_message_str} - -# # 在上面的聊天中,你对{self.action_data.get("topic", "")}感兴趣,请你思考 -# # """ - -# extra_info_block = self.action_data.get("extra_info_block", "") -# extra_info_block += response -# # extra_info_block += f"\n--------------------------------\n注意,这是最重要的内容!!!!!你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以下方聊天记录的回复要求不再适用,请你自由的表达,不论字数长短限制\n\n--------------------------------\n注意,你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以下方聊天记录的回复要求不再适用,请你自由的表达,不论字数长短限制\n" -# # extra_info_block += f"\n--------------------------------\n注意,优先关注这句!!!!你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以下方聊天记录的回复要求不再适用,请你自由的表达,不论字数长短限制\n\n--------------------------------\n注意,你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以其他的回复要求不再适用,请你自由的表达,不论字数长短限制\n" -# self.action_data["extra_info_block"] = extra_info_block - - -# # 获取回复器服务 -# # replyer = self.api.get_service("replyer") -# # if not replyer: -# # logger.error(f"{self.log_prefix} 未找到回复器服务") -# # return False, "回复器服务不可用" - -# # await self.send_message_by_expressor(extra_info_block) -# await self.send_text(extra_info_block) -# # 执行回复 -# # success, reply_set = await replyer.deal_reply( -# # cycle_timers=self.cycle_timers, -# # action_data=self.action_data, -# # anchor_message=anchor_message, 
-# # reasoning=self.reasoning, -# # thinking_id=self.thinking_id, -# # ) - -# # 构建回复文本 -# reply_text = "self._build_reply_text(reply_set)" - -# # 存储动作记录 -# await self.api.store_action_info( -# action_build_into_prompt=False, -# action_prompt_display=reply_text, -# action_done=True, -# thinking_id=self.thinking_id, -# action_data=self.action_data, -# ) - -# # 重置NoReplyAction的连续计数器 -# NoReplyAction.reset_consecutive_count() - -# return success, reply_text - -# except Exception as e: -# logger.error(f"{self.log_prefix} 回复动作执行失败: {e}") -# return False, f"回复失败: {str(e)}" - -# def _get_chatting_observation(self) -> Optional[ChattingObservation]: -# """获取聊天观察对象""" -# observations = self.api.get_service("observations") or [] -# for obs in observations: -# if isinstance(obs, ChattingObservation): -# return obs -# return None - - -# def _build_reply_text(self, reply_set) -> str: -# """构建回复文本""" -# reply_text = "" -# if reply_set: -# for reply in reply_set: -# data = reply[1] -# reply_text += data -# return reply_text From 88e09642551398d51c5a4421c43759e2aa805f66 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 5 Jul 2025 15:25:35 +0000 Subject: [PATCH 27/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/heart_flow/observation/chatting_observation.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/chat/heart_flow/observation/chatting_observation.py b/src/chat/heart_flow/observation/chatting_observation.py index d96d62649..2a4a42856 100644 --- a/src/chat/heart_flow/observation/chatting_observation.py +++ b/src/chat/heart_flow/observation/chatting_observation.py @@ -129,14 +129,13 @@ class ChattingObservation(Observation): or chat_target_name ) chat_type_description = f"你和{chat_target_name}的私聊记录" - + prompt = await global_prompt_manager.format_prompt( prompt_template_name, chat_type_description=chat_type_description, chat_logs=oldest_messages_str, ) - self.compressor_prompt = prompt # 构建当前消息 From f67192de8372ee9e70872e6beb2a261978f3bf20 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 5 Jul 2025 23:39:19 +0800 Subject: [PATCH 28/63] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=E5=9C=A8au?= =?UTF-8?q?to=E6=A8=A1=E5=BC=8F=E4=B8=8B=EF=BC=8C=E7=A7=81=E8=81=8A?= =?UTF-8?q?=E4=BC=9A=E8=BD=AC=E4=B8=BAnormal=E7=9A=84bug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 6 ++++++ src/chat/focus_chat/heartFC_chat.py | 9 +++++++++ src/config/config.py | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/changelogs/changelog.md b/changelogs/changelog.md index ce411dd16..8be62ac08 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -1,5 +1,11 @@ # Changelog +## [0.8.2] - 2025-7-5 + +优化和修复: + +- 修复在auto模式下,私聊会转为normal的bug + ## [0.8.1] - 2025-7-5 功能更新: diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index a538d9459..d8d9fe0e0 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -307,6 +307,15 @@ class HeartFChatting: if loop_info["loop_action_info"]["command"] == "stop_focus_chat": logger.info(f"{self.log_prefix} 麦麦决定停止专注聊天") + + # 如果是私聊,则不停止,而是重置疲劳度并继续 + if not self.chat_stream.group_info: + logger.info( + f"{self.log_prefix} 私聊模式下收到停止请求,不退出。" + ) + continue # 继续下一次循环,而不是退出 + + # 如果是群聊,则执行原来的停止逻辑 # 
如果设置了回调函数,则调用它 if self.on_stop_focus_chat: try: diff --git a/src/config/config.py b/src/config/config.py index 33561c48f..641353809 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -50,7 +50,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template") # 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 # 对该字段的更新,请严格参照语义化版本规范:https://semver.org/lang/zh-CN/ -MMC_VERSION = "0.8.1" +MMC_VERSION = "0.8.2-snapshot.1" def update_config(): From 61e708fe86bab85ce3cac0a8593c24bbc6e7da96 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 5 Jul 2025 15:40:11 +0000 Subject: [PATCH 29/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/focus_chat/heartFC_chat.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index d8d9fe0e0..bd4d86aa8 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -310,9 +310,7 @@ class HeartFChatting: # 如果是私聊,则不停止,而是重置疲劳度并继续 if not self.chat_stream.group_info: - logger.info( - f"{self.log_prefix} 私聊模式下收到停止请求,不退出。" - ) + logger.info(f"{self.log_prefix} 私聊模式下收到停止请求,不退出。") continue # 继续下一次循环,而不是退出 # 如果是群聊,则执行原来的停止逻辑 From b5698f2ede3a3ca2f615281601e517ad960abb90 Mon Sep 17 00:00:00 2001 From: A0000Xz <122650088+A0000Xz@users.noreply.github.com> Date: Sun, 6 Jul 2025 02:47:06 +0800 Subject: [PATCH 30/63] =?UTF-8?q?=E8=AE=A9=E9=BA=A6=E9=BA=A6=E8=87=AA?= =?UTF-8?q?=E5=B7=B1=E5=8F=91=E7=9A=84=E5=9B=BE=E7=89=87=E4=B9=9F=E8=83=BD?= =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96picid=E7=AD=89=E5=AD=97=E6=AE=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/utils/utils_image.py | 54 ++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py index 25b753bab..eed65ad88 100644 --- a/src/chat/utils/utils_image.py +++ b/src/chat/utils/utils_image.py @@ -178,12 +178,24 @@ class ImageManager: """获取普通图片描述,带查重和保存功能""" try: # 计算图片哈希 - # 确保base64字符串只包含ASCII字符 if isinstance(image_base64, str): image_base64 = image_base64.encode("ascii", errors="ignore").decode("ascii") image_bytes = base64.b64decode(image_base64) image_hash = hashlib.md5(image_bytes).hexdigest() - image_format = Image.open(io.BytesIO(image_bytes)).format.lower() + + # 检查图片是否已存在 + existing_image = Images.get_or_none(Images.emoji_hash == image_hash) + if existing_image: + # 更新计数 + if hasattr(existing_image, 'count') and existing_image.count is not None: + existing_image.count += 1 + else: + existing_image.count = 1 + existing_image.save() + + # 如果已有描述,直接返回 + if existing_image.description: + return f"[图片:{existing_image.description}]" # 查询缓存的描述 cached_description = self._get_description_from_db(image_hash, "image") @@ -192,6 +204,7 @@ class ImageManager: return f"[图片:{cached_description}]" # 调用AI获取描述 + image_format = Image.open(io.BytesIO(image_bytes)).format.lower() prompt = "请用中文描述这张图片的内容。如果有文字,请把文字都描述出来,请留意其主题,直观感受,输出为一段平文本,最多50字" description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format) @@ -199,17 +212,7 @@ class ImageManager: logger.warning("AI未能生成图片描述") return "[图片(描述生成失败)]" - # 再次检查缓存 - cached_description = self._get_description_from_db(image_hash, "image") - if cached_description: - logger.warning(f"虽然生成了描述,但是找到缓存图片描述 
{cached_description}") - return f"[图片:{cached_description}]" - - logger.debug(f"描述是{description}") - - # 根据配置决定是否保存图片 - - # 生成文件名和路径 + # 保存图片和描述 current_timestamp = time.time() filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}" image_dir = os.path.join(self.IMAGE_DIR, "image") @@ -221,26 +224,31 @@ class ImageManager: with open(file_path, "wb") as f: f.write(image_bytes) - # 保存到数据库 (Images表) - try: - img_obj = Images.get((Images.emoji_hash == image_hash) & (Images.type == "image")) - img_obj.path = file_path - img_obj.description = description - img_obj.timestamp = current_timestamp - img_obj.save() - except Images.DoesNotExist: + # 保存到数据库,补充缺失字段 + if existing_image: + existing_image.path = file_path + existing_image.description = description + existing_image.timestamp = current_timestamp + if not hasattr(existing_image, 'image_id') or not existing_image.image_id: + existing_image.image_id = str(uuid.uuid4()) + if not hasattr(existing_image, 'vlm_processed') or existing_image.vlm_processed is None: + existing_image.vlm_processed = True + existing_image.save() + else: Images.create( + image_id=str(uuid.uuid4()), emoji_hash=image_hash, path=file_path, type="image", description=description, timestamp=current_timestamp, + vlm_processed=True, + count=1, ) - logger.debug(f"保存图片元数据: {file_path}") except Exception as e: logger.error(f"保存图片文件或元数据失败: {str(e)}") - # 保存描述到数据库 (ImageDescriptions表) + # 保存描述到ImageDescriptions表 self._save_description_to_db(image_hash, description, "image") return f"[图片:{description}]" From ed1d21ad671a658239cfe23d5bb5f29245e55914 Mon Sep 17 00:00:00 2001 From: A0000Xz <122650088+A0000Xz@users.noreply.github.com> Date: Sun, 6 Jul 2025 02:50:39 +0800 Subject: [PATCH 31/63] =?UTF-8?q?=E8=AE=A9=E9=BA=A6=E9=BA=A6=E8=87=AA?= =?UTF-8?q?=E5=B7=B1=E5=8F=91=E7=9A=84=E5=9B=BE=E7=89=87=E5=9C=A8=E6=95=B0?= =?UTF-8?q?=E6=8D=AE=E5=BA=93=E5=86=85=E4=B8=8D=E4=BB=A5=E6=96=87=E6=9C=AC?= =?UTF-8?q?=E6=8F=8F=E8=BF=B0=E5=AD=98=E5=82=A8=E8=80=8C=E6=98=AF=E4=BB=A5?= =?UTF-8?q?[picid:]=E6=A0=BC=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/message_receive/storage.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/src/chat/message_receive/storage.py b/src/chat/message_receive/storage.py index 862354db7..23afe6c87 100644 --- a/src/chat/message_receive/storage.py +++ b/src/chat/message_receive/storage.py @@ -4,7 +4,7 @@ from typing import Union # from ...common.database.database import db # db is now Peewee's SqliteDatabase instance from .message import MessageSending, MessageRecv from .chat_stream import ChatStream -from ...common.database.database_model import Messages, RecalledMessages # Import Peewee models +from ...common.database.database_model import Messages, RecalledMessages, Images # Import Peewee models from src.common.logger import get_logger logger = get_logger("message_storage") @@ -25,6 +25,7 @@ class MessageStorage: # print(processed_plain_text) if processed_plain_text: + processed_plain_text = MessageStorage.replace_image_descriptions(processed_plain_text) filtered_processed_plain_text = re.sub(pattern, "", processed_plain_text, flags=re.DOTALL) else: filtered_processed_plain_text = "" @@ -136,3 +137,28 @@ class MessageStorage: except Exception as e: logger.error(f"更新消息ID失败: {e}") + + @staticmethod + def replace_image_descriptions(text: str) -> str: + """将[图片:描述]替换为[picid:image_id]""" + # 先检查文本中是否有图片标记 + pattern = r'\[图片:([^\]]+)\]' + matches = 
re.findall(pattern, text) + + if not matches: + logger.debug("文本中没有图片标记,直接返回原文本") + return text + def replace_match(match): + description = match.group(1).strip() + try: + image_record = (Images.select() + .where(Images.description == description) + .order_by(Images.timestamp.desc()) + .first()) + if image_record: + return f"[picid:{image_record.image_id}]" + else: + return match.group(0) # 保持原样 + except Exception as e: + return match.group(0) + return re.sub(r'\[图片:([^\]]+)\]', replace_match, text) From 869a02d232221d26bb16f54396d580e649b01d44 Mon Sep 17 00:00:00 2001 From: "CNMr.Sunshine" <61444298+CNMrSunshine@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:10:14 +0800 Subject: [PATCH 32/63] Update planner_simple.py --- src/chat/focus_chat/planners/planner_simple.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/chat/focus_chat/planners/planner_simple.py b/src/chat/focus_chat/planners/planner_simple.py index 20f41c711..05c57dcf6 100644 --- a/src/chat/focus_chat/planners/planner_simple.py +++ b/src/chat/focus_chat/planners/planner_simple.py @@ -29,6 +29,11 @@ def init_prompt(): {chat_context_description},以下是具体的聊天内容: {chat_content_block} {moderation_prompt} + +重要提醒:避免重复回复同一话题 +- 如果你在最近1分钟内已经对某个话题进行了回复,不要再次回复相同或相似的内容 +- 如果聊天记录显示你刚刚已经回复过相似内容,即使话题仍然在进行,也应该选择no_reply + 现在请你根据聊天内容选择合适的action: {action_options_text} From 54516bc8731227852db9acd075419798ee2ea3fe Mon Sep 17 00:00:00 2001 From: "CNMr.Sunshine" <61444298+CNMrSunshine@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:19:13 +0800 Subject: [PATCH 33/63] Update planner_simple.py --- src/chat/focus_chat/planners/planner_simple.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/chat/focus_chat/planners/planner_simple.py b/src/chat/focus_chat/planners/planner_simple.py index 05c57dcf6..0b7aa96e7 100644 --- a/src/chat/focus_chat/planners/planner_simple.py +++ b/src/chat/focus_chat/planners/planner_simple.py @@ -30,9 +30,7 @@ def init_prompt(): {chat_content_block} {moderation_prompt} -重要提醒:避免重复回复同一话题 -- 如果你在最近1分钟内已经对某个话题进行了回复,不要再次回复相同或相似的内容 -- 如果聊天记录显示你刚刚已经回复过相似内容,即使话题仍然在进行,也应该选择no_reply +重要提醒:如果聊天记录显示你刚刚已经回复过相似内容,即使话题仍然在进行,必须选择no_reply 现在请你根据聊天内容选择合适的action: From 2a0dfb7642209723628e0d205e59847f06a3f3d0 Mon Sep 17 00:00:00 2001 From: "CNMr.Sunshine" <61444298+CNMrSunshine@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:25:15 +0800 Subject: [PATCH 34/63] Update working_memory_processor.py --- .../working_memory_processor.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/chat/focus_chat/info_processors/working_memory_processor.py b/src/chat/focus_chat/info_processors/working_memory_processor.py index 2de0bcfab..f81833c00 100644 --- a/src/chat/focus_chat/info_processors/working_memory_processor.py +++ b/src/chat/focus_chat/info_processors/working_memory_processor.py @@ -71,6 +71,7 @@ class WorkingMemoryProcessor(BaseProcessor): """ working_memory = None chat_info = "" + chat_obs = None try: for observation in observations: if isinstance(observation, WorkingMemoryObservation): @@ -79,10 +80,15 @@ class WorkingMemoryProcessor(BaseProcessor): chat_info = observation.get_observe_info() chat_obs = observation # 检查是否有待压缩内容 - if chat_obs.compressor_prompt: + if chat_obs and chat_obs.compressor_prompt: logger.debug(f"{self.log_prefix} 压缩聊天记忆") await self.compress_chat_memory(working_memory, chat_obs) + # 检查working_memory是否为None + if working_memory is None: + logger.debug(f"{self.log_prefix} 没有找到工作记忆观察,跳过处理") + return [] + all_memory = 
working_memory.get_all_memories() if not all_memory: logger.debug(f"{self.log_prefix} 目前没有工作记忆,跳过提取") @@ -183,6 +189,11 @@ class WorkingMemoryProcessor(BaseProcessor): working_memory: 工作记忆对象 obs: 聊天观察对象 """ + # 检查working_memory是否为None + if working_memory is None: + logger.warning(f"{self.log_prefix} 工作记忆对象为None,无法压缩聊天记忆") + return + try: summary_result, _ = await self.llm_model.generate_response_async(obs.compressor_prompt) if not summary_result: @@ -235,6 +246,11 @@ class WorkingMemoryProcessor(BaseProcessor): memory_id1: 第一个记忆ID memory_id2: 第二个记忆ID """ + # 检查working_memory是否为None + if working_memory is None: + logger.warning(f"{self.log_prefix} 工作记忆对象为None,无法合并记忆") + return + try: merged_memory = await working_memory.merge_memory(memory_id1, memory_id2) logger.debug(f"{self.log_prefix} 合并后的记忆梗概: {merged_memory.brief}") From c0de1fcc3f130f4e428c1da54288530100141cf8 Mon Sep 17 00:00:00 2001 From: "CNMr.Sunshine" <61444298+CNMrSunshine@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:25:37 +0800 Subject: [PATCH 35/63] Update knowledge_lib.py --- src/chat/knowledge/knowledge_lib.py | 108 +++++++++++++++------------- 1 file changed, 58 insertions(+), 50 deletions(-) diff --git a/src/chat/knowledge/knowledge_lib.py b/src/chat/knowledge/knowledge_lib.py index 6a4fcd4ea..a9d603b9f 100644 --- a/src/chat/knowledge/knowledge_lib.py +++ b/src/chat/knowledge/knowledge_lib.py @@ -5,60 +5,68 @@ from src.chat.knowledge.mem_active_manager import MemoryActiveManager from src.chat.knowledge.qa_manager import QAManager from src.chat.knowledge.kg_manager import KGManager from src.chat.knowledge.global_logger import logger +from src.config.config import global_config as bot_global_config # try: # import quick_algo # except ImportError: # print("quick_algo not found, please install it first") -logger.info("正在初始化Mai-LPMM\n") -logger.info("创建LLM客户端") -llm_client_list = dict() -for key in global_config["llm_providers"]: - llm_client_list[key] = LLMClient( - global_config["llm_providers"][key]["base_url"], - global_config["llm_providers"][key]["api_key"], +# 检查LPMM知识库是否启用 +if bot_global_config.lpmm_knowledge.enable: + logger.info("正在初始化Mai-LPMM\n") + logger.info("创建LLM客户端") + llm_client_list = dict() + for key in global_config["llm_providers"]: + llm_client_list[key] = LLMClient( + global_config["llm_providers"][key]["base_url"], + global_config["llm_providers"][key]["api_key"], + ) + + # 初始化Embedding库 + embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]]) + logger.info("正在从文件加载Embedding库") + try: + embed_manager.load_from_file() + except Exception as e: + logger.warning("此消息不会影响正常使用:从文件加载Embedding库时,{}".format(e)) + # logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误") + logger.info("Embedding库加载完成") + # 初始化KG + kg_manager = KGManager() + logger.info("正在从文件加载KG") + try: + kg_manager.load_from_file() + except Exception as e: + logger.warning("此消息不会影响正常使用:从文件加载KG时,{}".format(e)) + # logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误") + logger.info("KG加载完成") + + logger.info(f"KG节点数量:{len(kg_manager.graph.get_node_list())}") + logger.info(f"KG边数量:{len(kg_manager.graph.get_edge_list())}") + + + # 数据比对:Embedding库与KG的段落hash集合 + for pg_hash in kg_manager.stored_paragraph_hashes: + key = PG_NAMESPACE + "-" + pg_hash + if key not in embed_manager.stored_pg_hashes: + logger.warning(f"KG中存在Embedding库中不存在的段落:{key}") + + # 问答系统(用于知识库) + qa_manager = QAManager( + embed_manager, + kg_manager, + llm_client_list[global_config["embedding"]["provider"]], + 
llm_client_list[global_config["qa"]["llm"]["provider"]], + llm_client_list[global_config["qa"]["llm"]["provider"]], ) -# 初始化Embedding库 -embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]]) -logger.info("正在从文件加载Embedding库") -try: - embed_manager.load_from_file() -except Exception as e: - logger.warning("此消息不会影响正常使用:从文件加载Embedding库时,{}".format(e)) - # logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误") -logger.info("Embedding库加载完成") -# 初始化KG -kg_manager = KGManager() -logger.info("正在从文件加载KG") -try: - kg_manager.load_from_file() -except Exception as e: - logger.warning("此消息不会影响正常使用:从文件加载KG时,{}".format(e)) - # logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误") -logger.info("KG加载完成") - -logger.info(f"KG节点数量:{len(kg_manager.graph.get_node_list())}") -logger.info(f"KG边数量:{len(kg_manager.graph.get_edge_list())}") - - -# 数据比对:Embedding库与KG的段落hash集合 -for pg_hash in kg_manager.stored_paragraph_hashes: - key = PG_NAMESPACE + "-" + pg_hash - if key not in embed_manager.stored_pg_hashes: - logger.warning(f"KG中存在Embedding库中不存在的段落:{key}") - -# 问答系统(用于知识库) -qa_manager = QAManager( - embed_manager, - kg_manager, - llm_client_list[global_config["embedding"]["provider"]], - llm_client_list[global_config["qa"]["llm"]["provider"]], - llm_client_list[global_config["qa"]["llm"]["provider"]], -) - -# 记忆激活(用于记忆库) -inspire_manager = MemoryActiveManager( - embed_manager, - llm_client_list[global_config["embedding"]["provider"]], -) + # 记忆激活(用于记忆库) + inspire_manager = MemoryActiveManager( + embed_manager, + llm_client_list[global_config["embedding"]["provider"]], + ) +else: + logger.info("LPMM知识库已禁用,跳过初始化") + # 创建空的占位符对象,避免导入错误 + qa_manager = None + inspire_manager = None From 9c0271b10f8f83277c662d36f4b805e3ee4f0141 Mon Sep 17 00:00:00 2001 From: "CNMr.Sunshine" <61444298+CNMrSunshine@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:25:51 +0800 Subject: [PATCH 36/63] Update default_generator.py --- src/chat/replyer/default_generator.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index da9d9a584..1f0b438dc 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -956,6 +956,11 @@ async def get_prompt_info(message: str, threshold: float): logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}") # 从LPMM知识库获取知识 try: + # 检查LPMM知识库是否启用 + if qa_manager is None: + logger.debug("LPMM知识库已禁用,跳过知识获取") + return "" + found_knowledge_from_lpmm = qa_manager.get_knowledge(message) end_time = time.time() From cb5fa45523ceb79e142e31f8dee6160c4e6e0bac Mon Sep 17 00:00:00 2001 From: "CNMr.Sunshine" <61444298+CNMrSunshine@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:26:07 +0800 Subject: [PATCH 37/63] Update pfc_KnowledgeFetcher.py --- src/experimental/PFC/pfc_KnowledgeFetcher.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/experimental/PFC/pfc_KnowledgeFetcher.py b/src/experimental/PFC/pfc_KnowledgeFetcher.py index 38a6dafb9..c52533ea6 100644 --- a/src/experimental/PFC/pfc_KnowledgeFetcher.py +++ b/src/experimental/PFC/pfc_KnowledgeFetcher.py @@ -35,6 +35,11 @@ class KnowledgeFetcher: logger.debug(f"[私聊][{self.private_name}]正在从LPMM知识库中获取知识") try: + # 检查LPMM知识库是否启用 + if qa_manager is None: + logger.debug(f"[私聊][{self.private_name}]LPMM知识库已禁用,跳过知识获取") + return "未找到匹配的知识" + knowledge_info = qa_manager.get_knowledge(query) logger.debug(f"[私聊][{self.private_name}]LPMM知识库查询结果: {knowledge_info:150}") return knowledge_info From 
1da67ae067831714c6ff25b8a3ff70ea4d6c6cc1 Mon Sep 17 00:00:00 2001 From: "CNMr.Sunshine" <61444298+CNMrSunshine@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:26:22 +0800 Subject: [PATCH 38/63] Update lpmm_get_knowledge.py --- src/tools/not_using/lpmm_get_knowledge.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/tools/not_using/lpmm_get_knowledge.py b/src/tools/not_using/lpmm_get_knowledge.py index df4fa6a4f..80b9b617b 100644 --- a/src/tools/not_using/lpmm_get_knowledge.py +++ b/src/tools/not_using/lpmm_get_knowledge.py @@ -36,6 +36,11 @@ class SearchKnowledgeFromLPMMTool(BaseTool): query = function_args.get("query") # threshold = function_args.get("threshold", 0.4) + # 检查LPMM知识库是否启用 + if qa_manager is None: + logger.debug("LPMM知识库已禁用,跳过知识获取") + return {"type": "info", "id": query, "content": "LPMM知识库已禁用"} + # 调用知识库搜索 knowledge_info = qa_manager.get_knowledge(query) From f624547034cf7c9f6e6f5bef57a78117bd333e92 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 11:40:12 +0800 Subject: [PATCH 39/63] Update planner_simple.py --- src/chat/focus_chat/planners/planner_simple.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/chat/focus_chat/planners/planner_simple.py b/src/chat/focus_chat/planners/planner_simple.py index 0b7aa96e7..3d044d6e7 100644 --- a/src/chat/focus_chat/planners/planner_simple.py +++ b/src/chat/focus_chat/planners/planner_simple.py @@ -30,8 +30,6 @@ def init_prompt(): {chat_content_block} {moderation_prompt} -重要提醒:如果聊天记录显示你刚刚已经回复过相似内容,即使话题仍然在进行,必须选择no_reply - 现在请你根据聊天内容选择合适的action: {action_options_text} From 871d3ee7450e6233bcd7f56182358121c3b50d01 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 03:42:36 +0000 Subject: [PATCH 40/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../focus_chat/info_processors/working_memory_processor.py | 4 ++-- src/chat/knowledge/knowledge_lib.py | 1 - src/chat/replyer/default_generator.py | 2 +- src/experimental/PFC/pfc_KnowledgeFetcher.py | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/chat/focus_chat/info_processors/working_memory_processor.py b/src/chat/focus_chat/info_processors/working_memory_processor.py index f81833c00..abe9786d4 100644 --- a/src/chat/focus_chat/info_processors/working_memory_processor.py +++ b/src/chat/focus_chat/info_processors/working_memory_processor.py @@ -193,7 +193,7 @@ class WorkingMemoryProcessor(BaseProcessor): if working_memory is None: logger.warning(f"{self.log_prefix} 工作记忆对象为None,无法压缩聊天记忆") return - + try: summary_result, _ = await self.llm_model.generate_response_async(obs.compressor_prompt) if not summary_result: @@ -250,7 +250,7 @@ class WorkingMemoryProcessor(BaseProcessor): if working_memory is None: logger.warning(f"{self.log_prefix} 工作记忆对象为None,无法合并记忆") return - + try: merged_memory = await working_memory.merge_memory(memory_id1, memory_id2) logger.debug(f"{self.log_prefix} 合并后的记忆梗概: {merged_memory.brief}") diff --git a/src/chat/knowledge/knowledge_lib.py b/src/chat/knowledge/knowledge_lib.py index a9d603b9f..5540d95e2 100644 --- a/src/chat/knowledge/knowledge_lib.py +++ b/src/chat/knowledge/knowledge_lib.py @@ -44,7 +44,6 @@ if bot_global_config.lpmm_knowledge.enable: logger.info(f"KG节点数量:{len(kg_manager.graph.get_node_list())}") logger.info(f"KG边数量:{len(kg_manager.graph.get_edge_list())}") - # 数据比对:Embedding库与KG的段落hash集合 
for pg_hash in kg_manager.stored_paragraph_hashes: key = PG_NAMESPACE + "-" + pg_hash diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 1f0b438dc..1fec8646e 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -960,7 +960,7 @@ async def get_prompt_info(message: str, threshold: float): if qa_manager is None: logger.debug("LPMM知识库已禁用,跳过知识获取") return "" - + found_knowledge_from_lpmm = qa_manager.get_knowledge(message) end_time = time.time() diff --git a/src/experimental/PFC/pfc_KnowledgeFetcher.py b/src/experimental/PFC/pfc_KnowledgeFetcher.py index c52533ea6..a1d161a70 100644 --- a/src/experimental/PFC/pfc_KnowledgeFetcher.py +++ b/src/experimental/PFC/pfc_KnowledgeFetcher.py @@ -39,7 +39,7 @@ class KnowledgeFetcher: if qa_manager is None: logger.debug(f"[私聊][{self.private_name}]LPMM知识库已禁用,跳过知识获取") return "未找到匹配的知识" - + knowledge_info = qa_manager.get_knowledge(query) logger.debug(f"[私聊][{self.private_name}]LPMM知识库查询结果: {knowledge_info:150}") return knowledge_info From b69be93e8ea6cbdfdfa886d2af86427f3bb28af3 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 11:47:03 +0800 Subject: [PATCH 41/63] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=E8=BF=87?= =?UTF-8?q?=E6=BB=A4=E6=AC=A1=E5=BA=8F=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/message_receive/bot.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/chat/message_receive/bot.py b/src/chat/message_receive/bot.py index 7227a929d..faa79ffba 100644 --- a/src/chat/message_receive/bot.py +++ b/src/chat/message_receive/bot.py @@ -189,6 +189,9 @@ class ChatBot: ) message.update_chat_stream(chat) + + # 处理消息内容,生成纯文本 + await message.process() # 过滤检查 if _check_ban_words(message.processed_plain_text, chat, user_info) or _check_ban_regex( @@ -196,8 +199,6 @@ class ChatBot: ): return - # 处理消息内容,生成纯文本 - await message.process() # 命令处理 - 使用新插件系统检查并处理命令 is_command, cmd_result, continue_process = await self._process_commands_with_new_system(message) From 0e485f4680bd9cb29292c1fae465f9882c252ca7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 03:47:36 +0000 Subject: [PATCH 42/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/message_receive/bot.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/chat/message_receive/bot.py b/src/chat/message_receive/bot.py index faa79ffba..6126fc751 100644 --- a/src/chat/message_receive/bot.py +++ b/src/chat/message_receive/bot.py @@ -189,7 +189,7 @@ class ChatBot: ) message.update_chat_stream(chat) - + # 处理消息内容,生成纯文本 await message.process() @@ -199,7 +199,6 @@ class ChatBot: ): return - # 命令处理 - 使用新插件系统检查并处理命令 is_command, cmd_result, continue_process = await self._process_commands_with_new_system(message) From b3a93d16e61585e97eb5b666b68b3fb8c874671c Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 17:02:36 +0800 Subject: [PATCH 43/63] =?UTF-8?q?fix=20-=20=E4=BC=98=E5=8C=96normal=5Fchat?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=EF=BC=8C=E9=87=87=E7=94=A8=E5=92=8Cfocus?= =?UTF-8?q?=E4=B8=80=E8=87=B4=E7=9A=84=E5=85=B3=E7=B3=BB=E6=9E=84=E5=BB=BA?= =?UTF-8?q?=EF=BC=8C=E4=BC=98=E5=8C=96log=EF=BC=8C=E6=B7=BB=E5=8A=A0?= 
=?UTF-8?q?=E8=B6=85=E6=97=B6=E6=A3=80=E6=9F=A5=EF=BC=8C=E5=85=81=E8=AE=B8?= =?UTF-8?q?normal=E4=BD=BF=E7=94=A8llm=E6=BF=80=E6=B4=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 5 + src/chat/express/exprssion_learner.py | 4 +- src/chat/focus_chat/heartFC_chat.py | 76 +- src/chat/focus_chat/memory_activator.py | 4 +- .../focus_chat/planners/planner_factory.py | 45 -- .../focus_chat/planners/planner_simple.py | 15 +- src/chat/memory_system/Hippocampus.py | 4 +- src/chat/normal_chat/normal_chat.py | 684 ++++-------------- .../normal_chat_action_modifier.py | 111 ++- src/chat/normal_chat/normal_chat_generator.py | 72 -- src/chat/normal_chat/normal_chat_planner.py | 8 +- src/chat/replyer/default_generator.py | 49 +- src/chat/replyer/replyer_manager.py | 2 - src/chat/utils/utils.py | 4 +- src/common/logger.py | 9 +- src/config/official_configs.py | 5 +- .../relationship_builder_manager.py | 4 +- src/person_info/relationship_fetcher.py | 10 +- src/plugin_system/apis/database_api.py | 2 +- src/plugin_system/apis/generator_api.py | 13 +- src/plugin_system/apis/send_api.py | 2 +- src/plugins/built_in/core_actions/no_reply.py | 4 +- src/plugins/built_in/core_actions/plugin.py | 24 +- src/tools/tool_executor.py | 5 +- template/bot_config_template.toml | 12 +- 25 files changed, 378 insertions(+), 795 deletions(-) delete mode 100644 src/chat/focus_chat/planners/planner_factory.py delete mode 100644 src/chat/normal_chat/normal_chat_generator.py diff --git a/changelogs/changelog.md b/changelogs/changelog.md index 8be62ac08..41c760e85 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -5,6 +5,11 @@ 优化和修复: - 修复在auto模式下,私聊会转为normal的bug +- 修复一般过滤次序问题 +- 优化normal_chat代码,采用和focus一致的关系构建 +- 优化计时信息和Log +- 添加回复超时检查 +- normal的插件允许llm激活 ## [0.8.1] - 2025-7-5 diff --git a/src/chat/express/exprssion_learner.py b/src/chat/express/exprssion_learner.py index 9fcb69687..9b170d9a3 100644 --- a/src/chat/express/exprssion_learner.py +++ b/src/chat/express/exprssion_learner.py @@ -29,7 +29,7 @@ def init_prompt() -> None: 4. 思考有没有特殊的梗,一并总结成语言风格 5. 例子仅供参考,请严格根据群聊内容总结!!! 
注意:总结成如下格式的规律,总结的内容要详细,但具有概括性: -当"xxxxxx"时,可以"xxxxxx", xxxxxx不超过20个字,为特定句式或表达 +例如:当"AAAAA"时,可以"BBBBB", AAAAA代表某个具体的场景,不超过20个字。BBBBB代表对应的语言风格,特定句式或表达方式,不超过20个字。 例如: 当"对某件事表示十分惊叹,有些意外"时,使用"我嘞个xxxx" @@ -69,7 +69,7 @@ class ExpressionLearner: # TODO: API-Adapter修改标记 self.express_learn_model: LLMRequest = LLMRequest( model=global_config.model.replyer_1, - temperature=0.2, + temperature=0.3, request_type="expressor.learner", ) self.llm_model = None diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index bd4d86aa8..1009edde5 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -21,7 +21,7 @@ from src.chat.heart_flow.observation.actions_observation import ActionObservatio from src.chat.focus_chat.memory_activator import MemoryActivator from src.chat.focus_chat.info_processors.base_processor import BaseProcessor -from src.chat.focus_chat.planners.planner_factory import PlannerFactory +from src.chat.focus_chat.planners.planner_simple import ActionPlanner from src.chat.focus_chat.planners.modify_actions import ActionModifier from src.chat.focus_chat.planners.action_manager import ActionManager from src.config.config import global_config @@ -119,7 +119,7 @@ class HeartFChatting: self._register_default_processors() self.action_manager = ActionManager() - self.action_planner = PlannerFactory.create_planner( + self.action_planner = ActionPlanner( log_prefix=self.log_prefix, action_manager=self.action_manager ) self.action_modifier = ActionModifier(action_manager=self.action_manager) @@ -141,6 +141,9 @@ class HeartFChatting: # 存储回调函数 self.on_stop_focus_chat = on_stop_focus_chat + self.reply_timeout_count = 0 + self.plan_timeout_count = 0 + # 初始化性能记录器 # 如果没有指定版本号,则使用全局版本管理器的版本号 actual_version = performance_version or get_hfc_version() @@ -382,24 +385,12 @@ class HeartFChatting: formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" timer_strings.append(f"{name}: {formatted_time}") - # 新增:输出每个处理器的耗时 - processor_time_costs = self._current_cycle_detail.loop_processor_info.get( - "processor_time_costs", {} - ) - processor_time_strings = [] - for pname, ptime in processor_time_costs.items(): - formatted_ptime = f"{ptime * 1000:.2f}毫秒" if ptime < 1 else f"{ptime:.2f}秒" - processor_time_strings.append(f"{pname}: {formatted_ptime}") - processor_time_log = ( - ("\n前处理器耗时: " + "; ".join(processor_time_strings)) if processor_time_strings else "" - ) logger.info( f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考," f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, " - f"动作: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}" + f"选择动作: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}" + (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "") - + processor_time_log ) # 记录性能数据 @@ -410,7 +401,6 @@ class HeartFChatting: "action_type": action_result.get("action_type", "unknown"), "total_time": self._current_cycle_detail.end_time - self._current_cycle_detail.start_time, "step_times": cycle_timers.copy(), - "processor_time_costs": processor_time_costs, # 处理器时间 "reasoning": action_result.get("reasoning", ""), "success": self._current_cycle_detail.loop_action_info.get("action_taken", False), } @@ -491,13 +481,12 @@ class HeartFChatting: processor_tasks = [] task_to_name_map = {} - processor_time_costs = {} # 新增: 记录每个处理器耗时 for processor in self.processors: processor_name = 
processor.__class__.log_prefix async def run_with_timeout(proc=processor): - return await asyncio.wait_for(proc.process_info(observations=observations), 30) + return await proc.process_info(observations=observations) task = asyncio.create_task(run_with_timeout()) @@ -518,39 +507,20 @@ class HeartFChatting: try: result_list = await task - logger.info(f"{self.log_prefix} 处理器 {processor_name} 已完成!") + logger.debug(f"{self.log_prefix} 处理器 {processor_name} 已完成!") if result_list is not None: all_plan_info.extend(result_list) else: logger.warning(f"{self.log_prefix} 处理器 {processor_name} 返回了 None") - # 记录耗时 - processor_time_costs[processor_name] = duration_since_parallel_start - except asyncio.TimeoutError: - logger.info(f"{self.log_prefix} 处理器 {processor_name} 超时(>30s),已跳过") - processor_time_costs[processor_name] = 30 except Exception as e: logger.error( f"{self.log_prefix} 处理器 {processor_name} 执行失败,耗时 (自并行开始): {duration_since_parallel_start:.2f}秒. 错误: {e}", exc_info=True, ) traceback.print_exc() - processor_time_costs[processor_name] = duration_since_parallel_start - if pending_tasks: - current_progress_time = time.time() - elapsed_for_log = current_progress_time - parallel_start_time - pending_names_for_log = [task_to_name_map[t] for t in pending_tasks] - logger.info( - f"{self.log_prefix} 信息处理已进行 {elapsed_for_log:.2f}秒,待完成任务: {', '.join(pending_names_for_log)}" - ) - # 所有任务完成后的最终日志 - parallel_end_time = time.time() - total_duration = parallel_end_time - parallel_start_time - logger.info(f"{self.log_prefix} 所有处理器任务全部完成,总耗时: {total_duration:.2f}秒") - # logger.debug(f"{self.log_prefix} 所有信息处理器处理后的信息: {all_plan_info}") - - return all_plan_info, processor_time_costs + return all_plan_info async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict: try: @@ -582,19 +552,16 @@ class HeartFChatting: logger.error(f"{self.log_prefix} 动作修改失败: {e}") # 继续执行,不中断流程 - # 第二步:信息处理器 - with Timer("信息处理器", cycle_timers): - try: - all_plan_info, processor_time_costs = await self._process_processors(self.observations) - except Exception as e: - logger.error(f"{self.log_prefix} 信息处理器失败: {e}") - # 设置默认值以继续执行 - all_plan_info = [] - processor_time_costs = {} + + try: + all_plan_info = await self._process_processors(self.observations) + except Exception as e: + logger.error(f"{self.log_prefix} 信息处理器失败: {e}") + # 设置默认值以继续执行 + all_plan_info = [] loop_processor_info = { "all_plan_info": all_plan_info, - "processor_time_costs": processor_time_costs, } logger.debug(f"{self.log_prefix} 并行阶段完成,准备进入规划器,plan_info数量: {len(all_plan_info)}") @@ -737,8 +704,15 @@ class HeartFChatting: logger.info( f"{self.log_prefix} [非auto模式] 已发送 {self._message_count} 条消息,达到疲惫阈值 {current_threshold},但非auto模式不会自动退出" ) - - logger.debug(f"{self.log_prefix} 麦麦执行了'{action}', 返回结果'{success}', '{reply_text}', '{command}'") + else: + if reply_text == "timeout": + self.reply_timeout_count += 1 + if self.reply_timeout_count > 5: + logger.warning( + f"[{self.log_prefix} ] 连续回复超时次数过多,{global_config.chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。或者尝试拉高thinking_timeout参数,这可能导致回复时间过长。" + ) + logger.warning(f"{self.log_prefix} 回复生成超时{global_config.chat.thinking_timeout}s,已跳过") + return False, "", "" return success, reply_text, command diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py index bfe6a58e5..eb783d483 100644 --- a/src/chat/focus_chat/memory_activator.py +++ b/src/chat/focus_chat/memory_activator.py @@ -117,14 +117,14 @@ class MemoryActivator: # 
添加新的关键词到缓存 self.cached_keywords.update(keywords) - logger.info(f"当前激活的记忆关键词: {self.cached_keywords}") + # 调用记忆系统获取相关记忆 related_memory = await hippocampus_manager.get_memory_from_topic( valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3 ) - logger.info(f"获取到的记忆: {related_memory}") + logger.info(f"当前记忆关键词: {self.cached_keywords} 。获取到的记忆: {related_memory}") # 激活时,所有已有记忆的duration+1,达到3则移除 for m in self.running_memory[:]: diff --git a/src/chat/focus_chat/planners/planner_factory.py b/src/chat/focus_chat/planners/planner_factory.py deleted file mode 100644 index 8552dcd20..000000000 --- a/src/chat/focus_chat/planners/planner_factory.py +++ /dev/null @@ -1,45 +0,0 @@ -from typing import Dict, Type -from src.chat.focus_chat.planners.base_planner import BasePlanner -from src.chat.focus_chat.planners.planner_simple import ActionPlanner as SimpleActionPlanner -from src.chat.focus_chat.planners.action_manager import ActionManager -from src.common.logger import get_logger - -logger = get_logger("planner_factory") - - -class PlannerFactory: - """规划器工厂类,用于创建不同类型的规划器实例""" - - # 注册所有可用的规划器类型 - _planner_types: Dict[str, Type[BasePlanner]] = { - "simple": SimpleActionPlanner, - } - - @classmethod - def register_planner(cls, name: str, planner_class: Type[BasePlanner]) -> None: - """ - 注册新的规划器类型 - - Args: - name: 规划器类型名称 - planner_class: 规划器类 - """ - cls._planner_types[name] = planner_class - logger.info(f"注册新的规划器类型: {name}") - - @classmethod - def create_planner(cls, log_prefix: str, action_manager: ActionManager) -> BasePlanner: - """ - 创建规划器实例 - - Args: - log_prefix: 日志前缀 - action_manager: 动作管理器实例 - - Returns: - BasePlanner: 规划器实例 - """ - - planner_class = cls._planner_types["simple"] - logger.info(f"{log_prefix} 使用simple规划器") - return planner_class(log_prefix=log_prefix, action_manager=action_manager) diff --git a/src/chat/focus_chat/planners/planner_simple.py b/src/chat/focus_chat/planners/planner_simple.py index 3d044d6e7..8b06c7bed 100644 --- a/src/chat/focus_chat/planners/planner_simple.py +++ b/src/chat/focus_chat/planners/planner_simple.py @@ -58,6 +58,8 @@ def init_prompt(): Prompt( """ +动作:{action_name} +动作描述:{action_description} {action_require} {{ "action": "{action_name}",{action_parameters} @@ -66,16 +68,6 @@ def init_prompt(): "action_prompt", ) - Prompt( - """ -{action_require} -{{ - "action": "{action_name}",{action_parameters} -}} -""", - "action_prompt_private", - ) - class ActionPlanner(BasePlanner): def __init__(self, log_prefix: str, action_manager: ActionManager): @@ -191,7 +183,8 @@ class ActionPlanner(BasePlanner): logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}") logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}") - logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}") + if reasoning_content: + logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}") except Exception as req_e: logger.error(f"{self.log_prefix}LLM 请求执行失败: {req_e}") diff --git a/src/chat/memory_system/Hippocampus.py b/src/chat/memory_system/Hippocampus.py index bd8a171f4..4b311b8cb 100644 --- a/src/chat/memory_system/Hippocampus.py +++ b/src/chat/memory_system/Hippocampus.py @@ -784,12 +784,12 @@ class Hippocampus: # 计算激活节点数与总节点数的比值 total_activation = sum(activate_map.values()) - logger.debug(f"总激活值: {total_activation:.2f}") + # logger.debug(f"总激活值: {total_activation:.2f}") total_nodes = len(self.memory_graph.G.nodes()) # activated_nodes = len(activate_map) activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0 activation_ratio = activation_ratio * 60 - 
logger.info(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}") + logger.debug(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}") return activation_ratio diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 38bc1076e..d81f7f48b 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -1,11 +1,12 @@ import asyncio import time from random import random -from typing import List, Dict, Optional -import os -import pickle -from maim_message import UserInfo, Seg +from typing import List, Optional +from src.config.config import global_config from src.common.logger import get_logger +from src.person_info.person_info import get_person_info_manager +from src.plugin_system.apis import generator_api +from maim_message import UserInfo, Seg from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager from src.chat.utils.timer_calculator import Timer @@ -14,20 +15,10 @@ from ..message_receive.message import MessageSending, MessageRecv, MessageThinki from src.chat.message_receive.message_sender import message_manager from src.chat.normal_chat.willing.willing_manager import get_willing_manager from src.chat.normal_chat.normal_chat_utils import get_recent_message_stats -from src.config.config import global_config from src.chat.focus_chat.planners.action_manager import ActionManager -from src.person_info.person_info import PersonInfoManager -from src.person_info.relationship_manager import get_relationship_manager -from src.chat.utils.chat_message_builder import ( - get_raw_msg_by_timestamp_with_chat, - get_raw_msg_by_timestamp_with_chat_inclusive, - get_raw_msg_before_timestamp_with_chat, - num_new_messages_since, -) +from src.person_info.relationship_builder_manager import relationship_builder_manager from .priority_manager import PriorityManager import traceback - -from .normal_chat_generator import NormalChatGenerator from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier @@ -38,15 +29,6 @@ willing_manager = get_willing_manager() logger = get_logger("normal_chat") -# 消息段清理配置 -SEGMENT_CLEANUP_CONFIG = { - "enable_cleanup": True, # 是否启用清理 - "max_segment_age_days": 7, # 消息段最大保存天数 - "max_segments_per_user": 10, # 每用户最大消息段数 - "cleanup_interval_hours": 1, # 清理间隔(小时) -} - - class NormalChat: """ 普通聊天处理类,负责处理非核心对话的聊天逻辑。 @@ -71,6 +53,8 @@ class NormalChat: self.stream_name = get_chat_manager().get_stream_name(self.stream_id) or self.stream_id + self.relationship_builder = relationship_builder_manager.get_or_create_builder(self.stream_id) + # Interest dict self.interest_dict = interest_dict @@ -78,9 +62,7 @@ class NormalChat: self.willing_amplifier = 1 self.start_time = time.time() - - # Other sync initializations - self.gpt = NormalChatGenerator() + self.mood_manager = mood_manager self.start_time = time.time() @@ -96,18 +78,6 @@ class NormalChat: self.recent_replies = [] self.max_replies_history = 20 # 最多保存最近20条回复记录 - # 新的消息段缓存结构: - # {person_id: [{"start_time": float, "end_time": float, "last_msg_time": float, "message_count": int}, ...]} - self.person_engaged_cache: Dict[str, List[Dict[str, any]]] = {} - - # 持久化存储文件路径 - self.cache_file_path = os.path.join("data", "relationship", f"relationship_cache_{self.stream_id}.pkl") - - # 最后处理的消息时间,避免重复处理相同消息 - self.last_processed_message_time = 0.0 - - # 最后清理时间,用于定期清理老消息段 - self.last_cleanup_time = 0.0 # 
添加回调函数,用于在满足条件时通知切换到focus_chat模式 self.on_switch_to_focus_callback = on_switch_to_focus_callback @@ -119,11 +89,6 @@ class NormalChat: self.timeout_count = 0 - # 加载持久化的缓存 - self._load_cache() - - logger.debug(f"[{self.stream_name}] NormalChat 初始化完成 (异步部分)。") - self.action_type: Optional[str] = None # 当前动作类型 self.is_parallel_action: bool = False # 是否是可并行动作 @@ -151,320 +116,25 @@ class NormalChat: self._priority_chat_task.cancel() logger.info(f"[{self.stream_name}] NormalChat 已停用。") - # ================================ - # 缓存管理模块 - # 负责持久化存储、状态管理、缓存读写 - # ================================ - - def _load_cache(self): - """从文件加载持久化的缓存""" - if os.path.exists(self.cache_file_path): - try: - with open(self.cache_file_path, "rb") as f: - cache_data = pickle.load(f) - # 新格式:包含额外信息的缓存 - self.person_engaged_cache = cache_data.get("person_engaged_cache", {}) - self.last_processed_message_time = cache_data.get("last_processed_message_time", 0.0) - self.last_cleanup_time = cache_data.get("last_cleanup_time", 0.0) - - logger.info( - f"[{self.stream_name}] 成功加载关系缓存,包含 {len(self.person_engaged_cache)} 个用户,最后处理时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_processed_message_time)) if self.last_processed_message_time > 0 else '未设置'}" - ) - except Exception as e: - logger.error(f"[{self.stream_name}] 加载关系缓存失败: {e}") - self.person_engaged_cache = {} - self.last_processed_message_time = 0.0 - else: - logger.info(f"[{self.stream_name}] 关系缓存文件不存在,使用空缓存") - - def _save_cache(self): - """保存缓存到文件""" - try: - os.makedirs(os.path.dirname(self.cache_file_path), exist_ok=True) - cache_data = { - "person_engaged_cache": self.person_engaged_cache, - "last_processed_message_time": self.last_processed_message_time, - "last_cleanup_time": self.last_cleanup_time, - } - with open(self.cache_file_path, "wb") as f: - pickle.dump(cache_data, f) - logger.debug(f"[{self.stream_name}] 成功保存关系缓存") - except Exception as e: - logger.error(f"[{self.stream_name}] 保存关系缓存失败: {e}") - - # ================================ - # 消息段管理模块 - # 负责跟踪用户消息活动、管理消息段、清理过期数据 - # ================================ - - def _update_message_segments(self, person_id: str, message_time: float): - """更新用户的消息段 - - Args: - person_id: 用户ID - message_time: 消息时间戳 - """ - if person_id not in self.person_engaged_cache: - self.person_engaged_cache[person_id] = [] - - segments = self.person_engaged_cache[person_id] - current_time = time.time() - - # 获取该消息前5条消息的时间作为潜在的开始时间 - before_messages = get_raw_msg_before_timestamp_with_chat(self.stream_id, message_time, limit=5) - if before_messages: - # 由于get_raw_msg_before_timestamp_with_chat返回按时间升序排序的消息,最后一个是最接近message_time的 - # 我们需要第一个消息作为开始时间,但应该确保至少包含5条消息或该用户之前的消息 - potential_start_time = before_messages[0]["time"] - else: - # 如果没有前面的消息,就从当前消息开始 - potential_start_time = message_time - - # 如果没有现有消息段,创建新的 - if not segments: - new_segment = { - "start_time": potential_start_time, - "end_time": message_time, - "last_msg_time": message_time, - "message_count": self._count_messages_in_timerange(potential_start_time, message_time), - } - segments.append(new_segment) - logger.debug( - f"[{self.stream_name}] 为用户 {person_id} 创建新消息段: 时间范围 {time.strftime('%H:%M:%S', time.localtime(potential_start_time))} - {time.strftime('%H:%M:%S', time.localtime(message_time))}, 消息数: {new_segment['message_count']}" - ) - self._save_cache() - return - - # 获取最后一个消息段 - last_segment = segments[-1] - - # 计算从最后一条消息到当前消息之间的消息数量(不包含边界) - messages_between = self._count_messages_between(last_segment["last_msg_time"], message_time) - - if 
messages_between <= 10: - # 在10条消息内,延伸当前消息段 - last_segment["end_time"] = message_time - last_segment["last_msg_time"] = message_time - # 重新计算整个消息段的消息数量 - last_segment["message_count"] = self._count_messages_in_timerange( - last_segment["start_time"], last_segment["end_time"] - ) - logger.debug(f"[{self.stream_name}] 延伸用户 {person_id} 的消息段: {last_segment}") - else: - # 超过10条消息,结束当前消息段并创建新的 - # 结束当前消息段:延伸到原消息段最后一条消息后5条消息的时间 - after_messages = get_raw_msg_by_timestamp_with_chat( - self.stream_id, last_segment["last_msg_time"], current_time, limit=5, limit_mode="earliest" - ) - if after_messages and len(after_messages) >= 5: - # 如果有足够的后续消息,使用第5条消息的时间作为结束时间 - last_segment["end_time"] = after_messages[4]["time"] - else: - # 如果没有足够的后续消息,保持原有的结束时间 - pass - - # 重新计算当前消息段的消息数量 - last_segment["message_count"] = self._count_messages_in_timerange( - last_segment["start_time"], last_segment["end_time"] - ) - - # 创建新的消息段 - new_segment = { - "start_time": potential_start_time, - "end_time": message_time, - "last_msg_time": message_time, - "message_count": self._count_messages_in_timerange(potential_start_time, message_time), - } - segments.append(new_segment) - logger.debug(f"[{self.stream_name}] 为用户 {person_id} 创建新消息段(超过10条消息间隔): {new_segment}") - - self._save_cache() - - def _count_messages_in_timerange(self, start_time: float, end_time: float) -> int: - """计算指定时间范围内的消息数量(包含边界)""" - messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.stream_id, start_time, end_time) - return len(messages) - - def _count_messages_between(self, start_time: float, end_time: float) -> int: - """计算两个时间点之间的消息数量(不包含边界),用于间隔检查""" - return num_new_messages_since(self.stream_id, start_time, end_time) - - def _get_total_message_count(self, person_id: str) -> int: - """获取用户所有消息段的总消息数量""" - if person_id not in self.person_engaged_cache: - return 0 - - total_count = 0 - for segment in self.person_engaged_cache[person_id]: - total_count += segment["message_count"] - - return total_count - - def _cleanup_old_segments(self) -> bool: - """清理老旧的消息段 - - Returns: - bool: 是否执行了清理操作 - """ - if not SEGMENT_CLEANUP_CONFIG["enable_cleanup"]: - return False - - current_time = time.time() - - # 检查是否需要执行清理(基于时间间隔) - cleanup_interval_seconds = SEGMENT_CLEANUP_CONFIG["cleanup_interval_hours"] * 3600 - if current_time - self.last_cleanup_time < cleanup_interval_seconds: - return False - - logger.info(f"[{self.stream_name}] 开始执行老消息段清理...") - - cleanup_stats = { - "users_cleaned": 0, - "segments_removed": 0, - "total_segments_before": 0, - "total_segments_after": 0, - } - - max_age_seconds = SEGMENT_CLEANUP_CONFIG["max_segment_age_days"] * 24 * 3600 - max_segments_per_user = SEGMENT_CLEANUP_CONFIG["max_segments_per_user"] - - users_to_remove = [] - - for person_id, segments in self.person_engaged_cache.items(): - cleanup_stats["total_segments_before"] += len(segments) - original_segment_count = len(segments) - - # 1. 按时间清理:移除过期的消息段 - segments_after_age_cleanup = [] - for segment in segments: - segment_age = current_time - segment["end_time"] - if segment_age <= max_age_seconds: - segments_after_age_cleanup.append(segment) - else: - cleanup_stats["segments_removed"] += 1 - logger.debug( - f"[{self.stream_name}] 移除用户 {person_id} 的过期消息段: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(segment['start_time']))} - {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(segment['end_time']))}" - ) - - # 2. 
按数量清理:如果消息段数量仍然过多,保留最新的 - if len(segments_after_age_cleanup) > max_segments_per_user: - # 按end_time排序,保留最新的 - segments_after_age_cleanup.sort(key=lambda x: x["end_time"], reverse=True) - segments_removed_count = len(segments_after_age_cleanup) - max_segments_per_user - cleanup_stats["segments_removed"] += segments_removed_count - segments_after_age_cleanup = segments_after_age_cleanup[:max_segments_per_user] - logger.debug( - f"[{self.stream_name}] 用户 {person_id} 消息段数量过多,移除 {segments_removed_count} 个最老的消息段" - ) - - # 使用清理后的消息段 - - # 更新缓存 - if len(segments_after_age_cleanup) == 0: - # 如果没有剩余消息段,标记用户为待移除 - users_to_remove.append(person_id) - else: - self.person_engaged_cache[person_id] = segments_after_age_cleanup - cleanup_stats["total_segments_after"] += len(segments_after_age_cleanup) - - if original_segment_count != len(segments_after_age_cleanup): - cleanup_stats["users_cleaned"] += 1 - - # 移除没有消息段的用户 - for person_id in users_to_remove: - del self.person_engaged_cache[person_id] - logger.debug(f"[{self.stream_name}] 移除用户 {person_id}:没有剩余消息段") - - # 更新最后清理时间 - self.last_cleanup_time = current_time - - # 保存缓存 - if cleanup_stats["segments_removed"] > 0 or len(users_to_remove) > 0: - self._save_cache() - logger.info( - f"[{self.stream_name}] 清理完成 - 影响用户: {cleanup_stats['users_cleaned']}, 移除消息段: {cleanup_stats['segments_removed']}, 移除用户: {len(users_to_remove)}" - ) - logger.info( - f"[{self.stream_name}] 消息段统计 - 清理前: {cleanup_stats['total_segments_before']}, 清理后: {cleanup_stats['total_segments_after']}" - ) - else: - logger.debug(f"[{self.stream_name}] 清理完成 - 无需清理任何内容") - - return cleanup_stats["segments_removed"] > 0 or len(users_to_remove) > 0 - - def get_cache_status(self) -> str: - """获取缓存状态信息,用于调试和监控""" - if not self.person_engaged_cache: - return f"[{self.stream_name}] 关系缓存为空" - - status_lines = [f"[{self.stream_name}] 关系缓存状态:"] - status_lines.append( - f"最后处理消息时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_processed_message_time)) if self.last_processed_message_time > 0 else '未设置'}" - ) - status_lines.append( - f"最后清理时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_cleanup_time)) if self.last_cleanup_time > 0 else '未执行'}" - ) - status_lines.append(f"总用户数:{len(self.person_engaged_cache)}") - status_lines.append( - f"清理配置:{'启用' if SEGMENT_CLEANUP_CONFIG['enable_cleanup'] else '禁用'} (最大保存{SEGMENT_CLEANUP_CONFIG['max_segment_age_days']}天, 每用户最多{SEGMENT_CLEANUP_CONFIG['max_segments_per_user']}段)" - ) - status_lines.append("") - - for person_id, segments in self.person_engaged_cache.items(): - total_count = self._get_total_message_count(person_id) - status_lines.append(f"用户 {person_id}:") - status_lines.append(f" 总消息数:{total_count} ({total_count}/45)") - status_lines.append(f" 消息段数:{len(segments)}") - - for i, segment in enumerate(segments): - start_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["start_time"])) - end_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["end_time"])) - last_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["last_msg_time"])) - status_lines.append( - f" 段{i + 1}: {start_str} -> {end_str} (最后消息: {last_str}, 消息数: {segment['message_count']})" - ) - status_lines.append("") - - return "\n".join(status_lines) - - def _update_user_message_segments(self, message: MessageRecv): - """更新用户消息段信息""" - time.time() - user_id = message.message_info.user_info.user_id - platform = message.message_info.platform - msg_time = message.message_info.time - - # 跳过机器人自己的消息 - if user_id == 
global_config.bot.qq_account: - return - - # 只处理新消息(避免重复处理) - if msg_time <= self.last_processed_message_time: - return - - person_id = PersonInfoManager.get_person_id(platform, user_id) - self._update_message_segments(person_id, msg_time) - - # 更新最后处理时间 - self.last_processed_message_time = max(self.last_processed_message_time, msg_time) - logger.debug( - f"[{self.stream_name}] 更新用户 {person_id} 的消息段,消息时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msg_time))}" - ) - async def _priority_chat_loop_add_message(self): while not self._disabled: try: - ids = list(self.interest_dict.keys()) - for msg_id in ids: - message, interest_value, _ = self.interest_dict[msg_id] + # 创建字典条目的副本以避免在迭代时发生修改 + items_to_process = list(self.interest_dict.items()) + for msg_id, value in items_to_process: + # 尝试从原始字典中弹出条目,如果它已被其他任务处理,则跳过 + if self.interest_dict.pop(msg_id, None) is None: + continue # 条目已被其他任务处理 + + message, interest_value, _ = value if not self._disabled: # 更新消息段信息 - self._update_user_message_segments(message) + # self._update_user_message_segments(message) # 添加消息到优先级管理器 if self.priority_manager: self.priority_manager.add_message(message, interest_value) - self.interest_dict.pop(msg_id, None) + except Exception: logger.error( f"[{self.stream_name}] 优先级聊天循环添加消息时出现错误: {traceback.format_exc()}", exc_info=True @@ -489,9 +159,6 @@ class NormalChat: f"[{self.stream_name}] 从队列中取出消息进行处理: User {message.message_info.user_info.user_id}, Time: {time.strftime('%H:%M:%S', time.localtime(message.message_info.time))}" ) - # 检查是否有用户满足关系构建条件 - asyncio.create_task(self._check_relation_building_conditions(message)) - do_reply = await self.reply_one_message(message) response_set = do_reply if do_reply else [] factor = 0.5 @@ -708,19 +375,12 @@ class NormalChat: async def normal_response(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None: """ 处理接收到的消息。 - 根据回复模式,决定是立即处理还是放入优先级队列。 + 在"兴趣"模式下,判断是否回复并生成内容。 """ if self._disabled: return - # 根据回复模式决定行为 - if self.reply_mode == "priority": - # 优先模式下,所有消息都进入管理器 - if self.priority_manager: - self.priority_manager.add_message(message) - return - - # 新增:在auto模式下检查是否需要直接切换到focus模式 + # 新增:在auto模式下检查是否需要直接切换到focus模式 if global_config.chat.chat_mode == "auto": if await self._check_should_switch_to_focus(): logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件,尝试执行切换") @@ -734,19 +394,7 @@ class NormalChat: else: logger.warning(f"[{self.stream_name}] 没有设置切换到focus聊天模式的回调函数,无法执行切换") - # --- 以下为原有的 "兴趣" 模式逻辑 --- - await self._process_message(message, is_mentioned, interested_rate) - - async def _process_message(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None: - """ - 实际处理单条消息的逻辑,包括意愿判断、回复生成、动作执行等。 - """ - if self._disabled: - return - - # 检查是否有用户满足关系构建条件 - asyncio.create_task(self._check_relation_building_conditions(message)) - + # --- 以下为 "兴趣" 模式逻辑 (从 _process_message 合并而来) --- timing_results = {} reply_probability = ( 1.0 if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply else 0.0 @@ -804,7 +452,7 @@ class NormalChat: if do_reply and response_set: # 确保 response_set 不是 None timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()]) trigger_msg = message.processed_plain_text - response_msg = " ".join(response_set) + response_msg = " ".join([item[1] for item in response_set if item[0] == "text"]) logger.info( f"[{self.stream_name}]回复消息: {trigger_msg[:30]}... | 回复内容: {response_msg[:30]}... 
| 计时: {timing_str}" ) @@ -816,8 +464,105 @@ class NormalChat: # 意愿管理器:注销当前message信息 (无论是否回复,只要处理过就删除) willing_manager.delete(message.message_info.message_id) + async def _generate_normal_response( + self, message: MessageRecv, available_actions: Optional[list] + ) -> Optional[list]: + """生成普通回复""" + try: + logger.info( + f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}" + ) + person_info_manager = get_person_info_manager() + person_id = person_info_manager.get_person_id( + message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id + ) + person_name = await person_info_manager.get_value(person_id, "person_name") + reply_to_str = f"{person_name}:{message.processed_plain_text}" + + success, reply_set = await generator_api.generate_reply( + chat_stream=message.chat_stream, + reply_to=reply_to_str, + available_actions=available_actions, + enable_tool=global_config.tool.enable_in_normal_chat, + request_type="normal.replyer", + ) + + if not success or not reply_set: + logger.info(f"对 {message.processed_plain_text} 的回复生成失败") + return None + + content = " ".join([item[1] for item in reply_set if item[0] == "text"]) + if content: + logger.info(f"{global_config.bot.nickname}的备选回复是:{content}") + + return reply_set + + except Exception as e: + logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}") + return None + + async def _plan_and_execute_actions(self, message: MessageRecv, thinking_id: str) -> Optional[dict]: + """规划和执行额外动作""" + no_action = { + "action_result": { + "action_type": "no_action", + "action_data": {}, + "reasoning": "规划器初始化默认", + "is_parallel": True, + }, + "chat_context": "", + "action_prompt": "", + } + + if not self.enable_planner: + logger.debug(f"[{self.stream_name}] Planner未启用,跳过动作规划") + return no_action + + try: + # 检查是否应该跳过规划 + if self.action_modifier.should_skip_planning(): + logger.debug(f"[{self.stream_name}] 没有可用动作,跳过规划") + self.action_type = "no_action" + return no_action + + # 执行规划 + plan_result = await self.planner.plan(message) + action_type = plan_result["action_result"]["action_type"] + action_data = plan_result["action_result"]["action_data"] + reasoning = plan_result["action_result"]["reasoning"] + is_parallel = plan_result["action_result"].get("is_parallel", False) + + logger.info(f"[{self.stream_name}] Planner决策: {action_type}, 理由: {reasoning}, 并行执行: {is_parallel}") + self.action_type = action_type # 更新实例属性 + self.is_parallel_action = is_parallel # 新增:保存并行执行标志 + + # 如果规划器决定不执行任何动作 + if action_type == "no_action": + logger.debug(f"[{self.stream_name}] Planner决定不执行任何额外动作") + return no_action + + # 执行额外的动作(不影响回复生成) + action_result = await self._execute_action(action_type, action_data, message, thinking_id) + if action_result is not None: + logger.info(f"[{self.stream_name}] 额外动作 {action_type} 执行完成") + else: + logger.warning(f"[{self.stream_name}] 额外动作 {action_type} 执行失败") + + return { + "action_type": action_type, + "action_data": action_data, + "reasoning": reasoning, + "is_parallel": is_parallel, + } + + except Exception as e: + logger.error(f"[{self.stream_name}] Planner执行失败: {e}") + return no_action + async def reply_one_message(self, message: MessageRecv) -> None: # 回复前处理 + await self.relationship_builder.build_relation() + thinking_id = await self._create_thinking_message(message) # 如果启用planner,预先修改可用actions(避免在并行任务中重复调用) @@ -832,87 +577,15 @@ class NormalChat: logger.warning(f"[{self.stream_name}] 获取available_actions失败: {e}") 
available_actions = None - # 定义并行执行的任务 - async def generate_normal_response(): - """生成普通回复""" - try: - return await self.gpt.generate_response( - message=message, - available_actions=available_actions, - ) - except Exception as e: - logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}") - return None - - async def plan_and_execute_actions(): - """规划和执行额外动作""" - if not self.enable_planner: - logger.debug(f"[{self.stream_name}] Planner未启用,跳过动作规划") - return None - - try: - no_action = { - "action_result": { - "action_type": "no_action", - "action_data": {}, - "reasoning": "规划器初始化默认", - "is_parallel": True, - }, - "chat_context": "", - "action_prompt": "", - } - - # 检查是否应该跳过规划 - if self.action_modifier.should_skip_planning(): - logger.debug(f"[{self.stream_name}] 没有可用动作,跳过规划") - self.action_type = "no_action" - return no_action - - # 执行规划 - plan_result = await self.planner.plan(message) - action_type = plan_result["action_result"]["action_type"] - action_data = plan_result["action_result"]["action_data"] - reasoning = plan_result["action_result"]["reasoning"] - is_parallel = plan_result["action_result"].get("is_parallel", False) - - logger.info( - f"[{self.stream_name}] Planner决策: {action_type}, 理由: {reasoning}, 并行执行: {is_parallel}" - ) - self.action_type = action_type # 更新实例属性 - self.is_parallel_action = is_parallel # 新增:保存并行执行标志 - - # 如果规划器决定不执行任何动作 - if action_type == "no_action": - logger.debug(f"[{self.stream_name}] Planner决定不执行任何额外动作") - return no_action - - # 执行额外的动作(不影响回复生成) - action_result = await self._execute_action(action_type, action_data, message, thinking_id) - if action_result is not None: - logger.info(f"[{self.stream_name}] 额外动作 {action_type} 执行完成") - else: - logger.warning(f"[{self.stream_name}] 额外动作 {action_type} 执行失败") - - return { - "action_type": action_type, - "action_data": action_data, - "reasoning": reasoning, - "is_parallel": is_parallel, - } - - except Exception as e: - logger.error(f"[{self.stream_name}] Planner执行失败: {e}") - return no_action - # 并行执行回复生成和动作规划 self.action_type = None # 初始化动作类型 self.is_parallel_action = False # 初始化并行动作标志 - gen_task = asyncio.create_task(generate_normal_response()) - plan_task = asyncio.create_task(plan_and_execute_actions()) + gen_task = asyncio.create_task(self._generate_normal_response(message, available_actions)) + plan_task = asyncio.create_task(self._plan_and_execute_actions(message, thinking_id)) try: - gather_timeout = global_config.normal_chat.thinking_timeout + gather_timeout = global_config.chat.thinking_timeout results = await asyncio.wait_for( asyncio.gather(gen_task, plan_task, return_exceptions=True), timeout=gather_timeout, @@ -922,12 +595,12 @@ class NormalChat: logger.warning( f"[{self.stream_name}] 并行执行回复生成和动作规划超时 ({gather_timeout}秒),正在取消相关任务..." 
) + print(f"111{self.timeout_count}") self.timeout_count += 1 if self.timeout_count > 5: - logger.error( - f"[{self.stream_name}] 连续回复超时,{global_config.normal_chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。" + logger.warning( + f"[{self.stream_name}] 连续回复超时次数过多,{global_config.chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。或者尝试拉高thinking_timeout参数,这可能导致回复时间过长。" ) - return False # 取消未完成的任务 if not gen_task.done(): @@ -969,8 +642,15 @@ class NormalChat: logger.info(f"[{self.stream_name}] 已停用,忽略 normal_response。") return False + # 提取回复文本 + reply_texts = [item[1] for item in response_set if item[0] == "text"] + if not reply_texts: + logger.info(f"[{self.stream_name}] 回复内容中没有文本,不发送消息") + await self._cleanup_thinking_message_by_id(thinking_id) + return False + # 发送回复 (不再需要传入 chat) - first_bot_msg = await self._add_messages_to_manager(message, response_set, thinking_id) + first_bot_msg = await self._add_messages_to_manager(message, reply_texts, thinking_id) # 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况) if first_bot_msg: @@ -1252,100 +932,6 @@ class NormalChat: """获取动作管理器实例""" return self.action_manager - async def _check_relation_building_conditions(self, message: MessageRecv): - """检查person_engaged_cache中是否有满足关系构建条件的用户""" - # 执行定期清理 - self._cleanup_old_segments() - - # 更新消息段信息 - self._update_user_message_segments(message) - - users_to_build_relationship = [] - - for person_id, segments in list(self.person_engaged_cache.items()): - total_message_count = self._get_total_message_count(person_id) - if total_message_count >= 45: - users_to_build_relationship.append(person_id) - logger.info( - f"[{self.stream_name}] 用户 {person_id} 满足关系构建条件,总消息数:{total_message_count},消息段数:{len(segments)}" - ) - elif total_message_count > 0: - # 记录进度信息 - logger.debug( - f"[{self.stream_name}] 用户 {person_id} 进度:{total_message_count}/45 条消息,{len(segments)} 个消息段" - ) - - # 为满足条件的用户构建关系 - for person_id in users_to_build_relationship: - segments = self.person_engaged_cache[person_id] - # 异步执行关系构建 - asyncio.create_task(self._build_relation_for_person_segments(person_id, segments)) - # 移除已处理的用户缓存 - del self.person_engaged_cache[person_id] - self._save_cache() - logger.info(f"[{self.stream_name}] 用户 {person_id} 关系构建已启动,缓存已清理") - - async def _build_relation_for_person_segments(self, person_id: str, segments: List[Dict[str, any]]): - """基于消息段更新用户印象,统一使用focus chat的构建方式""" - if not segments: - return - - logger.debug(f"[{self.stream_name}] 开始为 {person_id} 基于 {len(segments)} 个消息段更新印象") - try: - processed_messages = [] - - for i, segment in enumerate(segments): - start_time = segment["start_time"] - end_time = segment["end_time"] - segment["message_count"] - start_date = time.strftime("%Y-%m-%d %H:%M", time.localtime(start_time)) - - # 获取该段的消息(包含边界) - segment_messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.stream_id, start_time, end_time) - logger.debug( - f"[{self.stream_name}] 消息段 {i + 1}: {start_date} - {time.strftime('%Y-%m-%d %H:%M', time.localtime(end_time))}, 消息数: {len(segment_messages)}" - ) - - if segment_messages: - # 如果不是第一个消息段,在消息列表前添加间隔标识 - if i > 0: - # 创建一个特殊的间隔消息 - gap_message = { - "time": start_time - 0.1, # 稍微早于段开始时间 - "user_id": "system", - "user_platform": "system", - "user_nickname": "系统", - "user_cardname": "", - "display_message": f"...(中间省略一些消息){start_date} 之后的消息如下...", - "is_action_record": True, - "chat_info_platform": segment_messages[0].get("chat_info_platform", ""), - "chat_id": self.stream_id, - } - 
processed_messages.append(gap_message) - - # 添加该段的所有消息 - processed_messages.extend(segment_messages) - - if processed_messages: - # 按时间排序所有消息(包括间隔标识) - processed_messages.sort(key=lambda x: x["time"]) - - logger.debug( - f"[{self.stream_name}] 为 {person_id} 获取到总共 {len(processed_messages)} 条消息(包含间隔标识)用于印象更新" - ) - relationship_manager = get_relationship_manager() - - # 调用统一的更新方法 - await relationship_manager.update_person_impression( - person_id=person_id, timestamp=time.time(), bot_engaged_messages=processed_messages - ) - else: - logger.debug(f"[{self.stream_name}] 没有找到 {person_id} 的消息段对应的消息,不更新印象") - - except Exception as e: - logger.error(f"[{self.stream_name}] 为 {person_id} 更新印象时发生错误: {e}") - logger.error(traceback.format_exc()) - def _get_fatigue_reply_multiplier(self) -> float: """获取疲劳期回复频率调整系数 @@ -1369,7 +955,6 @@ class NormalChat: except Exception as e: logger.warning(f"[{self.stream_name}] 获取疲劳调整系数时出错: {e}") return 1.0 # 出错时返回正常系数 - async def _check_should_switch_to_focus(self) -> bool: """ 检查是否满足切换到focus模式的条件 @@ -1417,3 +1002,4 @@ class NormalChat: break except Exception as e: logger.error(f"[{self.stream_name}] 清理思考消息 {thinking_id} 时出错: {e}") + diff --git a/src/chat/normal_chat/normal_chat_action_modifier.py b/src/chat/normal_chat/normal_chat_action_modifier.py index 8cdde145e..d2f715cb4 100644 --- a/src/chat/normal_chat/normal_chat_action_modifier.py +++ b/src/chat/normal_chat/normal_chat_action_modifier.py @@ -5,6 +5,7 @@ from src.chat.utils.chat_message_builder import build_readable_messages, get_raw from src.config.config import global_config import random import time +import asyncio logger = get_logger("normal_chat_action_modifier") @@ -184,6 +185,7 @@ class NormalChatActionModifier: always_actions = {} random_actions = {} keyword_actions = {} + llm_judge_actions = {} for action_name, action_info in actions_with_info.items(): # 使用normal_activation_type @@ -192,8 +194,10 @@ class NormalChatActionModifier: # 现在统一是字符串格式的激活类型值 if activation_type == "always": always_actions[action_name] = action_info - elif activation_type == "random" or activation_type == "llm_judge": + elif activation_type == "random": random_actions[action_name] = action_info + elif activation_type == "llm_judge": + llm_judge_actions[action_name] = action_info elif activation_type == "keyword": keyword_actions[action_name] = action_info else: @@ -225,6 +229,24 @@ class NormalChatActionModifier: keywords = action_info.get("activation_keywords", []) logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: KEYWORD类型未匹配关键词({keywords})") + # 4. 
处理LLM_JUDGE类型(并行判定) + if llm_judge_actions: + # 直接并行处理所有LLM判定actions + llm_results = await self._process_llm_judge_actions_parallel( + llm_judge_actions, + chat_content, + ) + + # 添加激活的LLM判定actions + for action_name, should_activate in llm_results.items(): + if should_activate: + activated_actions[action_name] = llm_judge_actions[action_name] + logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: LLM_JUDGE类型判定通过") + else: + logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: LLM_JUDGE类型判定未通过") + + + logger.debug(f"{self.log_prefix}Normal模式激活类型过滤完成: {list(activated_actions.keys())}") return activated_actions @@ -277,6 +299,93 @@ class NormalChatActionModifier: else: logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}") return False + + + async def _process_llm_judge_actions_parallel( + self, + llm_judge_actions: Dict[str, Any], + chat_content: str = "", + ) -> Dict[str, bool]: + """ + 并行处理LLM判定actions,支持智能缓存 + + Args: + llm_judge_actions: 需要LLM判定的actions + chat_content: 聊天内容 + + Returns: + Dict[str, bool]: action名称到激活结果的映射 + """ + + # 生成当前上下文的哈希值 + current_context_hash = self._generate_context_hash(chat_content) + current_time = time.time() + + results = {} + tasks_to_run = {} + + # 检查缓存 + for action_name, action_info in llm_judge_actions.items(): + cache_key = f"{action_name}_{current_context_hash}" + + # 检查是否有有效的缓存 + if ( + cache_key in self._llm_judge_cache + and current_time - self._llm_judge_cache[cache_key]["timestamp"] < self._cache_expiry_time + ): + results[action_name] = self._llm_judge_cache[cache_key]["result"] + logger.debug( + f"{self.log_prefix}使用缓存结果 {action_name}: {'激活' if results[action_name] else '未激活'}" + ) + else: + # 需要进行LLM判定 + tasks_to_run[action_name] = action_info + + # 如果有需要运行的任务,并行执行 + if tasks_to_run: + logger.debug(f"{self.log_prefix}并行执行LLM判定,任务数: {len(tasks_to_run)}") + + # 创建并行任务 + tasks = [] + task_names = [] + + for action_name, action_info in tasks_to_run.items(): + task = self._llm_judge_action( + action_name, + action_info, + chat_content, + ) + tasks.append(task) + task_names.append(action_name) + + # 并行执行所有任务 + try: + task_results = await asyncio.gather(*tasks, return_exceptions=True) + + # 处理结果并更新缓存 + for _, (action_name, result) in enumerate(zip(task_names, task_results)): + if isinstance(result, Exception): + logger.error(f"{self.log_prefix}LLM判定action {action_name} 时出错: {result}") + results[action_name] = False + else: + results[action_name] = result + + # 更新缓存 + cache_key = f"{action_name}_{current_context_hash}" + self._llm_judge_cache[cache_key] = {"result": result, "timestamp": current_time} + + logger.debug(f"{self.log_prefix}并行LLM判定完成,耗时: {time.time() - current_time:.2f}s") + + except Exception as e: + logger.error(f"{self.log_prefix}并行LLM判定失败: {e}") + # 如果并行执行失败,为所有任务返回False + for action_name in tasks_to_run.keys(): + results[action_name] = False + + # 清理过期缓存 + self._cleanup_expired_cache(current_time) + + return results def get_available_actions_count(self) -> int: """获取当前可用动作数量(排除默认的no_action)""" diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py deleted file mode 100644 index df7cc6876..000000000 --- a/src/chat/normal_chat/normal_chat_generator.py +++ /dev/null @@ -1,72 +0,0 @@ -from src.llm_models.utils_model import LLMRequest -from src.config.config import global_config -from src.chat.message_receive.message import MessageThinking -from src.common.logger import get_logger -from src.person_info.person_info import PersonInfoManager, 
get_person_info_manager -from src.chat.utils.utils import process_llm_response -from src.plugin_system.apis import generator_api -from src.chat.focus_chat.memory_activator import MemoryActivator - - -logger = get_logger("normal_chat_response") - - -class NormalChatGenerator: - def __init__(self): - model_config_1 = global_config.model.replyer_1.copy() - model_config_2 = global_config.model.replyer_2.copy() - - prob_first = global_config.chat.replyer_random_probability - - model_config_1["weight"] = prob_first - model_config_2["weight"] = 1.0 - prob_first - - self.model_configs = [model_config_1, model_config_2] - - self.model_sum = LLMRequest(model=global_config.model.memory_summary, temperature=0.7, request_type="relation") - self.memory_activator = MemoryActivator() - - async def generate_response( - self, - message: MessageThinking, - available_actions=None, - ): - logger.info( - f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}" - ) - person_id = PersonInfoManager.get_person_id( - message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id - ) - person_info_manager = get_person_info_manager() - person_name = await person_info_manager.get_value(person_id, "person_name") - relation_info = await person_info_manager.get_value(person_id, "short_impression") - reply_to_str = f"{person_name}:{message.processed_plain_text}" - - try: - success, reply_set, prompt = await generator_api.generate_reply( - chat_stream=message.chat_stream, - reply_to=reply_to_str, - relation_info=relation_info, - available_actions=available_actions, - enable_tool=global_config.tool.enable_in_normal_chat, - model_configs=self.model_configs, - request_type="normal.replyer", - return_prompt=True, - ) - - if not success or not reply_set: - logger.info(f"对 {message.processed_plain_text} 的回复生成失败") - return None - - content = " ".join([item[1] for item in reply_set if item[0] == "text"]) - logger.debug(f"对 {message.processed_plain_text} 的回复:{content}") - - if content: - logger.info(f"{global_config.bot.nickname}的备选回复是:{content}") - content = process_llm_response(content) - - return content - - except Exception: - logger.exception("生成回复时出错") - return None diff --git a/src/chat/normal_chat/normal_chat_planner.py b/src/chat/normal_chat/normal_chat_planner.py index 9c4e08433..83d12caa1 100644 --- a/src/chat/normal_chat/normal_chat_planner.py +++ b/src/chat/normal_chat/normal_chat_planner.py @@ -49,10 +49,8 @@ def init_prompt(): Prompt( """ 动作:{action_name} -该动作的描述:{action_description} -使用该动作的场景: +动作描述:{action_description} {action_require} -输出要求: {{ "action": "{action_name}",{action_parameters} }} @@ -160,8 +158,8 @@ class NormalChatPlanner: logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}") logger.info(f"{self.log_prefix}规划器原始响应: {content}") - logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}") - logger.info(f"{self.log_prefix}规划器模型: {model_name}") + if reasoning_content: + logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}") # 解析JSON响应 try: diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 1fec8646e..dd1b4e8a6 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -92,14 +92,12 @@ class DefaultReplyer: def __init__( self, chat_stream: ChatStream, - enable_tool: bool = False, model_configs: Optional[List[Dict[str, Any]]] = None, request_type: str = "focus.replyer", ): self.log_prefix = "replyer" self.request_type = 
request_type - self.enable_tool = enable_tool if model_configs: self.express_model_configs = model_configs @@ -170,9 +168,10 @@ class DefaultReplyer: self, reply_data: Dict[str, Any] = None, reply_to: str = "", - relation_info: str = "", extra_info: str = "", available_actions: List[str] = None, + enable_tool: bool = True, + enable_timeout: bool = False, ) -> Tuple[bool, Optional[str]]: """ 回复器 (Replier): 核心逻辑,负责生成回复文本。 @@ -186,7 +185,6 @@ class DefaultReplyer: if not reply_data: reply_data = { "reply_to": reply_to, - "relation_info": relation_info, "extra_info": extra_info, } for key, value in reply_data.items(): @@ -198,6 +196,8 @@ class DefaultReplyer: prompt = await self.build_prompt_reply_context( reply_data=reply_data, # 传递action_data available_actions=available_actions, + enable_timeout=enable_timeout, + enable_tool=enable_tool, ) # 4. 调用 LLM 生成回复 @@ -311,7 +311,7 @@ class DefaultReplyer: person_id = person_info_manager.get_person_id_by_person_name(sender) if not person_id: logger.warning(f"{self.log_prefix} 未找到用户 {sender} 的ID,跳过信息提取") - return None + return f"你完全不认识{sender},不理解ta的相关信息。" relation_info = await relationship_fetcher.build_relation_info(person_id, text, chat_history) return relation_info @@ -367,13 +367,12 @@ class DefaultReplyer: for running_memory in running_memorys: memory_str += f"- {running_memory['content']}\n" memory_block = memory_str - logger.info(f"{self.log_prefix} 添加了 {len(running_memorys)} 个激活的记忆到prompt") else: memory_block = "" return memory_block - async def build_tool_info(self, reply_data=None, chat_history=None): + async def build_tool_info(self, reply_data=None, chat_history=None, enable_tool: bool = True): """构建工具信息块 Args: @@ -384,6 +383,9 @@ class DefaultReplyer: str: 工具信息字符串 """ + if not enable_tool: + return "" + if not reply_data: return "" @@ -460,7 +462,15 @@ class DefaultReplyer: return keywords_reaction_prompt - async def build_prompt_reply_context(self, reply_data=None, available_actions: List[str] = None) -> str: + async def _time_and_run_task(self, coro, name: str): + """一个简单的帮助函数,用于计时和运行异步任务,返回任务名、结果和耗时""" + start_time = time.time() + result = await coro + end_time = time.time() + duration = end_time - start_time + return name, result, duration + + async def build_prompt_reply_context(self, reply_data=None, available_actions: List[str] = None, enable_timeout: bool = False, enable_tool: bool = True) -> str: """ 构建回复器上下文 @@ -526,13 +536,26 @@ class DefaultReplyer: ) # 并行执行四个构建任务 - expression_habits_block, relation_info, memory_block, tool_info = await asyncio.gather( - self.build_expression_habits(chat_talking_prompt_half, target), - self.build_relation_info(reply_data, chat_talking_prompt_half), - self.build_memory_block(chat_talking_prompt_half, target), - self.build_tool_info(reply_data, chat_talking_prompt_half), + task_results = await asyncio.gather( + self._time_and_run_task(self.build_expression_habits(chat_talking_prompt_half, target), "build_expression_habits"), + self._time_and_run_task(self.build_relation_info(reply_data, chat_talking_prompt_half), "build_relation_info"), + self._time_and_run_task(self.build_memory_block(chat_talking_prompt_half, target), "build_memory_block"), + self._time_and_run_task(self.build_tool_info(reply_data, chat_talking_prompt_half, enable_tool=enable_tool), "build_tool_info"), ) + # 处理结果 + timing_logs = [] + results_dict = {} + for name, result, duration in task_results: + results_dict[name] = result + timing_logs.append(f"{name}: {duration:.4f}s") + logger.info(f"回复生成前信息获取时间: {'; 
'.join(timing_logs)}") + + expression_habits_block = results_dict["build_expression_habits"] + relation_info = results_dict["build_relation_info"] + memory_block = results_dict["build_memory_block"] + tool_info = results_dict["build_tool_info"] + keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target) if tool_info: diff --git a/src/chat/replyer/replyer_manager.py b/src/chat/replyer/replyer_manager.py index 76d2a9dc2..6a73b7d4b 100644 --- a/src/chat/replyer/replyer_manager.py +++ b/src/chat/replyer/replyer_manager.py @@ -14,7 +14,6 @@ class ReplyerManager: self, chat_stream: Optional[ChatStream] = None, chat_id: Optional[str] = None, - enable_tool: bool = False, model_configs: Optional[List[Dict[str, Any]]] = None, request_type: str = "replyer", ) -> Optional[DefaultReplyer]: @@ -50,7 +49,6 @@ class ReplyerManager: # model_configs 只在此时(初始化时)生效 replyer = DefaultReplyer( chat_stream=target_stream, - enable_tool=enable_tool, model_configs=model_configs, # 可以是None,此时使用默认模型 request_type=request_type, ) diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py index edfb9f31c..a081ad9a5 100644 --- a/src/chat/utils/utils.py +++ b/src/chat/utils/utils.py @@ -81,7 +81,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]: if is_at and global_config.normal_chat.at_bot_inevitable_reply: reply_probability = 1.0 - logger.info("被@,回复概率设置为100%") + logger.debug("被@,回复概率设置为100%") else: if not is_mentioned: # 判断是否被回复 @@ -106,7 +106,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]: is_mentioned = True if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply: reply_probability = 1.0 - logger.info("被提及,回复概率设置为100%") + logger.debug("被提及,回复概率设置为100%") return is_mentioned, reply_probability diff --git a/src/common/logger.py b/src/common/logger.py index cf6f07407..30a2e4bd7 100644 --- a/src/common/logger.py +++ b/src/common/logger.py @@ -346,7 +346,6 @@ MODULE_COLORS = { # 聊天相关模块 "normal_chat": "\033[38;5;81m", # 亮蓝绿色 "normal_chat_response": "\033[38;5;123m", # 青绿色 - "normal_chat_expressor": "\033[38;5;117m", # 浅蓝色 "normal_chat_action_modifier": "\033[38;5;111m", # 蓝色 "normal_chat_planner": "\033[38;5;75m", # 浅蓝色 "heartflow": "\033[38;5;213m", # 粉色 @@ -362,7 +361,6 @@ MODULE_COLORS = { # 专注聊天模块 "replyer": "\033[38;5;166m", # 橙色 "expressor": "\033[38;5;172m", # 黄橙色 - "planner_factory": "\033[38;5;178m", # 黄色 "processor": "\033[38;5;184m", # 黄绿色 "base_processor": "\033[38;5;190m", # 绿黄色 "working_memory": "\033[38;5;22m", # 深绿色 @@ -370,6 +368,7 @@ MODULE_COLORS = { # 插件系统 "plugin_manager": "\033[38;5;208m", # 红色 "base_plugin": "\033[38;5;202m", # 橙红色 + "send_api": "\033[38;5;208m", # 橙色 "base_command": "\033[38;5;208m", # 橙色 "component_registry": "\033[38;5;214m", # 橙黄色 "stream_api": "\033[38;5;220m", # 黄色 @@ -388,10 +387,8 @@ MODULE_COLORS = { "willing": "\033[38;5;147m", # 浅紫色 # 工具模块 "tool_use": "\033[38;5;64m", # 深绿色 + "tool_executor": "\033[38;5;64m", # 深绿色 "base_tool": "\033[38;5;70m", # 绿色 - "compare_numbers_tool": "\033[38;5;76m", # 浅绿色 - "change_mood_tool": "\033[38;5;82m", # 绿色 - "relationship_tool": "\033[38;5;88m", # 深红色 # 工具和实用模块 "prompt": "\033[38;5;99m", # 紫色 "prompt_build": "\033[38;5;105m", # 紫色 @@ -417,6 +414,8 @@ MODULE_COLORS = { "confirm": "\033[1;93m", # 黄色+粗体 # 模型相关 "model_utils": "\033[38;5;164m", # 紫红色 + + "relationship_builder": "\033[38;5;117m", # 浅蓝色 } RESET_COLOR = "\033[0m" diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 7dc63089b..a07bc25f5 100644 --- 
a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -84,6 +84,9 @@ class ChatConfig(ConfigBase): 选择普通模型的概率为 1 - reasoning_normal_model_probability """ + thinking_timeout: int = 30 + """麦麦最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢)""" + talk_frequency: float = 1 """回复频率阈值""" @@ -276,8 +279,6 @@ class NormalChatConfig(ConfigBase): emoji_chance: float = 0.2 """发送表情包的基础概率""" - thinking_timeout: int = 120 - """最长思考时间""" willing_mode: str = "classical" """意愿模式""" diff --git a/src/person_info/relationship_builder_manager.py b/src/person_info/relationship_builder_manager.py index ce8d254e0..926d67fca 100644 --- a/src/person_info/relationship_builder_manager.py +++ b/src/person_info/relationship_builder_manager.py @@ -25,7 +25,7 @@ class RelationshipBuilderManager: """ if chat_id not in self.builders: self.builders[chat_id] = RelationshipBuilder(chat_id) - logger.info(f"创建聊天 {chat_id} 的关系构建器") + logger.debug(f"创建聊天 {chat_id} 的关系构建器") return self.builders[chat_id] @@ -51,7 +51,7 @@ class RelationshipBuilderManager: """ if chat_id in self.builders: del self.builders[chat_id] - logger.info(f"移除聊天 {chat_id} 的关系构建器") + logger.debug(f"移除聊天 {chat_id} 的关系构建器") return True return False diff --git a/src/person_info/relationship_fetcher.py b/src/person_info/relationship_fetcher.py index 15bc6cc81..006f99e18 100644 --- a/src/person_info/relationship_fetcher.py +++ b/src/person_info/relationship_fetcher.py @@ -106,7 +106,15 @@ class RelationshipFetcher: await self._extract_single_info(person_id, info_type, person_name) relation_info = self._organize_known_info() - relation_info = f"你对{person_name}的印象是:{short_impression}\n{relation_info}" + if short_impression and relation_info: + relation_info = f"你对{person_name}的印象是:{short_impression}。具体来说:{relation_info}" + elif short_impression: + relation_info = f"你对{person_name}的印象是:{short_impression}" + elif relation_info: + relation_info = f"你对{person_name}的了解:{relation_info}" + else: + relation_info = "" + return relation_info async def _build_fetch_query(self, person_id, target_message, chat_history): diff --git a/src/plugin_system/apis/database_api.py b/src/plugin_system/apis/database_api.py index 3921443df..085df997f 100644 --- a/src/plugin_system/apis/database_api.py +++ b/src/plugin_system/apis/database_api.py @@ -374,7 +374,7 @@ async def store_action_info( ) if saved_record: - logger.info(f"[DatabaseAPI] 成功存储动作信息: {action_name} (ID: {record_data['action_id']})") + logger.debug(f"[DatabaseAPI] 成功存储动作信息: {action_name} (ID: {record_data['action_id']})") else: logger.error(f"[DatabaseAPI] 存储动作信息失败: {action_name}") diff --git a/src/plugin_system/apis/generator_api.py b/src/plugin_system/apis/generator_api.py index 9f7f136be..ead002064 100644 --- a/src/plugin_system/apis/generator_api.py +++ b/src/plugin_system/apis/generator_api.py @@ -27,7 +27,6 @@ logger = get_logger("generator_api") def get_replyer( chat_stream: Optional[ChatStream] = None, chat_id: Optional[str] = None, - enable_tool: bool = False, model_configs: Optional[List[Dict[str, Any]]] = None, request_type: str = "replyer", ) -> Optional[DefaultReplyer]: @@ -52,7 +51,6 @@ def get_replyer( chat_id=chat_id, model_configs=model_configs, request_type=request_type, - enable_tool=enable_tool, ) except Exception as e: logger.error(f"[GeneratorAPI] 获取回复器时发生意外错误: {e}", exc_info=True) @@ -70,7 +68,6 @@ async def generate_reply( chat_id: str = None, action_data: Dict[str, Any] = None, reply_to: str = "", - relation_info: str = "", extra_info: str = "", available_actions: List[str] = None, enable_tool: bool 
= False, @@ -79,6 +76,7 @@ async def generate_reply( return_prompt: bool = False, model_configs: Optional[List[Dict[str, Any]]] = None, request_type: str = "", + enable_timeout: bool = False, ) -> Tuple[bool, List[Tuple[str, Any]]]: """生成回复 @@ -95,27 +93,28 @@ async def generate_reply( try: # 获取回复器 replyer = get_replyer( - chat_stream, chat_id, model_configs=model_configs, request_type=request_type, enable_tool=enable_tool + chat_stream, chat_id, model_configs=model_configs, request_type=request_type ) if not replyer: logger.error("[GeneratorAPI] 无法获取回复器") return False, [] - logger.info("[GeneratorAPI] 开始生成回复") + logger.debug("[GeneratorAPI] 开始生成回复") # 调用回复器生成回复 success, content, prompt = await replyer.generate_reply_with_context( reply_data=action_data or {}, reply_to=reply_to, - relation_info=relation_info, extra_info=extra_info, available_actions=available_actions, + enable_timeout=enable_timeout, + enable_tool=enable_tool, ) reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo) if success: - logger.info(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项") + logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项") else: logger.warning("[GeneratorAPI] 回复生成失败") diff --git a/src/plugin_system/apis/send_api.py b/src/plugin_system/apis/send_api.py index 645f2b4dc..d9b1eff7c 100644 --- a/src/plugin_system/apis/send_api.py +++ b/src/plugin_system/apis/send_api.py @@ -66,7 +66,7 @@ async def _send_to_target( bool: 是否发送成功 """ try: - logger.info(f"[SendAPI] 发送{message_type}消息到 {stream_id}") + logger.debug(f"[SendAPI] 发送{message_type}消息到 {stream_id}") # 查找目标聊天流 target_stream = get_chat_manager().get_stream(stream_id) diff --git a/src/plugins/built_in/core_actions/no_reply.py b/src/plugins/built_in/core_actions/no_reply.py index f480886ce..3e98ed32e 100644 --- a/src/plugins/built_in/core_actions/no_reply.py +++ b/src/plugins/built_in/core_actions/no_reply.py @@ -77,7 +77,7 @@ class NoReplyAction(BaseAction): reason = self.action_data.get("reason", "") start_time = time.time() - last_judge_time = 0 # 上次进行LLM判断的时间 + last_judge_time = start_time # 上次进行LLM判断的时间 min_judge_interval = self._min_judge_interval # 最小判断间隔,从配置获取 check_interval = 0.2 # 检查新消息的间隔,设为0.2秒提高响应性 @@ -357,7 +357,7 @@ class NoReplyAction(BaseAction): judge_history.append((current_time, judge_result, reason)) if judge_result == "需要回复": - logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待") + # logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待") full_prompt = f"{global_config.bot.nickname}(你)的想法是:{reason}" await self.store_action_info( diff --git a/src/plugins/built_in/core_actions/plugin.py b/src/plugins/built_in/core_actions/plugin.py index 217405c0e..a96d3ab13 100644 --- a/src/plugins/built_in/core_actions/plugin.py +++ b/src/plugins/built_in/core_actions/plugin.py @@ -8,6 +8,7 @@ import random import time from typing import List, Tuple, Type +import asyncio # 导入新插件系统 from src.plugin_system import BasePlugin, register_plugin, BaseAction, ComponentInfo, ActionActivationType, ChatMode @@ -55,17 +56,24 @@ class ReplyAction(BaseAction): async def execute(self) -> Tuple[bool, str]: """执行回复动作""" - logger.info(f"{self.log_prefix} 决定回复: {self.reasoning}") + logger.info(f"{self.log_prefix} 决定进行回复") start_time = self.action_data.get("loop_start_time", time.time()) try: - success, reply_set = await generator_api.generate_reply( - action_data=self.action_data, - chat_id=self.chat_id, - request_type="focus.replyer", - enable_tool=global_config.tool.enable_in_focus_chat, - ) + try: + success, reply_set = await 
asyncio.wait_for( + generator_api.generate_reply( + action_data=self.action_data, + chat_id=self.chat_id, + request_type="focus.replyer", + enable_tool=global_config.tool.enable_in_focus_chat, + ), + timeout=global_config.chat.thinking_timeout, + ) + except asyncio.TimeoutError: + logger.warning(f"{self.log_prefix} 回复生成超时 ({global_config.chat.thinking_timeout}s)") + return False, "timeout" # 检查从start_time以来的新消息数量 # 获取动作触发时间或使用默认值 @@ -77,7 +85,7 @@ class ReplyAction(BaseAction): # 根据新消息数量决定是否使用reply_to need_reply = new_message_count >= random.randint(2, 5) logger.info( - f"{self.log_prefix} 从{start_time}到{current_time}共有{new_message_count}条新消息,{'使用' if need_reply else '不使用'}reply_to" + f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,{'使用' if need_reply else '不使用'}引用回复" ) # 构建回复文本 diff --git a/src/tools/tool_executor.py b/src/tools/tool_executor.py index 0673068cf..34a35ae3b 100644 --- a/src/tools/tool_executor.py +++ b/src/tools/tool_executor.py @@ -6,6 +6,7 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.tools.tool_use import ToolUser from src.chat.utils.json_utils import process_llm_tool_calls from typing import List, Dict, Tuple, Optional +from src.chat.message_receive.chat_stream import get_chat_manager logger = get_logger("tool_executor") @@ -42,7 +43,9 @@ class ToolExecutor: cache_ttl: 缓存生存时间(周期数) """ self.chat_id = chat_id - self.log_prefix = f"[ToolExecutor:{self.chat_id}] " + self.chat_stream = get_chat_manager().get_stream(self.chat_id) + self.log_prefix = f"[{get_chat_manager().get_stream_name(self.chat_id) or self.chat_id}]" + self.llm_model = LLMRequest( model=global_config.model.tool_use, request_type="tool_executor", diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index c4ddd21d8..40ab3b36f 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "3.2.0" +version = "3.3.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -102,6 +102,8 @@ exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易 # 专注模式下,麦麦会进行主动的观察和回复,并给出回复,token消耗量较高 # 自动模式下,麦麦会根据消息内容自动切换到专注模式或普通模式 +thinking_timeout = 30 # 麦麦一次回复最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢) + [message_receive] # 以下是消息过滤,可以根据规则过滤特定消息,将不会读取这些消息 ban_words = [ @@ -117,18 +119,12 @@ ban_msgs_regex = [ [normal_chat] #普通聊天 #一般回复参数 emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率 -thinking_timeout = 30 # 麦麦最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢) - willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,mxp模式:mxp,自定义模式:custom(需要你自己实现) - response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数 - mentioned_bot_inevitable_reply = true # 提及 bot 必然回复 at_bot_inevitable_reply = true # @bot 必然回复(包含提及) - enable_planner = true # 是否启用动作规划器(与focus_chat共享actions) - [focus_chat] #专注聊天 think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗 consecutive_replies = 1 # 连续回复能力,值越高,麦麦连续回复的概率越高 @@ -228,7 +224,7 @@ console_log_level = "INFO" # 控制台日志级别,可选: DEBUG, INFO, WARNIN file_log_level = "DEBUG" # 文件日志级别,可选: DEBUG, INFO, WARNING, ERROR, CRITICAL # 第三方库日志控制 -suppress_libraries = ["faiss","httpx", "urllib3", "asyncio", "websockets", "httpcore", "requests", "peewee", "openai","uvicorn"] # 完全屏蔽的库 +suppress_libraries = ["faiss","httpx", "urllib3", "asyncio", "websockets", "httpcore", "requests", "peewee", "openai","uvicorn","jieba"] # 完全屏蔽的库 library_log_levels = { "aiohttp" = "WARNING"} # 设置特定库的日志级别 #下面的模型若使用硅基流动则不需要更改,使用ds官方则改成.env自定义的宏,使用自定义模型则选择定位相似的模型自己填写 From 498d72384fffac547fddb7c12bc46a1829c63920 Mon Sep 17 00:00:00 2001 From: SengokuCola 
<1026294844@qq.com> Date: Sun, 6 Jul 2025 18:36:14 +0800 Subject: [PATCH 44/63] =?UTF-8?q?feat=EF=BC=9A=E7=BB=9F=E4=B8=80normal?= =?UTF-8?q?=E5=92=8Cfocus=E7=9A=84=E5=8A=A8=E4=BD=9C=E8=B0=83=E6=95=B4,emo?= =?UTF-8?q?ji=E7=BB=9F=E4=B8=80=E5=8F=AF=E9=80=89=E9=9A=8F=E6=9C=BA?= =?UTF-8?q?=E6=BF=80=E6=B4=BB=E6=88=96llm=E6=BF=80=E6=B4=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 2 + src/chat/focus_chat/heartFC_chat.py | 40 +- src/chat/focus_chat/planners/base_planner.py | 28 -- .../observation/actions_observation.py | 2 +- src/chat/normal_chat/normal_chat.py | 41 +- .../normal_chat_action_modifier.py | 403 ------------------ src/chat/normal_chat/normal_chat_utils.py | 30 -- .../action_manager.py | 4 - .../action_modifier.py} | 349 ++++++--------- .../planner_focus.py} | 10 +- .../planner_normal.py} | 2 +- src/common/logger.py | 4 +- src/config/official_configs.py | 20 +- src/person_info/relationship_fetcher.py | 2 +- src/plugin_system/apis/emoji_api.py | 4 +- src/plugin_system/apis/send_api.py | 2 +- src/plugins/built_in/core_actions/emoji.py | 2 +- src/plugins/built_in/core_actions/plugin.py | 11 +- src/tools/tool_executor.py | 3 +- template/bot_config_template.toml | 6 +- 20 files changed, 217 insertions(+), 748 deletions(-) delete mode 100644 src/chat/focus_chat/planners/base_planner.py delete mode 100644 src/chat/normal_chat/normal_chat_action_modifier.py delete mode 100644 src/chat/normal_chat/normal_chat_utils.py rename src/chat/{focus_chat/planners => planner_actions}/action_manager.py (98%) rename src/chat/{focus_chat/planners/modify_actions.py => planner_actions/action_modifier.py} (57%) rename src/chat/{focus_chat/planners/planner_simple.py => planner_actions/planner_focus.py} (97%) rename src/chat/{normal_chat/normal_chat_planner.py => planner_actions/planner_normal.py} (99%) diff --git a/changelogs/changelog.md b/changelogs/changelog.md index 41c760e85..eab206f1b 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -10,6 +10,8 @@ - 优化计时信息和Log - 添加回复超时检查 - normal的插件允许llm激活 +- 合并action激活器 +- emoji统一可选随机激活或llm激活 ## [0.8.1] - 2025-7-5 diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index 1009edde5..a6d12b821 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -21,9 +21,9 @@ from src.chat.heart_flow.observation.actions_observation import ActionObservatio from src.chat.focus_chat.memory_activator import MemoryActivator from src.chat.focus_chat.info_processors.base_processor import BaseProcessor -from src.chat.focus_chat.planners.planner_simple import ActionPlanner -from src.chat.focus_chat.planners.modify_actions import ActionModifier -from src.chat.focus_chat.planners.action_manager import ActionManager +from src.chat.planner_actions.planner_focus import ActionPlanner +from src.chat.planner_actions.action_modifier import ActionModifier +from src.chat.planner_actions.action_manager import ActionManager from src.config.config import global_config from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger from src.chat.focus_chat.hfc_version_manager import get_hfc_version @@ -50,24 +50,6 @@ PROCESSOR_CLASSES = { logger = get_logger("hfc") # Logger Name Changed -async def _handle_cycle_delay(action_taken_this_cycle: bool, cycle_start_time: float, log_prefix: str): - """处理循环延迟""" - cycle_duration = time.monotonic() - cycle_start_time - - try: - sleep_duration = 0.0 - if not action_taken_this_cycle and 
cycle_duration < 1: - sleep_duration = 1 - cycle_duration - elif cycle_duration < 0.2: - sleep_duration = 0.2 - - if sleep_duration > 0: - await asyncio.sleep(sleep_duration) - - except asyncio.CancelledError: - logger.info(f"{log_prefix} Sleep interrupted, loop likely cancelling.") - raise - class HeartFChatting: """ @@ -80,7 +62,6 @@ class HeartFChatting: self, chat_id: str, on_stop_focus_chat: Optional[Callable[[], Awaitable[None]]] = None, - performance_version: str = None, ): """ HeartFChatting 初始化函数 @@ -122,7 +103,7 @@ class HeartFChatting: self.action_planner = ActionPlanner( log_prefix=self.log_prefix, action_manager=self.action_manager ) - self.action_modifier = ActionModifier(action_manager=self.action_manager) + self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id) self.action_observation = ActionObservation(observe_id=self.stream_id) self.action_observation.set_action_manager(self.action_manager) @@ -146,7 +127,7 @@ class HeartFChatting: # 初始化性能记录器 # 如果没有指定版本号,则使用全局版本管理器的版本号 - actual_version = performance_version or get_hfc_version() + actual_version = get_hfc_version() self.performance_logger = HFCPerformanceLogger(chat_id, actual_version) logger.info( @@ -287,7 +268,6 @@ class HeartFChatting: # 初始化周期状态 cycle_timers = {} - loop_cycle_start_time = time.monotonic() # 执行规划和处理阶段 try: @@ -370,11 +350,6 @@ class HeartFChatting: self._current_cycle_detail.timers = cycle_timers - # 防止循环过快消耗资源 - await _handle_cycle_delay( - loop_info["loop_action_info"]["action_taken"], loop_cycle_start_time, self.log_prefix - ) - # 完成当前循环并保存历史 self._current_cycle_detail.complete_cycle() self._cycle_history.append(self._current_cycle_detail) @@ -407,7 +382,7 @@ class HeartFChatting: self.performance_logger.record_cycle(cycle_performance_data) except Exception as perf_e: logger.warning(f"{self.log_prefix} 记录性能数据失败: {perf_e}") - + await asyncio.sleep(global_config.focus_chat.think_interval) except asyncio.CancelledError: @@ -543,6 +518,7 @@ class HeartFChatting: # 调用完整的动作修改流程 await self.action_modifier.modify_actions( observations=self.observations, + mode="focus", ) await self.action_observation.observe() @@ -567,7 +543,7 @@ class HeartFChatting: logger.debug(f"{self.log_prefix} 并行阶段完成,准备进入规划器,plan_info数量: {len(all_plan_info)}") with Timer("规划器", cycle_timers): - plan_result = await self.action_planner.plan(all_plan_info, self.observations, loop_start_time) + plan_result = await self.action_planner.plan(all_plan_info, loop_start_time) loop_plan_info = { "action_result": plan_result.get("action_result", {}), diff --git a/src/chat/focus_chat/planners/base_planner.py b/src/chat/focus_chat/planners/base_planner.py deleted file mode 100644 index 0492039e1..000000000 --- a/src/chat/focus_chat/planners/base_planner.py +++ /dev/null @@ -1,28 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Dict, Any -from src.chat.focus_chat.planners.action_manager import ActionManager -from src.chat.focus_chat.info.info_base import InfoBase - - -class BasePlanner(ABC): - """规划器基类""" - - def __init__(self, log_prefix: str, action_manager: ActionManager): - self.log_prefix = log_prefix - self.action_manager = action_manager - - @abstractmethod - async def plan( - self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]], loop_start_time: float - ) -> Dict[str, Any]: - """ - 规划下一步行动 - - Args: - all_plan_info: 所有计划信息 - running_memorys: 回忆信息 - loop_start_time: 循环开始时间 - Returns: - Dict[str, Any]: 规划结果 - """ - pass diff --git 
a/src/chat/heart_flow/observation/actions_observation.py b/src/chat/heart_flow/observation/actions_observation.py index 12e972daf..125032140 100644 --- a/src/chat/heart_flow/observation/actions_observation.py +++ b/src/chat/heart_flow/observation/actions_observation.py @@ -2,7 +2,7 @@ # 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 from datetime import datetime from src.common.logger import get_logger -from src.chat.focus_chat.planners.action_manager import ActionManager +from src.chat.planner_actions.action_manager import ActionManager logger = get_logger("observation") diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index d81f7f48b..6817670f0 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -9,18 +9,17 @@ from src.plugin_system.apis import generator_api from maim_message import UserInfo, Seg from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager from src.chat.utils.timer_calculator import Timer - +from src.common.message_repository import count_messages from src.chat.utils.prompt_builder import global_prompt_manager from ..message_receive.message import MessageSending, MessageRecv, MessageThinking, MessageSet from src.chat.message_receive.message_sender import message_manager from src.chat.normal_chat.willing.willing_manager import get_willing_manager -from src.chat.normal_chat.normal_chat_utils import get_recent_message_stats -from src.chat.focus_chat.planners.action_manager import ActionManager +from src.chat.planner_actions.action_manager import ActionManager from src.person_info.relationship_builder_manager import relationship_builder_manager from .priority_manager import PriorityManager import traceback -from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner -from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier +from src.chat.planner_actions.planner_normal import NormalChatPlanner +from src.chat.planner_actions.action_modifier import ActionModifier from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info from src.manager.mood_manager import mood_manager @@ -71,7 +70,7 @@ class NormalChat: # Planner相关初始化 self.action_manager = ActionManager() self.planner = NormalChatPlanner(self.stream_name, self.action_manager) - self.action_modifier = NormalChatActionModifier(self.action_manager, self.stream_id, self.stream_name) + self.action_modifier = ActionModifier(self.action_manager, self.stream_id) self.enable_planner = global_config.normal_chat.enable_planner # 从配置中读取是否启用planner # 记录最近的回复内容,每项包含: {time, user_message, response, is_mentioned, is_reference_reply} @@ -569,8 +568,8 @@ class NormalChat: available_actions = None if self.enable_planner: try: - await self.action_modifier.modify_actions_for_normal_chat( - self.chat_stream, self.recent_replies, message.processed_plain_text + await self.action_modifier.modify_actions( + mode="normal", message_content=message.processed_plain_text ) available_actions = self.action_manager.get_using_actions_for_mode("normal") except Exception as e: @@ -1003,3 +1002,29 @@ class NormalChat: except Exception as e: logger.error(f"[{self.stream_name}] 清理思考消息 {thinking_id} 时出错: {e}") + +def get_recent_message_stats(minutes: int = 30, chat_id: str = None) -> dict: + """ + Args: + minutes (int): 检索的分钟数,默认30分钟 + chat_id (str, optional): 指定的chat_id,仅统计该chat下的消息。为None时统计全部。 + Returns: + dict: {"bot_reply_count": int, "total_message_count": int} + """ + + now = time.time() + start_time = now - minutes * 60 + bot_id 
= global_config.bot.qq_account + + filter_base = {"time": {"$gte": start_time}} + if chat_id is not None: + filter_base["chat_id"] = chat_id + + # 总消息数 + total_message_count = count_messages(filter_base) + # bot自身回复数 + bot_filter = filter_base.copy() + bot_filter["user_id"] = bot_id + bot_reply_count = count_messages(bot_filter) + + return {"bot_reply_count": bot_reply_count, "total_message_count": total_message_count} \ No newline at end of file diff --git a/src/chat/normal_chat/normal_chat_action_modifier.py b/src/chat/normal_chat/normal_chat_action_modifier.py deleted file mode 100644 index d2f715cb4..000000000 --- a/src/chat/normal_chat/normal_chat_action_modifier.py +++ /dev/null @@ -1,403 +0,0 @@ -from typing import List, Any, Dict -from src.common.logger import get_logger -from src.chat.focus_chat.planners.action_manager import ActionManager -from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat -from src.config.config import global_config -import random -import time -import asyncio - -logger = get_logger("normal_chat_action_modifier") - - -class NormalChatActionModifier: - """Normal Chat动作修改器 - - 负责根据Normal Chat的上下文和状态动态调整可用的动作集合 - 实现与Focus Chat类似的动作激活策略,但将LLM_JUDGE转换为概率激活以提升性能 - """ - - def __init__(self, action_manager: ActionManager, stream_id: str, stream_name: str): - """初始化动作修改器""" - self.action_manager = action_manager - self.stream_id = stream_id - self.stream_name = stream_name - self.log_prefix = f"[{stream_name}]动作修改器" - - # 缓存所有注册的动作 - self.all_actions = self.action_manager.get_registered_actions() - - async def modify_actions_for_normal_chat( - self, - chat_stream, - recent_replies: List[dict], - message_content: str, - **kwargs: Any, - ): - """为Normal Chat修改可用动作集合 - - 实现动作激活策略: - 1. 基于关联类型的动态过滤 - 2. 
基于激活类型的智能判定(LLM_JUDGE转为概率激活) - - Args: - chat_stream: 聊天流对象 - recent_replies: 最近的回复记录 - message_content: 当前消息内容 - **kwargs: 其他参数 - """ - - reasons = [] - merged_action_changes = {"add": [], "remove": []} - type_mismatched_actions = [] # 在外层定义避免作用域问题 - - self.action_manager.restore_default_actions() - - # 第一阶段:基于关联类型的动态过滤 - if chat_stream: - chat_context = chat_stream.context if hasattr(chat_stream, "context") else None - if chat_context: - # 获取Normal模式下的可用动作(已经过滤了mode_enable) - current_using_actions = self.action_manager.get_using_actions_for_mode("normal") - # print(f"current_using_actions: {current_using_actions}") - for action_name in current_using_actions.keys(): - if action_name in self.all_actions: - data = self.all_actions[action_name] - if data.get("associated_types"): - if not chat_context.check_types(data["associated_types"]): - type_mismatched_actions.append(action_name) - logger.debug(f"{self.log_prefix} 动作 {action_name} 关联类型不匹配,移除该动作") - - if type_mismatched_actions: - merged_action_changes["remove"].extend(type_mismatched_actions) - reasons.append(f"移除{type_mismatched_actions}(关联类型不匹配)") - - # 第二阶段:应用激活类型判定 - # 构建聊天内容 - 使用与planner一致的方式 - chat_content = "" - if chat_stream and hasattr(chat_stream, "stream_id"): - try: - # 获取消息历史,使用与normal_chat_planner相同的方法 - message_list_before_now = get_raw_msg_before_timestamp_with_chat( - chat_id=chat_stream.stream_id, - timestamp=time.time(), - limit=global_config.chat.max_context_size, # 使用相同的配置 - ) - - # 构建可读的聊天上下文 - chat_content = build_readable_messages( - message_list_before_now, - replace_bot_name=True, - merge_messages=False, - timestamp_mode="relative", - read_mark=0.0, - show_actions=True, - ) - - logger.debug(f"{self.log_prefix} 成功构建聊天内容,长度: {len(chat_content)}") - - except Exception as e: - logger.warning(f"{self.log_prefix} 构建聊天内容失败: {e}") - chat_content = "" - - # 获取当前Normal模式下的动作集进行激活判定 - current_actions = self.action_manager.get_using_actions_for_mode("normal") - - # print(f"current_actions: {current_actions}") - # print(f"chat_content: {chat_content}") - final_activated_actions = await self._apply_normal_activation_filtering( - current_actions, chat_content, message_content, recent_replies - ) - # print(f"final_activated_actions: {final_activated_actions}") - - # 统一处理所有需要移除的动作,避免重复移除 - all_actions_to_remove = set() # 使用set避免重复 - - # 添加关联类型不匹配的动作 - if type_mismatched_actions: - all_actions_to_remove.update(type_mismatched_actions) - - # 添加激活类型判定未通过的动作 - for action_name in current_actions.keys(): - if action_name not in final_activated_actions: - all_actions_to_remove.add(action_name) - - # 统计移除原因(避免重复) - activation_failed_actions = [ - name - for name in current_actions.keys() - if name not in final_activated_actions and name not in type_mismatched_actions - ] - if activation_failed_actions: - reasons.append(f"移除{activation_failed_actions}(激活类型判定未通过)") - - # 统一执行移除操作 - for action_name in all_actions_to_remove: - success = self.action_manager.remove_action_from_using(action_name) - if success: - logger.debug(f"{self.log_prefix} 移除动作: {action_name}") - else: - logger.debug(f"{self.log_prefix} 动作 {action_name} 已经不在使用集中,跳过移除") - - # 应用动作添加(如果有的话) - for action_name in merged_action_changes["add"]: - if action_name in self.all_actions: - success = self.action_manager.add_action_to_using(action_name) - if success: - logger.debug(f"{self.log_prefix} 添加动作: {action_name}") - - # 记录变更原因 - if reasons: - logger.info(f"{self.log_prefix} 动作调整完成: {' | '.join(reasons)}") - - # 获取最终的Normal模式可用动作并记录 - final_actions = 
self.action_manager.get_using_actions_for_mode("normal") - logger.debug(f"{self.log_prefix} 当前Normal模式可用动作: {list(final_actions.keys())}") - - async def _apply_normal_activation_filtering( - self, - actions_with_info: Dict[str, Any], - chat_content: str = "", - message_content: str = "", - recent_replies: List[dict] = None, - ) -> Dict[str, Any]: - """ - 应用Normal模式的激活类型过滤逻辑 - - 与Focus模式的区别: - 1. LLM_JUDGE类型转换为概率激活(避免LLM调用) - 2. RANDOM类型保持概率激活 - 3. KEYWORD类型保持关键词匹配 - 4. ALWAYS类型直接激活 - - Args: - actions_with_info: 带完整信息的动作字典 - chat_content: 聊天内容 - message_content: 当前消息内容 - recent_replies: 最近的回复记录列表 - - Returns: - Dict[str, Any]: 过滤后激活的actions字典 - """ - activated_actions = {} - - # 分类处理不同激活类型的actions - always_actions = {} - random_actions = {} - keyword_actions = {} - llm_judge_actions = {} - - for action_name, action_info in actions_with_info.items(): - # 使用normal_activation_type - activation_type = action_info.get("normal_activation_type", "always") - - # 现在统一是字符串格式的激活类型值 - if activation_type == "always": - always_actions[action_name] = action_info - elif activation_type == "random": - random_actions[action_name] = action_info - elif activation_type == "llm_judge": - llm_judge_actions[action_name] = action_info - elif activation_type == "keyword": - keyword_actions[action_name] = action_info - else: - logger.warning(f"{self.log_prefix}未知的激活类型: {activation_type},跳过处理") - - # 1. 处理ALWAYS类型(直接激活) - for action_name, action_info in always_actions.items(): - activated_actions[action_name] = action_info - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: ALWAYS类型直接激活") - - # 2. 处理RANDOM类型(概率激活) - for action_name, action_info in random_actions.items(): - probability = action_info.get("random_activation_probability", ActionManager.DEFAULT_RANDOM_PROBABILITY) - should_activate = random.random() < probability - if should_activate: - activated_actions[action_name] = action_info - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: RANDOM类型触发(概率{probability})") - else: - logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: RANDOM类型未触发(概率{probability})") - - # 3. 处理KEYWORD类型(关键词匹配) - for action_name, action_info in keyword_actions.items(): - should_activate = self._check_keyword_activation(action_name, action_info, chat_content, message_content) - if should_activate: - activated_actions[action_name] = action_info - keywords = action_info.get("activation_keywords", []) - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: KEYWORD类型匹配关键词({keywords})") - else: - keywords = action_info.get("activation_keywords", []) - logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: KEYWORD类型未匹配关键词({keywords})") - - # 4. 
处理LLM_JUDGE类型(并行判定) - if llm_judge_actions: - # 直接并行处理所有LLM判定actions - llm_results = await self._process_llm_judge_actions_parallel( - llm_judge_actions, - chat_content, - ) - - # 添加激活的LLM判定actions - for action_name, should_activate in llm_results.items(): - if should_activate: - activated_actions[action_name] = llm_judge_actions[action_name] - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: LLM_JUDGE类型判定通过") - else: - logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: LLM_JUDGE类型判定未通过") - - - - logger.debug(f"{self.log_prefix}Normal模式激活类型过滤完成: {list(activated_actions.keys())}") - return activated_actions - - def _check_keyword_activation( - self, - action_name: str, - action_info: Dict[str, Any], - chat_content: str = "", - message_content: str = "", - ) -> bool: - """ - 检查是否匹配关键词触发条件 - - Args: - action_name: 动作名称 - action_info: 动作信息 - chat_content: 聊天内容(已经是格式化后的可读消息) - - Returns: - bool: 是否应该激活此action - """ - - activation_keywords = action_info.get("activation_keywords", []) - case_sensitive = action_info.get("keyword_case_sensitive", False) - - if not activation_keywords: - logger.warning(f"{self.log_prefix}动作 {action_name} 设置为关键词触发但未配置关键词") - return False - - # 使用构建好的聊天内容作为检索文本 - search_text = chat_content + message_content - - # 如果不区分大小写,转换为小写 - if not case_sensitive: - search_text = search_text.lower() - - # 检查每个关键词 - matched_keywords = [] - for keyword in activation_keywords: - check_keyword = keyword if case_sensitive else keyword.lower() - if check_keyword in search_text: - matched_keywords.append(keyword) - - # print(f"search_text: {search_text}") - # print(f"activation_keywords: {activation_keywords}") - - if matched_keywords: - logger.debug(f"{self.log_prefix}动作 {action_name} 匹配到关键词: {matched_keywords}") - return True - else: - logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}") - return False - - - async def _process_llm_judge_actions_parallel( - self, - llm_judge_actions: Dict[str, Any], - chat_content: str = "", - ) -> Dict[str, bool]: - """ - 并行处理LLM判定actions,支持智能缓存 - - Args: - llm_judge_actions: 需要LLM判定的actions - chat_content: 聊天内容 - - Returns: - Dict[str, bool]: action名称到激活结果的映射 - """ - - # 生成当前上下文的哈希值 - current_context_hash = self._generate_context_hash(chat_content) - current_time = time.time() - - results = {} - tasks_to_run = {} - - # 检查缓存 - for action_name, action_info in llm_judge_actions.items(): - cache_key = f"{action_name}_{current_context_hash}" - - # 检查是否有有效的缓存 - if ( - cache_key in self._llm_judge_cache - and current_time - self._llm_judge_cache[cache_key]["timestamp"] < self._cache_expiry_time - ): - results[action_name] = self._llm_judge_cache[cache_key]["result"] - logger.debug( - f"{self.log_prefix}使用缓存结果 {action_name}: {'激活' if results[action_name] else '未激活'}" - ) - else: - # 需要进行LLM判定 - tasks_to_run[action_name] = action_info - - # 如果有需要运行的任务,并行执行 - if tasks_to_run: - logger.debug(f"{self.log_prefix}并行执行LLM判定,任务数: {len(tasks_to_run)}") - - # 创建并行任务 - tasks = [] - task_names = [] - - for action_name, action_info in tasks_to_run.items(): - task = self._llm_judge_action( - action_name, - action_info, - chat_content, - ) - tasks.append(task) - task_names.append(action_name) - - # 并行执行所有任务 - try: - task_results = await asyncio.gather(*tasks, return_exceptions=True) - - # 处理结果并更新缓存 - for _, (action_name, result) in enumerate(zip(task_names, task_results)): - if isinstance(result, Exception): - logger.error(f"{self.log_prefix}LLM判定action {action_name} 时出错: {result}") - results[action_name] = False - else: - 
results[action_name] = result - - # 更新缓存 - cache_key = f"{action_name}_{current_context_hash}" - self._llm_judge_cache[cache_key] = {"result": result, "timestamp": current_time} - - logger.debug(f"{self.log_prefix}并行LLM判定完成,耗时: {time.time() - current_time:.2f}s") - - except Exception as e: - logger.error(f"{self.log_prefix}并行LLM判定失败: {e}") - # 如果并行执行失败,为所有任务返回False - for action_name in tasks_to_run.keys(): - results[action_name] = False - - # 清理过期缓存 - self._cleanup_expired_cache(current_time) - - return results - - def get_available_actions_count(self) -> int: - """获取当前可用动作数量(排除默认的no_action)""" - current_actions = self.action_manager.get_using_actions_for_mode("normal") - # 排除no_action(如果存在) - filtered_actions = {k: v for k, v in current_actions.items() if k != "no_action"} - return len(filtered_actions) - - def should_skip_planning(self) -> bool: - """判断是否应该跳过规划过程""" - available_count = self.get_available_actions_count() - if available_count == 0: - logger.debug(f"{self.log_prefix} 没有可用动作,跳过规划") - return True - return False diff --git a/src/chat/normal_chat/normal_chat_utils.py b/src/chat/normal_chat/normal_chat_utils.py deleted file mode 100644 index 2ebd3bdaa..000000000 --- a/src/chat/normal_chat/normal_chat_utils.py +++ /dev/null @@ -1,30 +0,0 @@ -import time -from src.config.config import global_config -from src.common.message_repository import count_messages - - -def get_recent_message_stats(minutes: int = 30, chat_id: str = None) -> dict: - """ - Args: - minutes (int): 检索的分钟数,默认30分钟 - chat_id (str, optional): 指定的chat_id,仅统计该chat下的消息。为None时统计全部。 - Returns: - dict: {"bot_reply_count": int, "total_message_count": int} - """ - - now = time.time() - start_time = now - minutes * 60 - bot_id = global_config.bot.qq_account - - filter_base = {"time": {"$gte": start_time}} - if chat_id is not None: - filter_base["chat_id"] = chat_id - - # 总消息数 - total_message_count = count_messages(filter_base) - # bot自身回复数 - bot_filter = filter_base.copy() - bot_filter["user_id"] = bot_id - bot_reply_count = count_messages(bot_filter) - - return {"bot_reply_count": bot_reply_count, "total_message_count": total_message_count} diff --git a/src/chat/focus_chat/planners/action_manager.py b/src/chat/planner_actions/action_manager.py similarity index 98% rename from src/chat/focus_chat/planners/action_manager.py rename to src/chat/planner_actions/action_manager.py index 8dec6889a..c7f9bd6c1 100644 --- a/src/chat/focus_chat/planners/action_manager.py +++ b/src/chat/planner_actions/action_manager.py @@ -292,10 +292,6 @@ class ActionManager: ) self._using_actions = self._default_actions.copy() - def restore_default_actions(self) -> None: - """恢复默认动作集到使用集""" - self._using_actions = self._default_actions.copy() - def add_system_action_if_needed(self, action_name: str) -> bool: """ 根据需要添加系统动作到使用集 diff --git a/src/chat/focus_chat/planners/modify_actions.py b/src/chat/planner_actions/action_modifier.py similarity index 57% rename from src/chat/focus_chat/planners/modify_actions.py rename to src/chat/planner_actions/action_modifier.py index 1ec25567b..c57842ae4 100644 --- a/src/chat/focus_chat/planners/modify_actions.py +++ b/src/chat/planner_actions/action_modifier.py @@ -10,7 +10,8 @@ import random import asyncio import hashlib import time -from src.chat.focus_chat.planners.action_manager import ActionManager +from src.chat.planner_actions.action_manager import ActionManager +from src.chat.utils.chat_message_builder import get_raw_msg_before_timestamp_with_chat, build_readable_messages logger = 
get_logger("action_manager") @@ -23,12 +24,13 @@ class ActionModifier: 支持并行判定和智能缓存优化。 """ - log_prefix = "动作处理" - - def __init__(self, action_manager: ActionManager): + def __init__(self, action_manager: ActionManager, chat_id: str): """初始化动作处理器""" + self.chat_id = chat_id + self.chat_stream = get_chat_manager().get_stream(self.chat_id) + self.log_prefix = f"[{get_chat_manager().get_stream_name(self.chat_id) or self.chat_id}]" + self.action_manager = action_manager - self.all_actions = self.action_manager.get_using_actions_for_mode("focus") # 用于LLM判定的小模型 self.llm_judge = LLMRequest( @@ -43,11 +45,12 @@ class ActionModifier: async def modify_actions( self, + mode: str = "focus", observations: Optional[List[Observation]] = None, - **kwargs: Any, + message_content: str = "", ): """ - 完整的动作修改流程,整合传统观察处理和新的激活类型判定 + 动作修改流程,整合传统观察处理和新的激活类型判定 这个方法处理完整的动作管理流程: 1. 基于观察的传统动作修改(循环历史分析、类型匹配等) @@ -57,230 +60,156 @@ class ActionModifier: """ logger.debug(f"{self.log_prefix}开始完整动作修改流程") + removals_s1 = [] + removals_s2 = [] + + self.action_manager.restore_actions() + all_actions = self.action_manager.get_using_actions_for_mode(mode) + + message_list_before_now_half = get_raw_msg_before_timestamp_with_chat( + chat_id=self.chat_stream.stream_id, + timestamp=time.time(), + limit=int(global_config.chat.max_context_size * 0.5), + ) + chat_content = build_readable_messages( + message_list_before_now_half, + replace_bot_name=True, + merge_messages=False, + timestamp_mode="relative", + read_mark=0.0, + show_actions=True, + ) + + if message_content: + chat_content = chat_content + "\n" + f"现在,最新的消息是:{message_content}" + # === 第一阶段:传统观察处理 === - chat_content = None - if observations: - hfc_obs = None - chat_obs = None - - # 收集所有观察对象 for obs in observations: if isinstance(obs, HFCloopObservation): - hfc_obs = obs - if isinstance(obs, ChattingObservation): - chat_obs = obs - chat_content = obs.talking_message_str_truncate_short + # 获取适用于FOCUS模式的动作 + removals_from_loop = await self.analyze_loop_actions(obs) + if removals_from_loop: + removals_s1.extend(removals_from_loop) - # 合并所有动作变更 - merged_action_changes = {"add": [], "remove": []} - reasons = [] + # 检查动作的关联类型 + chat_context = self.chat_stream.context + type_mismatched_actions = self._check_action_associated_types(all_actions, chat_context) - # 处理HFCloopObservation - 传统的循环历史分析 - if hfc_obs: - obs = hfc_obs - # 获取适用于FOCUS模式的动作 - all_actions = self.all_actions - action_changes = await self.analyze_loop_actions(obs) - if action_changes["add"] or action_changes["remove"]: - # 合并动作变更 - merged_action_changes["add"].extend(action_changes["add"]) - merged_action_changes["remove"].extend(action_changes["remove"]) - reasons.append("基于循环历史分析") + if type_mismatched_actions: + removals_s1.extend(type_mismatched_actions) - # 详细记录循环历史分析的变更原因 - for action_name in action_changes["add"]: - logger.info(f"{self.log_prefix}添加动作: {action_name},原因: 循环历史分析建议添加") - for action_name in action_changes["remove"]: - logger.info(f"{self.log_prefix}移除动作: {action_name},原因: 循环历史分析建议移除") + # 应用第一阶段的移除 + for action_name, reason in removals_s1: + self.action_manager.remove_action_from_using(action_name) + logger.debug(f"{self.log_prefix}阶段一移除动作: {action_name},原因: {reason}") - # 处理ChattingObservation - 传统的类型匹配检查 - if chat_obs: - # 检查动作的关联类型 - chat_context = get_chat_manager().get_stream(chat_obs.chat_id).context - type_mismatched_actions = [] - - for action_name in all_actions.keys(): - data = all_actions[action_name] - if data.get("associated_types"): - if not 
chat_context.check_types(data["associated_types"]): - type_mismatched_actions.append(action_name) - associated_types_str = ", ".join(data["associated_types"]) - logger.info( - f"{self.log_prefix}移除动作: {action_name},原因: 关联类型不匹配(需要: {associated_types_str})" - ) - - if type_mismatched_actions: - # 合并到移除列表中 - merged_action_changes["remove"].extend(type_mismatched_actions) - reasons.append("基于关联类型检查") - - # 应用传统的动作变更到ActionManager - for action_name in merged_action_changes["add"]: - if action_name in self.action_manager.get_registered_actions(): - self.action_manager.add_action_to_using(action_name) - logger.debug(f"{self.log_prefix}应用添加动作: {action_name},原因集合: {reasons}") - - for action_name in merged_action_changes["remove"]: - self.action_manager.remove_action_from_using(action_name) - logger.debug(f"{self.log_prefix}应用移除动作: {action_name},原因集合: {reasons}") - - logger.info( - f"{self.log_prefix}传统动作修改完成,当前使用动作: {list(self.action_manager.get_using_actions().keys())}" - ) - - # 注释:已移除exit_focus_chat动作,现在由no_reply动作处理频率检测退出专注模式 # === 第二阶段:激活类型判定 === - # 如果提供了聊天上下文,则进行激活类型判定 if chat_content is not None: logger.debug(f"{self.log_prefix}开始激活类型判定阶段") - # 获取当前使用的动作集(经过第一阶段处理,且适用于FOCUS模式) - current_using_actions = self.action_manager.get_using_actions() - all_registered_actions = self.action_manager.get_registered_actions() - - # 构建完整的动作信息 - current_actions_with_info = {} - for action_name in current_using_actions.keys(): - if action_name in all_registered_actions: - current_actions_with_info[action_name] = all_registered_actions[action_name] - else: - logger.warning(f"{self.log_prefix}使用中的动作 {action_name} 未在已注册动作中找到") - - # 应用激活类型判定 - final_activated_actions = await self._apply_activation_type_filtering( - current_actions_with_info, + # 获取当前使用的动作集(经过第一阶段处理) + current_using_actions = self.action_manager.get_using_actions_for_mode(mode) + + # 获取因激活类型判定而需要移除的动作 + removals_s2 = await self._get_deactivated_actions_by_type( + current_using_actions, + mode, chat_content, ) - # 更新ActionManager,移除未激活的动作 - actions_to_remove = [] - removal_reasons = {} - - for action_name in current_using_actions.keys(): - if action_name not in final_activated_actions: - actions_to_remove.append(action_name) - # 确定移除原因 - if action_name in all_registered_actions: - action_info = all_registered_actions[action_name] - activation_type = action_info.get("focus_activation_type", "always") - - # 处理字符串格式的激活类型值 - if activation_type == "random": - probability = action_info.get("random_probability", 0.3) - removal_reasons[action_name] = f"RANDOM类型未触发(概率{probability})" - elif activation_type == "llm_judge": - removal_reasons[action_name] = "LLM判定未激活" - elif activation_type == "keyword": - keywords = action_info.get("activation_keywords", []) - removal_reasons[action_name] = f"关键词未匹配(关键词: {keywords})" - else: - removal_reasons[action_name] = "激活判定未通过" - else: - removal_reasons[action_name] = "动作信息不完整" - - for action_name in actions_to_remove: + # 应用第二阶段的移除 + for action_name, reason in removals_s2: self.action_manager.remove_action_from_using(action_name) - reason = removal_reasons.get(action_name, "未知原因") - logger.info(f"{self.log_prefix}移除动作: {action_name},原因: {reason}") - - # 注释:已完全移除exit_focus_chat动作 - - logger.info(f"{self.log_prefix}激活类型判定完成,最终可用动作: {list(final_activated_actions.keys())}") + logger.debug(f"{self.log_prefix}阶段二移除动作: {action_name},原因: {reason}") + + # === 统一日志记录 === + all_removals = removals_s1 + removals_s2 + if all_removals: + removals_summary = " | ".join([f"{name}({reason})" for name, reason in all_removals]) 
logger.info( - f"{self.log_prefix}完整动作修改流程结束,最终动作集: {list(self.action_manager.get_using_actions().keys())}" + f"{self.log_prefix}{mode}模式动作修改流程结束,最终可用动作: {list(self.action_manager.get_using_actions_for_mode(mode).keys())}||移除记录: {removals_summary}" ) - async def _apply_activation_type_filtering( + def _check_action_associated_types(self, all_actions, chat_context): + type_mismatched_actions = [] + for action_name, data in all_actions.items(): + if data.get("associated_types"): + if not chat_context.check_types(data["associated_types"]): + associated_types_str = ", ".join(data["associated_types"]) + reason = f"适配器不支持(需要: {associated_types_str})" + type_mismatched_actions.append((action_name, reason)) + logger.debug( + f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}" + ) + return type_mismatched_actions + + async def _get_deactivated_actions_by_type( self, actions_with_info: Dict[str, Any], + mode: str = "focus", chat_content: str = "", - ) -> Dict[str, Any]: + ) -> List[tuple[str, str]]: """ - 应用激活类型过滤逻辑,支持四种激活类型的并行处理 + 根据激活类型过滤,返回需要停用的动作列表及原因 Args: actions_with_info: 带完整信息的动作字典 chat_content: 聊天内容 Returns: - Dict[str, Any]: 过滤后激活的actions字典 + List[Tuple[str, str]]: 需要停用的 (action_name, reason) 元组列表 """ - activated_actions = {} + deactivated_actions = [] # 分类处理不同激活类型的actions - always_actions = {} - random_actions = {} llm_judge_actions = {} - keyword_actions = {} + + actions_to_check = list(actions_with_info.items()) + random.shuffle(actions_to_check) - for action_name, action_info in actions_with_info.items(): - activation_type = action_info.get("focus_activation_type", "always") + for action_name, action_info in actions_to_check: + activation_type = f"{mode}_activation_type" + activation_type = action_info.get(activation_type, "always") - # print(f"action_name: {action_name}, activation_type: {activation_type}") - - # 现在统一是字符串格式的激活类型值 if activation_type == "always": - always_actions[action_name] = action_info + continue # 总是激活,无需处理 + elif activation_type == "random": - random_actions[action_name] = action_info + probability = action_info.get("random_activation_probability", ActionManager.DEFAULT_RANDOM_PROBABILITY) + if not (random.random() < probability): + reason = f"RANDOM类型未触发(概率{probability})" + deactivated_actions.append((action_name, reason)) + logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: {reason}") + + elif activation_type == "keyword": + if not self._check_keyword_activation(action_name, action_info, chat_content): + keywords = action_info.get("activation_keywords", []) + reason = f"关键词未匹配(关键词: {keywords})" + deactivated_actions.append((action_name, reason)) + logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: {reason}") + elif activation_type == "llm_judge": llm_judge_actions[action_name] = action_info - elif activation_type == "keyword": - keyword_actions[action_name] = action_info + else: logger.warning(f"{self.log_prefix}未知的激活类型: {activation_type},跳过处理") - # 1. 处理ALWAYS类型(直接激活) - for action_name, action_info in always_actions.items(): - activated_actions[action_name] = action_info - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: ALWAYS类型直接激活") - - # 2. 
处理RANDOM类型 - for action_name, action_info in random_actions.items(): - probability = action_info.get("random_activation_probability", ActionManager.DEFAULT_RANDOM_PROBABILITY) - should_activate = random.random() < probability - if should_activate: - activated_actions[action_name] = action_info - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: RANDOM类型触发(概率{probability})") - else: - logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: RANDOM类型未触发(概率{probability})") - - # 3. 处理KEYWORD类型(快速判定) - for action_name, action_info in keyword_actions.items(): - should_activate = self._check_keyword_activation( - action_name, - action_info, - chat_content, - ) - if should_activate: - activated_actions[action_name] = action_info - keywords = action_info.get("activation_keywords", []) - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: KEYWORD类型匹配关键词({keywords})") - else: - keywords = action_info.get("activation_keywords", []) - logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: KEYWORD类型未匹配关键词({keywords})") - - # 4. 处理LLM_JUDGE类型(并行判定) + # 并行处理LLM_JUDGE类型 if llm_judge_actions: - # 直接并行处理所有LLM判定actions llm_results = await self._process_llm_judge_actions_parallel( llm_judge_actions, chat_content, ) - - # 添加激活的LLM判定actions for action_name, should_activate in llm_results.items(): - if should_activate: - activated_actions[action_name] = llm_judge_actions[action_name] - logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: LLM_JUDGE类型判定通过") - else: - logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: LLM_JUDGE类型判定未通过") + if not should_activate: + reason = "LLM判定未激活" + deactivated_actions.append((action_name, reason)) + logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: {reason}") - logger.debug(f"{self.log_prefix}激活类型过滤完成: {list(activated_actions.keys())}") - return activated_actions + return deactivated_actions async def process_actions_for_planner( self, observed_messages_str: str = "", chat_context: Optional[str] = None, extra_context: Optional[str] = None @@ -538,22 +467,19 @@ class ActionModifier: logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}") return False - async def analyze_loop_actions(self, obs: HFCloopObservation) -> Dict[str, List[str]]: - """分析最近的循环内容并决定动作的增减 + async def analyze_loop_actions(self, obs: HFCloopObservation) -> List[tuple[str, str]]: + """分析最近的循环内容并决定动作的移除 Returns: - Dict[str, List[str]]: 包含要增加和删除的动作 - { - "add": ["action1", "action2"], - "remove": ["action3"] - } + List[Tuple[str, str]]: 包含要删除的动作及原因的元组列表 + [("action3", "some reason")] """ - result = {"add": [], "remove": []} + removals = [] # 获取最近10次循环 recent_cycles = obs.history_loop[-10:] if len(obs.history_loop) > 10 else obs.history_loop if not recent_cycles: - return result + return removals reply_sequence = [] # 记录最近的动作序列 @@ -584,36 +510,39 @@ class ActionModifier: # 根据最近的reply情况决定是否移除reply动作 if len(last_max_reply_num) >= max_reply_num and all(last_max_reply_num): # 如果最近max_reply_num次都是reply,直接移除 - result["remove"].append("reply") + reason = f"连续回复过多(最近{len(last_max_reply_num)}次全是reply,超过阈值{max_reply_num})" + removals.append(("reply", reason)) # reply_count = len(last_max_reply_num) - no_reply_count - logger.info( - f"{self.log_prefix}移除reply动作,原因: 连续回复过多(最近{len(last_max_reply_num)}次全是reply,超过阈值{max_reply_num})" - ) elif len(last_max_reply_num) >= sec_thres_reply_num and all(last_max_reply_num[-sec_thres_reply_num:]): # 如果最近sec_thres_reply_num次都是reply,40%概率移除 removal_probability = 0.4 / global_config.focus_chat.consecutive_replies if random.random() < 
removal_probability: - result["remove"].append("reply") - logger.info( - f"{self.log_prefix}移除reply动作,原因: 连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" - ) - else: - logger.debug( - f"{self.log_prefix}连续回复检测:最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,未触发" - ) + reason = f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + removals.append(("reply", reason)) elif len(last_max_reply_num) >= one_thres_reply_num and all(last_max_reply_num[-one_thres_reply_num:]): # 如果最近one_thres_reply_num次都是reply,20%概率移除 removal_probability = 0.2 / global_config.focus_chat.consecutive_replies if random.random() < removal_probability: - result["remove"].append("reply") - logger.info( - f"{self.log_prefix}移除reply动作,原因: 连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" - ) - else: - logger.debug( - f"{self.log_prefix}连续回复检测:最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,未触发" - ) + reason = f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + removals.append(("reply", reason)) else: logger.debug(f"{self.log_prefix}连续回复检测:无需移除reply动作,最近回复模式正常") - return result + return removals + + + + def get_available_actions_count(self) -> int: + """获取当前可用动作数量(排除默认的no_action)""" + current_actions = self.action_manager.get_using_actions_for_mode("normal") + # 排除no_action(如果存在) + filtered_actions = {k: v for k, v in current_actions.items() if k != "no_action"} + return len(filtered_actions) + + def should_skip_planning(self) -> bool: + """判断是否应该跳过规划过程""" + available_count = self.get_available_actions_count() + if available_count == 0: + logger.debug(f"{self.log_prefix} 没有可用动作,跳过规划") + return True + return False \ No newline at end of file diff --git a/src/chat/focus_chat/planners/planner_simple.py b/src/chat/planner_actions/planner_focus.py similarity index 97% rename from src/chat/focus_chat/planners/planner_simple.py rename to src/chat/planner_actions/planner_focus.py index 8b06c7bed..bb3bdcacd 100644 --- a/src/chat/focus_chat/planners/planner_simple.py +++ b/src/chat/planner_actions/planner_focus.py @@ -9,9 +9,8 @@ from src.chat.focus_chat.info.obs_info import ObsInfo from src.chat.focus_chat.info.action_info import ActionInfo from src.common.logger import get_logger from src.chat.utils.prompt_builder import Prompt, global_prompt_manager -from src.chat.focus_chat.planners.action_manager import ActionManager +from src.chat.planner_actions.action_manager import ActionManager from json_repair import repair_json -from src.chat.focus_chat.planners.base_planner import BasePlanner from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info from datetime import datetime @@ -69,9 +68,10 @@ def init_prompt(): ) -class ActionPlanner(BasePlanner): +class ActionPlanner: def __init__(self, log_prefix: str, action_manager: ActionManager): - super().__init__(log_prefix, action_manager) + self.log_prefix = log_prefix + self.action_manager = action_manager # LLM规划器配置 self.planner_llm = LLMRequest( model=global_config.model.planner, @@ -84,7 +84,7 @@ class ActionPlanner(BasePlanner): ) async def plan( - self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]], loop_start_time: float + self, all_plan_info: List[InfoBase],loop_start_time: float ) -> Dict[str, Any]: """ 规划器 (Planner): 使用LLM根据上下文决定做出什么动作。 diff --git a/src/chat/normal_chat/normal_chat_planner.py b/src/chat/planner_actions/planner_normal.py similarity index 99% rename from 
src/chat/normal_chat/normal_chat_planner.py rename to src/chat/planner_actions/planner_normal.py index 83d12caa1..fce446b58 100644 --- a/src/chat/normal_chat/normal_chat_planner.py +++ b/src/chat/planner_actions/planner_normal.py @@ -6,7 +6,7 @@ from src.config.config import global_config from src.common.logger import get_logger from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.individuality.individuality import get_individuality -from src.chat.focus_chat.planners.action_manager import ActionManager +from src.chat.planner_actions.action_manager import ActionManager from src.chat.message_receive.message import MessageThinking from json_repair import repair_json from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat diff --git a/src/common/logger.py b/src/common/logger.py index 30a2e4bd7..c0fa7be2d 100644 --- a/src/common/logger.py +++ b/src/common/logger.py @@ -340,7 +340,7 @@ MODULE_COLORS = { "memory": "\033[34m", "hfc": "\033[96m", "base_action": "\033[96m", - "action_manager": "\033[34m", + "action_manager": "\033[32m", # 关系系统 "relation": "\033[38;5;201m", # 深粉色 # 聊天相关模块 @@ -414,7 +414,7 @@ MODULE_COLORS = { "confirm": "\033[1;93m", # 黄色+粗体 # 模型相关 "model_utils": "\033[38;5;164m", # 紫红色 - + "relationship_fetcher": "\033[38;5;170m", # 浅紫色 "relationship_builder": "\033[38;5;117m", # 浅蓝色 } diff --git a/src/config/official_configs.py b/src/config/official_configs.py index a07bc25f5..1c28ab7c8 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -273,12 +273,6 @@ class MessageReceiveConfig(ConfigBase): class NormalChatConfig(ConfigBase): """普通聊天配置类""" - message_buffer: bool = False - """消息缓冲器""" - - emoji_chance: float = 0.2 - """发送表情包的基础概率""" - willing_mode: str = "classical" """意愿模式""" @@ -295,14 +289,6 @@ class NormalChatConfig(ConfigBase): enable_planner: bool = False """是否启用动作规划器""" - gather_timeout: int = 110 # planner和generator的并行执行超时时间 - """planner和generator的并行执行超时时间""" - - auto_focus_threshold: float = 1.0 # 自动切换到专注模式的阈值,值越大越难触发 - """自动切换到专注模式的阈值,值越大越难触发""" - - fatigue_talk_frequency: float = 0.2 # 疲劳模式下的基础对话频率 (条/分钟) - """疲劳模式下的基础对话频率 (条/分钟)""" @dataclass @@ -362,6 +348,12 @@ class ToolConfig(ConfigBase): @dataclass class EmojiConfig(ConfigBase): """表情包配置类""" + + emoji_chance: float = 0.6 + """发送表情包的基础概率""" + + emoji_activate_type: str = "random" + """表情包激活类型,可选:random,llm,random下,表情包动作随机启用,llm下,表情包动作根据llm判断是否启用""" max_reg_num: int = 200 """表情包最大注册数量""" diff --git a/src/person_info/relationship_fetcher.py b/src/person_info/relationship_fetcher.py index 006f99e18..e2bde69de 100644 --- a/src/person_info/relationship_fetcher.py +++ b/src/person_info/relationship_fetcher.py @@ -142,7 +142,7 @@ class RelationshipFetcher: # 检查是否返回了不需要查询的标志 if "none" in content_json: - logger.info(f"{self.log_prefix} LLM判断当前不需要查询任何信息:{content_json.get('none', '')}") + logger.debug(f"{self.log_prefix} LLM判断当前不需要查询任何信息:{content_json.get('none', '')}") return None info_type = content_json.get("info_type") diff --git a/src/plugin_system/apis/emoji_api.py b/src/plugin_system/apis/emoji_api.py index 3fdcf1b55..33c0f23d7 100644 --- a/src/plugin_system/apis/emoji_api.py +++ b/src/plugin_system/apis/emoji_api.py @@ -31,7 +31,7 @@ async def get_by_description(description: str) -> Optional[Tuple[str, str, str]] Optional[Tuple[str, str, str]]: (base64编码, 表情包描述, 匹配的情感标签) 或 None """ try: - logger.info(f"[EmojiAPI] 根据描述获取表情包: {description}") + logger.debug(f"[EmojiAPI] 根据描述获取表情包: {description}") 
emoji_manager = get_emoji_manager() emoji_result = await emoji_manager.get_emoji_for_text(description) @@ -47,7 +47,7 @@ async def get_by_description(description: str) -> Optional[Tuple[str, str, str]] logger.error(f"[EmojiAPI] 无法将表情包文件转换为base64: {emoji_path}") return None - logger.info(f"[EmojiAPI] 成功获取表情包: {emoji_description}, 匹配情感: {matched_emotion}") + logger.debug(f"[EmojiAPI] 成功获取表情包: {emoji_description}, 匹配情感: {matched_emotion}") return emoji_base64, emoji_description, matched_emotion except Exception as e: diff --git a/src/plugin_system/apis/send_api.py b/src/plugin_system/apis/send_api.py index d9b1eff7c..c0486e164 100644 --- a/src/plugin_system/apis/send_api.py +++ b/src/plugin_system/apis/send_api.py @@ -116,7 +116,7 @@ async def _send_to_target( ) if sent_msg: - logger.info(f"[SendAPI] 成功发送消息到 {stream_id}") + logger.debug(f"[SendAPI] 成功发送消息到 {stream_id}") return True else: logger.error("[SendAPI] 发送消息失败") diff --git a/src/plugins/built_in/core_actions/emoji.py b/src/plugins/built_in/core_actions/emoji.py index c1fe0f0fb..128214427 100644 --- a/src/plugins/built_in/core_actions/emoji.py +++ b/src/plugins/built_in/core_actions/emoji.py @@ -18,7 +18,7 @@ class EmojiAction(BaseAction): """表情动作 - 发送表情包""" # 激活设置 - focus_activation_type = ActionActivationType.LLM_JUDGE + focus_activation_type = ActionActivationType.RANDOM normal_activation_type = ActionActivationType.RANDOM mode_enable = ChatMode.ALL parallel_action = True diff --git a/src/plugins/built_in/core_actions/plugin.py b/src/plugins/built_in/core_actions/plugin.py index a96d3ab13..2b7194063 100644 --- a/src/plugins/built_in/core_actions/plugin.py +++ b/src/plugins/built_in/core_actions/plugin.py @@ -180,8 +180,15 @@ class CoreActionsPlugin(BasePlugin): """返回插件包含的组件列表""" # --- 从配置动态设置Action/Command --- - emoji_chance = global_config.normal_chat.emoji_chance - EmojiAction.random_activation_probability = emoji_chance + emoji_chance = global_config.emoji.emoji_chance + if global_config.emoji.emoji_activate_type == "random": + EmojiAction.random_activation_probability = emoji_chance + EmojiAction.focus_activation_type = ActionActivationType.RANDOM + EmojiAction.normal_activation_type = ActionActivationType.RANDOM + elif global_config.emoji.emoji_activate_type == "llm": + EmojiAction.random_activation_probability = 0.0 + EmojiAction.focus_activation_type = ActionActivationType.LLM_JUDGE + EmojiAction.normal_activation_type = ActionActivationType.LLM_JUDGE no_reply_probability = self.get_config("no_reply.random_probability", 0.8) NoReplyAction.random_activation_probability = no_reply_probability diff --git a/src/tools/tool_executor.py b/src/tools/tool_executor.py index 34a35ae3b..b43dfcff3 100644 --- a/src/tools/tool_executor.py +++ b/src/tools/tool_executor.py @@ -128,7 +128,8 @@ class ToolExecutor: if tool_results: self._set_cache(cache_key, tool_results) - logger.info(f"{self.log_prefix}工具执行完成,共执行{len(used_tools)}个工具: {used_tools}") + if used_tools: + logger.info(f"{self.log_prefix}工具执行完成,共执行{len(used_tools)}个工具: {used_tools}") if return_details: return tool_results, used_tools, prompt diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 40ab3b36f..478d62ed8 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "3.3.0" +version = "3.4.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -118,7 +118,6 @@ ban_msgs_regex = [ [normal_chat] #普通聊天 #一般回复参数 -emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率 willing_mode = 
"classical" # 回复意愿模式 —— 经典模式:classical,mxp模式:mxp,自定义模式:custom(需要你自己实现) response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数 mentioned_bot_inevitable_reply = true # 提及 bot 必然回复 @@ -137,6 +136,9 @@ enable_in_normal_chat = false # 是否在普通聊天中启用工具 enable_in_focus_chat = true # 是否在专注聊天中启用工具 [emoji] +emoji_chance = 0.6 # 麦麦激活表情包动作的概率 +emoji_activate_type = "random" # 表情包激活类型,可选:random,llm ; random下,表情包动作随机启用,llm下,表情包动作根据llm判断是否启用 + max_reg_num = 60 # 表情包最大注册数量 do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包 check_interval = 10 # 检查表情包(注册,破损,删除)的时间间隔(分钟) From 6e15fec8b42b3d4d8bd3a782b2a77670bfff21f9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 10:36:29 +0000 Subject: [PATCH 45/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/focus_chat/heartFC_chat.py | 10 ++---- src/chat/focus_chat/memory_activator.py | 1 - src/chat/normal_chat/normal_chat.py | 15 +++++---- src/chat/planner_actions/action_modifier.py | 34 ++++++++++----------- src/chat/planner_actions/planner_focus.py | 4 +-- src/chat/replyer/default_generator.py | 21 ++++++++++--- src/config/official_configs.py | 4 +-- src/person_info/relationship_fetcher.py | 2 +- src/plugin_system/apis/generator_api.py | 4 +-- src/tools/tool_executor.py | 2 +- 10 files changed, 46 insertions(+), 51 deletions(-) diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index a6d12b821..b6ac6f050 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -50,7 +50,6 @@ PROCESSOR_CLASSES = { logger = get_logger("hfc") # Logger Name Changed - class HeartFChatting: """ 管理一个连续的Focus Chat循环 @@ -100,9 +99,7 @@ class HeartFChatting: self._register_default_processors() self.action_manager = ActionManager() - self.action_planner = ActionPlanner( - log_prefix=self.log_prefix, action_manager=self.action_manager - ) + self.action_planner = ActionPlanner(log_prefix=self.log_prefix, action_manager=self.action_manager) self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id) self.action_observation = ActionObservation(observe_id=self.stream_id) self.action_observation.set_action_manager(self.action_manager) @@ -360,7 +357,6 @@ class HeartFChatting: formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" timer_strings.append(f"{name}: {formatted_time}") - logger.info( f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考," f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, " @@ -382,7 +378,7 @@ class HeartFChatting: self.performance_logger.record_cycle(cycle_performance_data) except Exception as perf_e: logger.warning(f"{self.log_prefix} 记录性能数据失败: {perf_e}") - + await asyncio.sleep(global_config.focus_chat.think_interval) except asyncio.CancelledError: @@ -494,7 +490,6 @@ class HeartFChatting: ) traceback.print_exc() - return all_plan_info async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict: @@ -528,7 +523,6 @@ class HeartFChatting: logger.error(f"{self.log_prefix} 动作修改失败: {e}") # 继续执行,不中断流程 - try: all_plan_info = await self._process_processors(self.observations) except Exception as e: diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py index eb783d483..ab6e0c4a3 100644 --- 
a/src/chat/focus_chat/memory_activator.py +++ b/src/chat/focus_chat/memory_activator.py @@ -117,7 +117,6 @@ class MemoryActivator: # 添加新的关键词到缓存 self.cached_keywords.update(keywords) - # 调用记忆系统获取相关记忆 related_memory = await hippocampus_manager.get_memory_from_topic( diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 6817670f0..89e5dd0c3 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -28,6 +28,7 @@ willing_manager = get_willing_manager() logger = get_logger("normal_chat") + class NormalChat: """ 普通聊天处理类,负责处理非核心对话的聊天逻辑。 @@ -61,7 +62,7 @@ class NormalChat: self.willing_amplifier = 1 self.start_time = time.time() - + self.mood_manager = mood_manager self.start_time = time.time() @@ -77,7 +78,6 @@ class NormalChat: self.recent_replies = [] self.max_replies_history = 20 # 最多保存最近20条回复记录 - # 添加回调函数,用于在满足条件时通知切换到focus_chat模式 self.on_switch_to_focus_callback = on_switch_to_focus_callback @@ -561,16 +561,14 @@ class NormalChat: async def reply_one_message(self, message: MessageRecv) -> None: # 回复前处理 await self.relationship_builder.build_relation() - + thinking_id = await self._create_thinking_message(message) # 如果启用planner,预先修改可用actions(避免在并行任务中重复调用) available_actions = None if self.enable_planner: try: - await self.action_modifier.modify_actions( - mode="normal", message_content=message.processed_plain_text - ) + await self.action_modifier.modify_actions(mode="normal", message_content=message.processed_plain_text) available_actions = self.action_manager.get_using_actions_for_mode("normal") except Exception as e: logger.warning(f"[{self.stream_name}] 获取available_actions失败: {e}") @@ -647,7 +645,7 @@ class NormalChat: logger.info(f"[{self.stream_name}] 回复内容中没有文本,不发送消息") await self._cleanup_thinking_message_by_id(thinking_id) return False - + # 发送回复 (不再需要传入 chat) first_bot_msg = await self._add_messages_to_manager(message, reply_texts, thinking_id) @@ -954,6 +952,7 @@ class NormalChat: except Exception as e: logger.warning(f"[{self.stream_name}] 获取疲劳调整系数时出错: {e}") return 1.0 # 出错时返回正常系数 + async def _check_should_switch_to_focus(self) -> bool: """ 检查是否满足切换到focus模式的条件 @@ -1027,4 +1026,4 @@ def get_recent_message_stats(minutes: int = 30, chat_id: str = None) -> dict: bot_filter["user_id"] = bot_id bot_reply_count = count_messages(bot_filter) - return {"bot_reply_count": bot_reply_count, "total_message_count": total_message_count} \ No newline at end of file + return {"bot_reply_count": bot_reply_count, "total_message_count": total_message_count} diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py index c57842ae4..f75ce1235 100644 --- a/src/chat/planner_actions/action_modifier.py +++ b/src/chat/planner_actions/action_modifier.py @@ -2,7 +2,6 @@ from typing import List, Optional, Any, Dict from src.chat.heart_flow.observation.observation import Observation from src.common.logger import get_logger from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation -from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from src.chat.message_receive.chat_stream import get_chat_manager from src.config.config import global_config from src.llm_models.utils_model import LLMRequest @@ -62,10 +61,10 @@ class ActionModifier: removals_s1 = [] removals_s2 = [] - + self.action_manager.restore_actions() all_actions = self.action_manager.get_using_actions_for_mode(mode) - + message_list_before_now_half = 
get_raw_msg_before_timestamp_with_chat( chat_id=self.chat_stream.stream_id, timestamp=time.time(), @@ -79,7 +78,7 @@ class ActionModifier: read_mark=0.0, show_actions=True, ) - + if message_content: chat_content = chat_content + "\n" + f"现在,最新的消息是:{message_content}" @@ -104,14 +103,13 @@ class ActionModifier: self.action_manager.remove_action_from_using(action_name) logger.debug(f"{self.log_prefix}阶段一移除动作: {action_name},原因: {reason}") - # === 第二阶段:激活类型判定 === if chat_content is not None: logger.debug(f"{self.log_prefix}开始激活类型判定阶段") # 获取当前使用的动作集(经过第一阶段处理) current_using_actions = self.action_manager.get_using_actions_for_mode(mode) - + # 获取因激活类型判定而需要移除的动作 removals_s2 = await self._get_deactivated_actions_by_type( current_using_actions, @@ -123,7 +121,7 @@ class ActionModifier: for action_name, reason in removals_s2: self.action_manager.remove_action_from_using(action_name) logger.debug(f"{self.log_prefix}阶段二移除动作: {action_name},原因: {reason}") - + # === 统一日志记录 === all_removals = removals_s1 + removals_s2 if all_removals: @@ -141,11 +139,9 @@ class ActionModifier: associated_types_str = ", ".join(data["associated_types"]) reason = f"适配器不支持(需要: {associated_types_str})" type_mismatched_actions.append((action_name, reason)) - logger.debug( - f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}" - ) + logger.debug(f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}") return type_mismatched_actions - + async def _get_deactivated_actions_by_type( self, actions_with_info: Dict[str, Any], @@ -166,7 +162,7 @@ class ActionModifier: # 分类处理不同激活类型的actions llm_judge_actions = {} - + actions_to_check = list(actions_with_info.items()) random.shuffle(actions_to_check) @@ -193,7 +189,7 @@ class ActionModifier: elif activation_type == "llm_judge": llm_judge_actions[action_name] = action_info - + else: logger.warning(f"{self.log_prefix}未知的激活类型: {activation_type},跳过处理") @@ -517,21 +513,23 @@ class ActionModifier: # 如果最近sec_thres_reply_num次都是reply,40%概率移除 removal_probability = 0.4 / global_config.focus_chat.consecutive_replies if random.random() < removal_probability: - reason = f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + reason = ( + f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + ) removals.append(("reply", reason)) elif len(last_max_reply_num) >= one_thres_reply_num and all(last_max_reply_num[-one_thres_reply_num:]): # 如果最近one_thres_reply_num次都是reply,20%概率移除 removal_probability = 0.2 / global_config.focus_chat.consecutive_replies if random.random() < removal_probability: - reason = f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + reason = ( + f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + ) removals.append(("reply", reason)) else: logger.debug(f"{self.log_prefix}连续回复检测:无需移除reply动作,最近回复模式正常") return removals - - def get_available_actions_count(self) -> int: """获取当前可用动作数量(排除默认的no_action)""" current_actions = self.action_manager.get_using_actions_for_mode("normal") @@ -545,4 +543,4 @@ class ActionModifier: if available_count == 0: logger.debug(f"{self.log_prefix} 没有可用动作,跳过规划") return True - return False \ No newline at end of file + return False diff --git a/src/chat/planner_actions/planner_focus.py b/src/chat/planner_actions/planner_focus.py index bb3bdcacd..5a093ce93 100644 --- a/src/chat/planner_actions/planner_focus.py +++ b/src/chat/planner_actions/planner_focus.py @@ -83,9 +83,7 @@ class ActionPlanner: request_type="focus.planner", # 用于动作规划 ) - async def plan( - self, 
all_plan_info: List[InfoBase],loop_start_time: float - ) -> Dict[str, Any]: + async def plan(self, all_plan_info: List[InfoBase], loop_start_time: float) -> Dict[str, Any]: """ 规划器 (Planner): 使用LLM根据上下文决定做出什么动作。 diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index dd1b4e8a6..c5b9080ef 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -98,7 +98,6 @@ class DefaultReplyer: self.log_prefix = "replyer" self.request_type = request_type - if model_configs: self.express_model_configs = model_configs else: @@ -470,7 +469,13 @@ class DefaultReplyer: duration = end_time - start_time return name, result, duration - async def build_prompt_reply_context(self, reply_data=None, available_actions: List[str] = None, enable_timeout: bool = False, enable_tool: bool = True) -> str: + async def build_prompt_reply_context( + self, + reply_data=None, + available_actions: List[str] = None, + enable_timeout: bool = False, + enable_tool: bool = True, + ) -> str: """ 构建回复器上下文 @@ -537,10 +542,16 @@ class DefaultReplyer: # 并行执行四个构建任务 task_results = await asyncio.gather( - self._time_and_run_task(self.build_expression_habits(chat_talking_prompt_half, target), "build_expression_habits"), - self._time_and_run_task(self.build_relation_info(reply_data, chat_talking_prompt_half), "build_relation_info"), + self._time_and_run_task( + self.build_expression_habits(chat_talking_prompt_half, target), "build_expression_habits" + ), + self._time_and_run_task( + self.build_relation_info(reply_data, chat_talking_prompt_half), "build_relation_info" + ), self._time_and_run_task(self.build_memory_block(chat_talking_prompt_half, target), "build_memory_block"), - self._time_and_run_task(self.build_tool_info(reply_data, chat_talking_prompt_half, enable_tool=enable_tool), "build_tool_info"), + self._time_and_run_task( + self.build_tool_info(reply_data, chat_talking_prompt_half, enable_tool=enable_tool), "build_tool_info" + ), ) # 处理结果 diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 1c28ab7c8..290a73f1e 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -273,7 +273,6 @@ class MessageReceiveConfig(ConfigBase): class NormalChatConfig(ConfigBase): """普通聊天配置类""" - willing_mode: str = "classical" """意愿模式""" @@ -290,7 +289,6 @@ class NormalChatConfig(ConfigBase): """是否启用动作规划器""" - @dataclass class FocusChatConfig(ConfigBase): """专注聊天配置类""" @@ -348,7 +346,7 @@ class ToolConfig(ConfigBase): @dataclass class EmojiConfig(ConfigBase): """表情包配置类""" - + emoji_chance: float = 0.6 """发送表情包的基础概率""" diff --git a/src/person_info/relationship_fetcher.py b/src/person_info/relationship_fetcher.py index e2bde69de..f1c62851a 100644 --- a/src/person_info/relationship_fetcher.py +++ b/src/person_info/relationship_fetcher.py @@ -106,7 +106,7 @@ class RelationshipFetcher: await self._extract_single_info(person_id, info_type, person_name) relation_info = self._organize_known_info() - if short_impression and relation_info: + if short_impression and relation_info: relation_info = f"你对{person_name}的印象是:{short_impression}。具体来说:{relation_info}" elif short_impression: relation_info = f"你对{person_name}的印象是:{short_impression}" diff --git a/src/plugin_system/apis/generator_api.py b/src/plugin_system/apis/generator_api.py index ead002064..d4ed0f51b 100644 --- a/src/plugin_system/apis/generator_api.py +++ b/src/plugin_system/apis/generator_api.py @@ -92,9 +92,7 @@ async def generate_reply( """ try: # 获取回复器 - replyer = 
get_replyer( - chat_stream, chat_id, model_configs=model_configs, request_type=request_type - ) + replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs, request_type=request_type) if not replyer: logger.error("[GeneratorAPI] 无法获取回复器") return False, [] diff --git a/src/tools/tool_executor.py b/src/tools/tool_executor.py index b43dfcff3..b7b0d8f69 100644 --- a/src/tools/tool_executor.py +++ b/src/tools/tool_executor.py @@ -45,7 +45,7 @@ class ToolExecutor: self.chat_id = chat_id self.chat_stream = get_chat_manager().get_stream(self.chat_id) self.log_prefix = f"[{get_chat_manager().get_stream_name(self.chat_id) or self.chat_id}]" - + self.llm_model = LLMRequest( model=global_config.model.tool_use, request_type="tool_executor", From 1de15bcc3138a4a1b8faa67db64ebd80ee68308b Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 18:47:08 +0800 Subject: [PATCH 46/63] =?UTF-8?q?ref=EF=BC=9A=E8=B0=83=E6=95=B4=E6=96=87?= =?UTF-8?q?=E4=BB=B6=E4=BD=8D=E7=BD=AE=E5=92=8C=E5=91=BD=E5=90=8D=EF=BC=8C?= =?UTF-8?q?=E7=BB=93=E6=9E=84=E6=9B=B4=E6=B8=85=E6=99=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/focus_chat/heartFC_Cycleinfo.py | 135 ------------------ src/chat/focus_chat/heartFC_chat.py | 16 +-- src/chat/focus_chat/hfc_utils.py | 134 +++++++++++++++++ src/chat/focus_chat/info/chat_info.py | 97 ------------- .../info_processors/base_processor.py | 2 +- .../info_processors/chattinginfo_processor.py | 4 +- .../working_memory_processor.py | 6 +- .../observation/actions_observation.py | 0 .../observation/chatting_observation.py | 4 +- .../observation/hfcloop_observation.py | 2 +- .../observation/observation.py | 0 .../observation/working_observation.py | 0 src/chat/heart_flow/chat_state_info.py | 1 - .../heartflow_message_processor.py | 0 src/chat/heart_flow/sub_heartflow.py | 22 +-- src/chat/heart_flow/utils_chat.py | 73 ---------- .../memory_activator.py | 0 src/chat/message_receive/bot.py | 2 +- src/chat/normal_chat/normal_chat.py | 2 +- src/chat/planner_actions/action_modifier.py | 6 +- src/chat/planner_actions/planner_focus.py | 2 +- src/chat/replyer/default_generator.py | 4 +- src/chat/utils/utils.py | 69 +++++++++ 23 files changed, 227 insertions(+), 354 deletions(-) delete mode 100644 src/chat/focus_chat/heartFC_Cycleinfo.py delete mode 100644 src/chat/focus_chat/info/chat_info.py rename src/chat/{heart_flow => focus_chat}/observation/actions_observation.py (100%) rename src/chat/{heart_flow => focus_chat}/observation/chatting_observation.py (98%) rename src/chat/{heart_flow => focus_chat}/observation/hfcloop_observation.py (98%) rename src/chat/{heart_flow => focus_chat}/observation/observation.py (100%) rename src/chat/{heart_flow => focus_chat}/observation/working_observation.py (100%) rename src/chat/{focus_chat => heart_flow}/heartflow_message_processor.py (100%) delete mode 100644 src/chat/heart_flow/utils_chat.py rename src/chat/{focus_chat => memory_system}/memory_activator.py (100%) diff --git a/src/chat/focus_chat/heartFC_Cycleinfo.py b/src/chat/focus_chat/heartFC_Cycleinfo.py deleted file mode 100644 index f9a90780d..000000000 --- a/src/chat/focus_chat/heartFC_Cycleinfo.py +++ /dev/null @@ -1,135 +0,0 @@ -import time -import os -from typing import Optional, Dict, Any -from src.common.logger import get_logger -import json - -logger = get_logger("hfc") # Logger Name Changed - -log_dir = "log/log_cycle_debug/" - - -class CycleDetail: - """循环信息记录类""" - - def __init__(self, cycle_id: 
int): - self.cycle_id = cycle_id - self.prefix = "" - self.thinking_id = "" - self.start_time = time.time() - self.end_time: Optional[float] = None - self.timers: Dict[str, float] = {} - - # 新字段 - self.loop_observation_info: Dict[str, Any] = {} - self.loop_processor_info: Dict[str, Any] = {} # 前处理器信息 - self.loop_plan_info: Dict[str, Any] = {} - self.loop_action_info: Dict[str, Any] = {} - - def to_dict(self) -> Dict[str, Any]: - """将循环信息转换为字典格式""" - - def convert_to_serializable(obj, depth=0, seen=None): - if seen is None: - seen = set() - - # 防止递归过深 - if depth > 5: # 降低递归深度限制 - return str(obj) - - # 防止循环引用 - obj_id = id(obj) - if obj_id in seen: - return str(obj) - seen.add(obj_id) - - try: - if hasattr(obj, "to_dict"): - # 对于有to_dict方法的对象,直接调用其to_dict方法 - return obj.to_dict() - elif isinstance(obj, dict): - # 对于字典,只保留基本类型和可序列化的值 - return { - k: convert_to_serializable(v, depth + 1, seen) - for k, v in obj.items() - if isinstance(k, (str, int, float, bool)) - } - elif isinstance(obj, (list, tuple)): - # 对于列表和元组,只保留可序列化的元素 - return [ - convert_to_serializable(item, depth + 1, seen) - for item in obj - if not isinstance(item, (dict, list, tuple)) - or isinstance(item, (str, int, float, bool, type(None))) - ] - elif isinstance(obj, (str, int, float, bool, type(None))): - return obj - else: - return str(obj) - finally: - seen.remove(obj_id) - - return { - "cycle_id": self.cycle_id, - "start_time": self.start_time, - "end_time": self.end_time, - "timers": self.timers, - "thinking_id": self.thinking_id, - "loop_observation_info": convert_to_serializable(self.loop_observation_info), - "loop_processor_info": convert_to_serializable(self.loop_processor_info), - "loop_plan_info": convert_to_serializable(self.loop_plan_info), - "loop_action_info": convert_to_serializable(self.loop_action_info), - } - - def complete_cycle(self): - """完成循环,记录结束时间""" - self.end_time = time.time() - - # 处理 prefix,只保留中英文字符和基本标点 - if not self.prefix: - self.prefix = "group" - else: - # 只保留中文、英文字母、数字和基本标点 - allowed_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_") - self.prefix = ( - "".join(char for char in self.prefix if "\u4e00" <= char <= "\u9fff" or char in allowed_chars) - or "group" - ) - - # current_time_minute = time.strftime("%Y%m%d_%H%M", time.localtime()) - - # try: - # self.log_cycle_to_file( - # log_dir + self.prefix + f"/{current_time_minute}_cycle_" + str(self.cycle_id) + ".json" - # ) - # except Exception as e: - # logger.warning(f"写入文件日志,可能是群名称包含非法字符: {e}") - - def log_cycle_to_file(self, file_path: str): - """将循环信息写入文件""" - # 如果目录不存在,则创建目 - dir_name = os.path.dirname(file_path) - # 去除特殊字符,保留字母、数字、下划线、中划线和中文 - dir_name = "".join( - char for char in dir_name if char.isalnum() or char in ["_", "-", "/"] or "\u4e00" <= char <= "\u9fff" - ) - # print("dir_name:", dir_name) - if dir_name and not os.path.exists(dir_name): - os.makedirs(dir_name, exist_ok=True) - # 写入文件 - - file_path = os.path.join(dir_name, os.path.basename(file_path)) - # print("file_path:", file_path) - with open(file_path, "a", encoding="utf-8") as f: - f.write(json.dumps(self.to_dict(), ensure_ascii=False) + "\n") - - def set_thinking_id(self, thinking_id: str): - """设置思考消息ID""" - self.thinking_id = thinking_id - - def set_loop_info(self, loop_info: Dict[str, Any]): - """设置循环信息""" - self.loop_observation_info = loop_info["loop_observation_info"] - self.loop_processor_info = loop_info["loop_processor_info"] - self.loop_plan_info = loop_info["loop_plan_info"] - self.loop_action_info = 
loop_info["loop_action_info"] diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index a6d12b821..9e10da366 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -9,17 +9,14 @@ from rich.traceback import install from src.chat.utils.prompt_builder import global_prompt_manager from src.common.logger import get_logger from src.chat.utils.timer_calculator import Timer -from src.chat.heart_flow.observation.observation import Observation -from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail +from src.chat.focus_chat.observation.observation import Observation from src.chat.focus_chat.info.info_base import InfoBase from src.chat.focus_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor from src.chat.focus_chat.info_processors.working_memory_processor import WorkingMemoryProcessor -from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation -from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation -from src.chat.heart_flow.observation.chatting_observation import ChattingObservation -from src.chat.heart_flow.observation.actions_observation import ActionObservation - -from src.chat.focus_chat.memory_activator import MemoryActivator +from src.chat.focus_chat.observation.hfcloop_observation import HFCloopObservation +from src.chat.focus_chat.observation.working_observation import WorkingMemoryObservation +from src.chat.focus_chat.observation.chatting_observation import ChattingObservation +from src.chat.focus_chat.observation.actions_observation import ActionObservation from src.chat.focus_chat.info_processors.base_processor import BaseProcessor from src.chat.planner_actions.planner_focus import ActionPlanner from src.chat.planner_actions.action_modifier import ActionModifier @@ -28,6 +25,7 @@ from src.config.config import global_config from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger from src.chat.focus_chat.hfc_version_manager import get_hfc_version from src.person_info.relationship_builder_manager import relationship_builder_manager +from src.chat.focus_chat.hfc_utils import CycleDetail install(extra_lines=3) @@ -76,8 +74,6 @@ class HeartFChatting: self.chat_stream = get_chat_manager().get_stream(self.stream_id) self.log_prefix = f"[{get_chat_manager().get_stream_name(self.stream_id) or self.stream_id}]" - self.memory_activator = MemoryActivator() - self.relationship_builder = relationship_builder_manager.get_or_create_builder(self.stream_id) # 新增:消息计数器和疲惫阈值 diff --git a/src/chat/focus_chat/hfc_utils.py b/src/chat/focus_chat/hfc_utils.py index faec67eb8..496239851 100644 --- a/src/chat/focus_chat/hfc_utils.py +++ b/src/chat/focus_chat/hfc_utils.py @@ -5,9 +5,143 @@ from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.message import UserInfo from src.common.logger import get_logger import json +import time +import os +from typing import Optional, Dict, Any +from src.common.logger import get_logger +import json logger = get_logger(__name__) +log_dir = "log/log_cycle_debug/" + + +class CycleDetail: + """循环信息记录类""" + + def __init__(self, cycle_id: int): + self.cycle_id = cycle_id + self.prefix = "" + self.thinking_id = "" + self.start_time = time.time() + self.end_time: Optional[float] = None + self.timers: Dict[str, float] = {} + + # 新字段 + self.loop_observation_info: Dict[str, Any] = {} + self.loop_processor_info: Dict[str, Any] = {} # 前处理器信息 + self.loop_plan_info: Dict[str, Any] = 
{} + self.loop_action_info: Dict[str, Any] = {} + + def to_dict(self) -> Dict[str, Any]: + """将循环信息转换为字典格式""" + + def convert_to_serializable(obj, depth=0, seen=None): + if seen is None: + seen = set() + + # 防止递归过深 + if depth > 5: # 降低递归深度限制 + return str(obj) + + # 防止循环引用 + obj_id = id(obj) + if obj_id in seen: + return str(obj) + seen.add(obj_id) + + try: + if hasattr(obj, "to_dict"): + # 对于有to_dict方法的对象,直接调用其to_dict方法 + return obj.to_dict() + elif isinstance(obj, dict): + # 对于字典,只保留基本类型和可序列化的值 + return { + k: convert_to_serializable(v, depth + 1, seen) + for k, v in obj.items() + if isinstance(k, (str, int, float, bool)) + } + elif isinstance(obj, (list, tuple)): + # 对于列表和元组,只保留可序列化的元素 + return [ + convert_to_serializable(item, depth + 1, seen) + for item in obj + if not isinstance(item, (dict, list, tuple)) + or isinstance(item, (str, int, float, bool, type(None))) + ] + elif isinstance(obj, (str, int, float, bool, type(None))): + return obj + else: + return str(obj) + finally: + seen.remove(obj_id) + + return { + "cycle_id": self.cycle_id, + "start_time": self.start_time, + "end_time": self.end_time, + "timers": self.timers, + "thinking_id": self.thinking_id, + "loop_observation_info": convert_to_serializable(self.loop_observation_info), + "loop_processor_info": convert_to_serializable(self.loop_processor_info), + "loop_plan_info": convert_to_serializable(self.loop_plan_info), + "loop_action_info": convert_to_serializable(self.loop_action_info), + } + + def complete_cycle(self): + """完成循环,记录结束时间""" + self.end_time = time.time() + + # 处理 prefix,只保留中英文字符和基本标点 + if not self.prefix: + self.prefix = "group" + else: + # 只保留中文、英文字母、数字和基本标点 + allowed_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_") + self.prefix = ( + "".join(char for char in self.prefix if "\u4e00" <= char <= "\u9fff" or char in allowed_chars) + or "group" + ) + + # current_time_minute = time.strftime("%Y%m%d_%H%M", time.localtime()) + + # try: + # self.log_cycle_to_file( + # log_dir + self.prefix + f"/{current_time_minute}_cycle_" + str(self.cycle_id) + ".json" + # ) + # except Exception as e: + # logger.warning(f"写入文件日志,可能是群名称包含非法字符: {e}") + + def log_cycle_to_file(self, file_path: str): + """将循环信息写入文件""" + # 如果目录不存在,则创建目 + dir_name = os.path.dirname(file_path) + # 去除特殊字符,保留字母、数字、下划线、中划线和中文 + dir_name = "".join( + char for char in dir_name if char.isalnum() or char in ["_", "-", "/"] or "\u4e00" <= char <= "\u9fff" + ) + # print("dir_name:", dir_name) + if dir_name and not os.path.exists(dir_name): + os.makedirs(dir_name, exist_ok=True) + # 写入文件 + + file_path = os.path.join(dir_name, os.path.basename(file_path)) + # print("file_path:", file_path) + with open(file_path, "a", encoding="utf-8") as f: + f.write(json.dumps(self.to_dict(), ensure_ascii=False) + "\n") + + def set_thinking_id(self, thinking_id: str): + """设置思考消息ID""" + self.thinking_id = thinking_id + + def set_loop_info(self, loop_info: Dict[str, Any]): + """设置循环信息""" + self.loop_observation_info = loop_info["loop_observation_info"] + self.loop_processor_info = loop_info["loop_processor_info"] + self.loop_plan_info = loop_info["loop_plan_info"] + self.loop_action_info = loop_info["loop_action_info"] + + async def create_empty_anchor_message( platform: str, group_info: dict, chat_stream: ChatStream diff --git a/src/chat/focus_chat/info/chat_info.py b/src/chat/focus_chat/info/chat_info.py deleted file mode 100644 index 445529318..000000000 --- a/src/chat/focus_chat/info/chat_info.py +++ /dev/null @@ -1,97 +0,0 @@ -from typing import 
Dict, Optional -from dataclasses import dataclass -from .info_base import InfoBase - - -@dataclass -class ChatInfo(InfoBase): - """聊天信息类 - - 用于记录和管理聊天相关的信息,包括聊天ID、名称和类型等。 - 继承自 InfoBase 类,使用字典存储具体数据。 - - Attributes: - type (str): 信息类型标识符,固定为 "chat" - - Data Fields: - chat_id (str): 聊天的唯一标识符 - chat_name (str): 聊天的名称 - chat_type (str): 聊天的类型 - """ - - type: str = "chat" - - def set_chat_id(self, chat_id: str) -> None: - """设置聊天ID - - Args: - chat_id (str): 聊天的唯一标识符 - """ - self.data["chat_id"] = chat_id - - def set_chat_name(self, chat_name: str) -> None: - """设置聊天名称 - - Args: - chat_name (str): 聊天的名称 - """ - self.data["chat_name"] = chat_name - - def set_chat_type(self, chat_type: str) -> None: - """设置聊天类型 - - Args: - chat_type (str): 聊天的类型 - """ - self.data["chat_type"] = chat_type - - def get_chat_id(self) -> Optional[str]: - """获取聊天ID - - Returns: - Optional[str]: 聊天的唯一标识符,如果未设置则返回 None - """ - return self.get_info("chat_id") - - def get_chat_name(self) -> Optional[str]: - """获取聊天名称 - - Returns: - Optional[str]: 聊天的名称,如果未设置则返回 None - """ - return self.get_info("chat_name") - - def get_chat_type(self) -> Optional[str]: - """获取聊天类型 - - Returns: - Optional[str]: 聊天的类型,如果未设置则返回 None - """ - return self.get_info("chat_type") - - def get_type(self) -> str: - """获取信息类型 - - Returns: - str: 当前信息对象的类型标识符 - """ - return self.type - - def get_data(self) -> Dict[str, str]: - """获取所有信息数据 - - Returns: - Dict[str, str]: 包含所有信息数据的字典 - """ - return self.data - - def get_info(self, key: str) -> Optional[str]: - """获取特定属性的信息 - - Args: - key: 要获取的属性键名 - - Returns: - Optional[str]: 属性值,如果键不存在则返回 None - """ - return self.data.get(key) diff --git a/src/chat/focus_chat/info_processors/base_processor.py b/src/chat/focus_chat/info_processors/base_processor.py index 3b88eb841..26396580c 100644 --- a/src/chat/focus_chat/info_processors/base_processor.py +++ b/src/chat/focus_chat/info_processors/base_processor.py @@ -1,7 +1,7 @@ from abc import ABC, abstractmethod from typing import List, Any from src.chat.focus_chat.info.info_base import InfoBase -from src.chat.heart_flow.observation.observation import Observation +from src.chat.focus_chat.observation.observation import Observation from src.common.logger import get_logger logger = get_logger("base_processor") diff --git a/src/chat/focus_chat/info_processors/chattinginfo_processor.py b/src/chat/focus_chat/info_processors/chattinginfo_processor.py index 6443982e1..a4aea17c4 100644 --- a/src/chat/focus_chat/info_processors/chattinginfo_processor.py +++ b/src/chat/focus_chat/info_processors/chattinginfo_processor.py @@ -1,10 +1,10 @@ from typing import List, Any from src.chat.focus_chat.info.obs_info import ObsInfo -from src.chat.heart_flow.observation.observation import Observation +from src.chat.focus_chat.observation.observation import Observation from src.chat.focus_chat.info.info_base import InfoBase from .base_processor import BaseProcessor from src.common.logger import get_logger -from src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.focus_chat.observation.chatting_observation import ChattingObservation from datetime import datetime from src.llm_models.utils_model import LLMRequest from src.config.config import global_config diff --git a/src/chat/focus_chat/info_processors/working_memory_processor.py b/src/chat/focus_chat/info_processors/working_memory_processor.py index abe9786d4..ad2c88876 100644 --- a/src/chat/focus_chat/info_processors/working_memory_processor.py +++ 
b/src/chat/focus_chat/info_processors/working_memory_processor.py @@ -1,5 +1,5 @@ -from src.chat.heart_flow.observation.chatting_observation import ChattingObservation -from src.chat.heart_flow.observation.observation import Observation +from src.chat.focus_chat.observation.chatting_observation import ChattingObservation +from src.chat.focus_chat.observation.observation import Observation from src.llm_models.utils_model import LLMRequest from src.config.config import global_config import time @@ -9,7 +9,7 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.message_receive.chat_stream import get_chat_manager from .base_processor import BaseProcessor from typing import List -from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation +from src.chat.focus_chat.observation.working_observation import WorkingMemoryObservation from src.chat.focus_chat.working_memory.working_memory import WorkingMemory from src.chat.focus_chat.info.info_base import InfoBase from json_repair import repair_json diff --git a/src/chat/heart_flow/observation/actions_observation.py b/src/chat/focus_chat/observation/actions_observation.py similarity index 100% rename from src/chat/heart_flow/observation/actions_observation.py rename to src/chat/focus_chat/observation/actions_observation.py diff --git a/src/chat/heart_flow/observation/chatting_observation.py b/src/chat/focus_chat/observation/chatting_observation.py similarity index 98% rename from src/chat/heart_flow/observation/chatting_observation.py rename to src/chat/focus_chat/observation/chatting_observation.py index 2a4a42856..201e313fa 100644 --- a/src/chat/heart_flow/observation/chatting_observation.py +++ b/src/chat/focus_chat/observation/chatting_observation.py @@ -8,9 +8,9 @@ from src.chat.utils.chat_message_builder import ( get_person_id_list, ) from src.chat.utils.prompt_builder import global_prompt_manager, Prompt -from src.chat.heart_flow.observation.observation import Observation +from src.chat.focus_chat.observation.observation import Observation from src.common.logger import get_logger -from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.utils.utils import get_chat_type_and_target_info logger = get_logger("observation") diff --git a/src/chat/heart_flow/observation/hfcloop_observation.py b/src/chat/focus_chat/observation/hfcloop_observation.py similarity index 98% rename from src/chat/heart_flow/observation/hfcloop_observation.py rename to src/chat/focus_chat/observation/hfcloop_observation.py index c2834257b..ad7245f8a 100644 --- a/src/chat/heart_flow/observation/hfcloop_observation.py +++ b/src/chat/focus_chat/observation/hfcloop_observation.py @@ -2,7 +2,7 @@ # 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 from datetime import datetime from src.common.logger import get_logger -from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail +from src.chat.focus_chat.hfc_utils import CycleDetail from typing import List # Import the new utility function diff --git a/src/chat/heart_flow/observation/observation.py b/src/chat/focus_chat/observation/observation.py similarity index 100% rename from src/chat/heart_flow/observation/observation.py rename to src/chat/focus_chat/observation/observation.py diff --git a/src/chat/heart_flow/observation/working_observation.py b/src/chat/focus_chat/observation/working_observation.py similarity index 100% rename from src/chat/heart_flow/observation/working_observation.py rename to src/chat/focus_chat/observation/working_observation.py 
diff --git a/src/chat/heart_flow/chat_state_info.py b/src/chat/heart_flow/chat_state_info.py index db4c2d5c7..320093533 100644 --- a/src/chat/heart_flow/chat_state_info.py +++ b/src/chat/heart_flow/chat_state_info.py @@ -7,7 +7,6 @@ class ChatState(enum.Enum): NORMAL = "随便水群" FOCUSED = "认真水群" - class ChatStateInfo: def __init__(self): self.chat_status: ChatState = ChatState.NORMAL diff --git a/src/chat/focus_chat/heartflow_message_processor.py b/src/chat/heart_flow/heartflow_message_processor.py similarity index 100% rename from src/chat/focus_chat/heartflow_message_processor.py rename to src/chat/heart_flow/heartflow_message_processor.py diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py index 206c00364..6dee805a0 100644 --- a/src/chat/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -1,5 +1,3 @@ -from .observation.observation import Observation -from src.chat.heart_flow.observation.chatting_observation import ChattingObservation import asyncio import time from typing import Optional, List, Dict, Tuple @@ -10,7 +8,7 @@ from src.chat.message_receive.chat_stream import get_chat_manager from src.chat.focus_chat.heartFC_chat import HeartFChatting from src.chat.normal_chat.normal_chat import NormalChat from src.chat.heart_flow.chat_state_info import ChatState, ChatStateInfo -from .utils_chat import get_chat_type_and_target_info +from src.chat.utils.utils import get_chat_type_and_target_info from src.config.config import global_config from rich.traceback import install @@ -314,24 +312,6 @@ class SubHeartflow: f"{log_prefix} 尝试将状态从 {current_state.value} 变为 {new_state.value},但未成功或未执行更改。" ) - def add_observation(self, observation: Observation): - for existing_obs in self.observations: - if existing_obs.observe_id == observation.observe_id: - return - self.observations.append(observation) - - def remove_observation(self, observation: Observation): - if observation in self.observations: - self.observations.remove(observation) - - def get_all_observations(self) -> list[Observation]: - return self.observations - - def _get_primary_observation(self) -> Optional[ChattingObservation]: - if self.observations and isinstance(self.observations[0], ChattingObservation): - return self.observations[0] - logger.warning(f"SubHeartflow {self.subheartflow_id} 没有找到有效的 ChattingObservation") - return None def get_normal_chat_last_speak_time(self) -> float: if self.normal_chat_instance: diff --git a/src/chat/heart_flow/utils_chat.py b/src/chat/heart_flow/utils_chat.py deleted file mode 100644 index e25ee6b6e..000000000 --- a/src/chat/heart_flow/utils_chat.py +++ /dev/null @@ -1,73 +0,0 @@ -from typing import Optional, Tuple, Dict -from src.common.logger import get_logger -from src.chat.message_receive.chat_stream import get_chat_manager -from src.person_info.person_info import PersonInfoManager, get_person_info_manager - -logger = get_logger("heartflow_utils") - - -def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]: - """ - 获取聊天类型(是否群聊)和私聊对象信息。 - - Args: - chat_id: 聊天流ID - - Returns: - Tuple[bool, Optional[Dict]]: - - bool: 是否为群聊 (True 是群聊, False 是私聊或未知) - - Optional[Dict]: 如果是私聊,包含对方信息的字典;否则为 None。 - 字典包含: platform, user_id, user_nickname, person_id, person_name - """ - is_group_chat = False # Default to private/unknown - chat_target_info = None - - try: - chat_stream = get_chat_manager().get_stream(chat_id) - - if chat_stream: - if chat_stream.group_info: - is_group_chat = True - chat_target_info = None # Explicitly None for 
group chat - elif chat_stream.user_info: # It's a private chat - is_group_chat = False - user_info = chat_stream.user_info - platform = chat_stream.platform - user_id = user_info.user_id - - # Initialize target_info with basic info - target_info = { - "platform": platform, - "user_id": user_id, - "user_nickname": user_info.user_nickname, - "person_id": None, - "person_name": None, - } - - # Try to fetch person info - try: - # Assume get_person_id is sync (as per original code), keep using to_thread - person_id = PersonInfoManager.get_person_id(platform, user_id) - person_name = None - if person_id: - # get_value is async, so await it directly - person_info_manager = get_person_info_manager() - person_name = person_info_manager.get_value_sync(person_id, "person_name") - - target_info["person_id"] = person_id - target_info["person_name"] = person_name - except Exception as person_e: - logger.warning( - f"获取 person_id 或 person_name 时出错 for {platform}:{user_id} in utils: {person_e}" - ) - - chat_target_info = target_info - else: - logger.warning(f"无法获取 chat_stream for {chat_id} in utils") - # Keep defaults: is_group_chat=False, chat_target_info=None - - except Exception as e: - logger.error(f"获取聊天类型和目标信息时出错 for {chat_id}: {e}", exc_info=True) - # Keep defaults on error - - return is_group_chat, chat_target_info diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/memory_system/memory_activator.py similarity index 100% rename from src/chat/focus_chat/memory_activator.py rename to src/chat/memory_system/memory_activator.py diff --git a/src/chat/message_receive/bot.py b/src/chat/message_receive/bot.py index 6126fc751..0bc5bec58 100644 --- a/src/chat/message_receive/bot.py +++ b/src/chat/message_receive/bot.py @@ -9,7 +9,7 @@ from src.chat.message_receive.message import MessageRecv from src.experimental.only_message_process import MessageProcessor from src.chat.message_receive.storage import MessageStorage from src.experimental.PFC.pfc_manager import PFCManager -from src.chat.focus_chat.heartflow_message_processor import HeartFCMessageReceiver +from src.chat.heart_flow.heartflow_message_processor import HeartFCMessageReceiver from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.config.config import global_config from src.plugin_system.core.component_registry import component_registry # 导入新插件系统 diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 6817670f0..5e6b14f63 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -21,7 +21,7 @@ import traceback from src.chat.planner_actions.planner_normal import NormalChatPlanner from src.chat.planner_actions.action_modifier import ActionModifier -from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.utils.utils import get_chat_type_and_target_info from src.manager.mood_manager import mood_manager willing_manager = get_willing_manager() diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py index c57842ae4..426c54657 100644 --- a/src/chat/planner_actions/action_modifier.py +++ b/src/chat/planner_actions/action_modifier.py @@ -1,8 +1,8 @@ from typing import List, Optional, Any, Dict -from src.chat.heart_flow.observation.observation import Observation +from src.chat.focus_chat.observation.observation import Observation from src.common.logger import get_logger -from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation -from 
src.chat.heart_flow.observation.chatting_observation import ChattingObservation +from src.chat.focus_chat.observation.hfcloop_observation import HFCloopObservation +from src.chat.focus_chat.observation.chatting_observation import ChattingObservation from src.chat.message_receive.chat_stream import get_chat_manager from src.config.config import global_config from src.llm_models.utils_model import LLMRequest diff --git a/src/chat/planner_actions/planner_focus.py b/src/chat/planner_actions/planner_focus.py index bb3bdcacd..c52b8b486 100644 --- a/src/chat/planner_actions/planner_focus.py +++ b/src/chat/planner_actions/planner_focus.py @@ -11,7 +11,7 @@ from src.common.logger import get_logger from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.planner_actions.action_manager import ActionManager from json_repair import repair_json -from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.utils.utils import get_chat_type_and_target_info from datetime import datetime logger = get_logger("planner") diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index dd1b4e8a6..62ff926f5 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -10,7 +10,7 @@ from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.utils.timer_calculator import Timer # <--- Import Timer from src.chat.focus_chat.heartFC_sender import HeartFCSender -from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info +from src.chat.utils.utils import get_chat_type_and_target_info from src.chat.message_receive.chat_stream import ChatStream from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp from src.chat.utils.prompt_builder import Prompt, global_prompt_manager @@ -26,7 +26,7 @@ from src.person_info.person_info import get_person_info_manager from datetime import datetime import re from src.chat.knowledge.knowledge_lib import qa_manager -from src.chat.focus_chat.memory_activator import MemoryActivator +from src.chat.memory_system.memory_activator import MemoryActivator from src.tools.tool_executor import ToolExecutor logger = get_logger("replyer") diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py index a081ad9a5..d4bb5b17a 100644 --- a/src/chat/utils/utils.py +++ b/src/chat/utils/utils.py @@ -14,6 +14,9 @@ from src.llm_models.utils_model import LLMRequest from .typo_generator import ChineseTypoGenerator from ...config.config import global_config from ...common.message_repository import find_messages, count_messages +from typing import Optional, Tuple, Dict +from src.chat.message_receive.chat_stream import get_chat_manager +from src.person_info.person_info import PersonInfoManager, get_person_info_manager logger = get_logger("chat_utils") @@ -638,3 +641,69 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal" else: # mode = "lite" or unknown # 只返回时分秒格式,喵~ return time.strftime("%H:%M:%S", time.localtime(timestamp)) + +def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]: + """ + 获取聊天类型(是否群聊)和私聊对象信息。 + + Args: + chat_id: 聊天流ID + + Returns: + Tuple[bool, Optional[Dict]]: + - bool: 是否为群聊 (True 是群聊, False 是私聊或未知) + - Optional[Dict]: 如果是私聊,包含对方信息的字典;否则为 None。 + 字典包含: platform, user_id, user_nickname, person_id, person_name + """ + is_group_chat = False # Default to private/unknown + chat_target_info = None + + try: + chat_stream = 
get_chat_manager().get_stream(chat_id) + + if chat_stream: + if chat_stream.group_info: + is_group_chat = True + chat_target_info = None # Explicitly None for group chat + elif chat_stream.user_info: # It's a private chat + is_group_chat = False + user_info = chat_stream.user_info + platform = chat_stream.platform + user_id = user_info.user_id + + # Initialize target_info with basic info + target_info = { + "platform": platform, + "user_id": user_id, + "user_nickname": user_info.user_nickname, + "person_id": None, + "person_name": None, + } + + # Try to fetch person info + try: + # Assume get_person_id is sync (as per original code), keep using to_thread + person_id = PersonInfoManager.get_person_id(platform, user_id) + person_name = None + if person_id: + # get_value is async, so await it directly + person_info_manager = get_person_info_manager() + person_name = person_info_manager.get_value_sync(person_id, "person_name") + + target_info["person_id"] = person_id + target_info["person_name"] = person_name + except Exception as person_e: + logger.warning( + f"获取 person_id 或 person_name 时出错 for {platform}:{user_id} in utils: {person_e}" + ) + + chat_target_info = target_info + else: + logger.warning(f"无法获取 chat_stream for {chat_id} in utils") + # Keep defaults: is_group_chat=False, chat_target_info=None + + except Exception as e: + logger.error(f"获取聊天类型和目标信息时出错 for {chat_id}: {e}", exc_info=True) + # Keep defaults on error + + return is_group_chat, chat_target_info \ No newline at end of file From dc24a764137f5108383519600f253e6ea9aaa99d Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 18:47:18 +0800 Subject: [PATCH 47/63] ruff --- src/chat/focus_chat/hfc_utils.py | 5 +---- src/chat/planner_actions/action_modifier.py | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/chat/focus_chat/hfc_utils.py b/src/chat/focus_chat/hfc_utils.py index 496239851..0e7fe6a2c 100644 --- a/src/chat/focus_chat/hfc_utils.py +++ b/src/chat/focus_chat/hfc_utils.py @@ -5,11 +5,8 @@ from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.message import UserInfo from src.common.logger import get_logger import json -import time import os -from typing import Optional, Dict, Any -from src.common.logger import get_logger -import json +from typing import Dict, Any logger = get_logger(__name__) diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py index 426c54657..44acabf9c 100644 --- a/src/chat/planner_actions/action_modifier.py +++ b/src/chat/planner_actions/action_modifier.py @@ -2,7 +2,6 @@ from typing import List, Optional, Any, Dict from src.chat.focus_chat.observation.observation import Observation from src.common.logger import get_logger from src.chat.focus_chat.observation.hfcloop_observation import HFCloopObservation -from src.chat.focus_chat.observation.chatting_observation import ChattingObservation from src.chat.message_receive.chat_stream import get_chat_manager from src.config.config import global_config from src.llm_models.utils_model import LLMRequest From 1365099fd4fb8e80ca8627d129d20cd3e5be02e9 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 20:14:09 +0800 Subject: [PATCH 48/63] =?UTF-8?q?remove=EF=BC=9A=E5=86=97=E4=BD=99?= =?UTF-8?q?=E7=9A=84sbhf=E4=BB=A3=E7=A0=81=E5=92=8Cfocus=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 1 + 
src/api/apiforgui.py | 34 -- ...loop_observation.py => focus_loop_info.py} | 42 +-- src/chat/focus_chat/heartFC_chat.py | 209 +---------- src/chat/focus_chat/hfc_performance_logger.py | 1 - src/chat/focus_chat/hfc_utils.py | 35 -- src/chat/focus_chat/hfc_version_manager.py | 2 +- src/chat/focus_chat/info/action_info.py | 83 ----- src/chat/focus_chat/info/cycle_info.py | 157 -------- src/chat/focus_chat/info/info_base.py | 69 ---- src/chat/focus_chat/info/obs_info.py | 165 --------- .../focus_chat/info/workingmemory_info.py | 86 ----- .../info_processors/base_processor.py | 51 --- .../info_processors/chattinginfo_processor.py | 142 -------- .../observation/actions_observation.py | 46 --- .../observation/chatting_observation.py | 183 ---------- .../focus_chat/observation/observation.py | 25 -- .../observation/working_observation.py | 34 -- src/chat/heart_flow/background_tasks.py | 173 --------- src/chat/heart_flow/heartflow.py | 106 ++---- .../heart_flow/heartflow_message_processor.py | 21 +- src/chat/heart_flow/sub_heartflow.py | 83 ----- src/chat/heart_flow/subheartflow_manager.py | 337 ------------------ src/chat/memory_system/memory_activator.py | 6 - src/chat/message_receive/__init__.py | 2 +- ...age_sender.py => normal_message_sender.py} | 0 .../uni_message_sender.py} | 0 src/chat/normal_chat/normal_chat.py | 2 +- src/chat/planner_actions/action_modifier.py | 18 +- src/chat/planner_actions/planner_focus.py | 138 +++---- src/chat/replyer/default_generator.py | 2 +- src/chat/utils/statistic.py | 6 +- .../working_memory/memory_item.py | 0 .../working_memory/memory_manager.py | 0 .../working_memory/working_memory.py | 0 .../working_memory_processor.py | 5 +- src/common/logger.py | 1 - src/config/official_configs.py | 8 - src/experimental/PFC/message_sender.py | 2 +- src/main.py | 7 +- src/plugin_system/apis/chat_api.py | 34 -- src/plugin_system/apis/send_api.py | 2 +- src/plugin_system/base/base_action.py | 1 - template/bot_config_template.toml | 23 +- 44 files changed, 132 insertions(+), 2210 deletions(-) rename src/chat/focus_chat/{observation/hfcloop_observation.py => focus_loop_info.py} (67%) delete mode 100644 src/chat/focus_chat/info/action_info.py delete mode 100644 src/chat/focus_chat/info/cycle_info.py delete mode 100644 src/chat/focus_chat/info/info_base.py delete mode 100644 src/chat/focus_chat/info/obs_info.py delete mode 100644 src/chat/focus_chat/info/workingmemory_info.py delete mode 100644 src/chat/focus_chat/info_processors/base_processor.py delete mode 100644 src/chat/focus_chat/info_processors/chattinginfo_processor.py delete mode 100644 src/chat/focus_chat/observation/actions_observation.py delete mode 100644 src/chat/focus_chat/observation/chatting_observation.py delete mode 100644 src/chat/focus_chat/observation/observation.py delete mode 100644 src/chat/focus_chat/observation/working_observation.py delete mode 100644 src/chat/heart_flow/background_tasks.py delete mode 100644 src/chat/heart_flow/subheartflow_manager.py rename src/chat/message_receive/{message_sender.py => normal_message_sender.py} (100%) rename src/chat/{focus_chat/heartFC_sender.py => message_receive/uni_message_sender.py} (100%) rename src/chat/{focus_chat => }/working_memory/memory_item.py (100%) rename src/chat/{focus_chat => }/working_memory/memory_manager.py (100%) rename src/chat/{focus_chat => }/working_memory/working_memory.py (100%) rename src/chat/{focus_chat/info_processors => working_memory}/working_memory_processor.py (98%) diff --git a/changelogs/changelog.md b/changelogs/changelog.md 
index eab206f1b..f31a46239 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -12,6 +12,7 @@ - normal的插件允许llm激活 - 合并action激活器 - emoji统一可选随机激活或llm激活 +- 移除observation和processor,简化focus的代码逻辑 ## [0.8.1] - 2025-7-5 diff --git a/src/api/apiforgui.py b/src/api/apiforgui.py index e1cffebb6..01685939e 100644 --- a/src/api/apiforgui.py +++ b/src/api/apiforgui.py @@ -1,7 +1,6 @@ from src.chat.heart_flow.heartflow import heartflow from src.chat.heart_flow.sub_heartflow import ChatState from src.common.logger import get_logger -import time logger = get_logger("api") @@ -20,39 +19,6 @@ async def forced_change_subheartflow_status(subheartflow_id: str, status: ChatSt return False -async def get_subheartflow_cycle_info(subheartflow_id: str, history_len: int) -> dict: - """获取子心流的循环信息""" - subheartflow_cycle_info = await heartflow.api_get_subheartflow_cycle_info(subheartflow_id, history_len) - logger.debug(f"子心流 {subheartflow_id} 循环信息: {subheartflow_cycle_info}") - if subheartflow_cycle_info: - return subheartflow_cycle_info - else: - logger.warning(f"子心流 {subheartflow_id} 循环信息未找到") - return None - - -async def get_normal_chat_replies(subheartflow_id: str, limit: int = 10) -> list: - """获取子心流的NormalChat回复记录 - - Args: - subheartflow_id: 子心流ID - limit: 最大返回数量,默认10条 - - Returns: - list: 回复记录列表,如果未找到则返回空列表 - """ - replies = await heartflow.api_get_normal_chat_replies(subheartflow_id, limit) - logger.debug(f"子心流 {subheartflow_id} NormalChat回复记录: 获取到 {len(replies) if replies else 0} 条") - if replies: - # 格式化时间戳为可读时间 - for reply in replies: - if "time" in reply: - reply["formatted_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(reply["time"])) - return replies - else: - logger.warning(f"子心流 {subheartflow_id} NormalChat回复记录未找到") - return [] - async def get_all_states(): """获取所有状态""" diff --git a/src/chat/focus_chat/observation/hfcloop_observation.py b/src/chat/focus_chat/focus_loop_info.py similarity index 67% rename from src/chat/focus_chat/observation/hfcloop_observation.py rename to src/chat/focus_chat/focus_loop_info.py index ad7245f8a..2389f10c9 100644 --- a/src/chat/focus_chat/observation/hfcloop_observation.py +++ b/src/chat/focus_chat/focus_loop_info.py @@ -6,20 +6,16 @@ from src.chat.focus_chat.hfc_utils import CycleDetail from typing import List # Import the new utility function -logger = get_logger("observation") +logger = get_logger("loop_info") # 所有观察的基类 -class HFCloopObservation: +class FocusLoopInfo: def __init__(self, observe_id): - self.observe_info = "" self.observe_id = observe_id self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 self.history_loop: List[CycleDetail] = [] - def get_observe_info(self): - return self.observe_info - def add_loop_info(self, loop_info: CycleDetail): self.history_loop.append(loop_info) @@ -50,11 +46,6 @@ class HFCloopObservation: action_taken_time_str = ( datetime.fromtimestamp(action_taken_time).strftime("%H:%M:%S") if action_taken_time > 0 else "未知时间" ) - # print(action_type) - # print(action_reasoning) - # print(is_taken) - # print(action_taken_time_str) - # print("--------------------------------") if action_reasoning != cycle_last_reason: cycle_last_reason = action_reasoning action_reasoning_str = f"你选择这个action的原因是:{action_reasoning}" @@ -71,9 +62,6 @@ class HFCloopObservation: else: action_detailed_str += f"{action_taken_time_str}时,你选择回复(action:{action_type},内容是:'{response_text}'),但是动作失败了。{action_reasoning_str}\n" elif action_type == "no_reply": - # action_detailed_str += ( - # 
f"{action_taken_time_str}时,你选择不回复(action:{action_type}),{action_reasoning_str}\n" - # ) pass else: if is_taken: @@ -88,17 +76,6 @@ class HFCloopObservation: else: cycle_info_block = "\n" - # 根据连续文本回复的数量构建提示信息 - if consecutive_text_replies >= 3: # 如果最近的三个活动都是文本回复 - cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意' - elif consecutive_text_replies == 2: # 如果最近的两个活动是文本回复 - cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意' - - # 包装提示块,增加可读性,即使没有连续回复也给个标记 - # if cycle_info_block: - # cycle_info_block = f"\n你最近的回复\n{cycle_info_block}\n" - # else: - # cycle_info_block = "\n" # 获取history_loop中最新添加的 if self.history_loop: @@ -112,17 +89,4 @@ class HFCloopObservation: else: cycle_info_block += f"距离你上一次阅读消息并思考和规划,已经过去了{time_diff}秒\n" else: - cycle_info_block += "你还没看过消息\n" - - self.observe_info = cycle_info_block - - def to_dict(self) -> dict: - """将观察对象转换为可序列化的字典""" - # 只序列化基本信息,避免循环引用 - return { - "observe_info": self.observe_info, - "observe_id": self.observe_id, - "last_observe_time": self.last_observe_time, - # 不序列化history_loop,避免循环引用 - "history_loop_count": len(self.history_loop), - } + cycle_info_block += "你还没看过消息\n" \ No newline at end of file diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index 9e10da366..ac95a984b 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -9,15 +9,7 @@ from rich.traceback import install from src.chat.utils.prompt_builder import global_prompt_manager from src.common.logger import get_logger from src.chat.utils.timer_calculator import Timer -from src.chat.focus_chat.observation.observation import Observation -from src.chat.focus_chat.info.info_base import InfoBase -from src.chat.focus_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor -from src.chat.focus_chat.info_processors.working_memory_processor import WorkingMemoryProcessor -from src.chat.focus_chat.observation.hfcloop_observation import HFCloopObservation -from src.chat.focus_chat.observation.working_observation import WorkingMemoryObservation -from src.chat.focus_chat.observation.chatting_observation import ChattingObservation -from src.chat.focus_chat.observation.actions_observation import ActionObservation -from src.chat.focus_chat.info_processors.base_processor import BaseProcessor +from src.chat.focus_chat.focus_loop_info import FocusLoopInfo from src.chat.planner_actions.planner_focus import ActionPlanner from src.chat.planner_actions.action_modifier import ActionModifier from src.chat.planner_actions.action_manager import ActionManager @@ -32,23 +24,8 @@ install(extra_lines=3) # 注释:原来的动作修改超时常量已移除,因为改为顺序执行 -# 定义观察器映射:键是观察器名称,值是 (观察器类, 初始化参数) -OBSERVATION_CLASSES = { - "ChattingObservation": (ChattingObservation, "chat_id"), - "WorkingMemoryObservation": (WorkingMemoryObservation, "observe_id"), - "HFCloopObservation": (HFCloopObservation, "observe_id"), -} - -# 定义处理器映射:键是处理器名称,值是 (处理器类, 可选的配置键名) -PROCESSOR_CLASSES = { - "ChattingInfoProcessor": (ChattingInfoProcessor, None), - "WorkingMemoryProcessor": (WorkingMemoryProcessor, "working_memory_processor"), -} - logger = get_logger("hfc") # Logger Name Changed - - class HeartFChatting: """ 管理一个连续的Focus Chat循环 @@ -83,25 +60,14 @@ class HeartFChatting: self._message_threshold = max(10, int(30 * global_config.chat.exit_focus_threshold)) self._fatigue_triggered = False # 是否已触发疲惫退出 - # 初始化观察器 - 
self.observations: List[Observation] = [] - self._register_observations() - - # 根据配置文件和默认规则确定启用的处理器 - self.enabled_processor_names = ["ChattingInfoProcessor"] - if global_config.focus_chat.working_memory_processor: - self.enabled_processor_names.append("WorkingMemoryProcessor") - - self.processors: List[BaseProcessor] = [] - self._register_default_processors() + self.loop_info: FocusLoopInfo = FocusLoopInfo(observe_id=self.stream_id) self.action_manager = ActionManager() self.action_planner = ActionPlanner( - log_prefix=self.log_prefix, action_manager=self.action_manager + chat_id = self.stream_id, + action_manager=self.action_manager ) self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id) - self.action_observation = ActionObservation(observe_id=self.stream_id) - self.action_observation.set_action_manager(self.action_manager) self._processing_lock = asyncio.Lock() @@ -130,66 +96,8 @@ class HeartFChatting: f"{self.log_prefix} HeartFChatting 初始化完成,消息疲惫阈值: {self._message_threshold}条(基于exit_focus_threshold={global_config.chat.exit_focus_threshold}计算,仅在auto模式下生效)" ) - def _register_observations(self): - """注册所有观察器""" - self.observations = [] # 清空已有的 - - for name, (observation_class, param_name) in OBSERVATION_CLASSES.items(): - try: - # 检查是否需要跳过WorkingMemoryObservation - if name == "WorkingMemoryObservation": - # 如果工作记忆处理器被禁用,则跳过WorkingMemoryObservation - if not global_config.focus_chat.working_memory_processor: - logger.debug(f"{self.log_prefix} 工作记忆处理器已禁用,跳过注册观察器 {name}") - continue - - # 根据参数名使用正确的参数 - kwargs = {param_name: self.stream_id} - observation = observation_class(**kwargs) - self.observations.append(observation) - logger.debug(f"{self.log_prefix} 注册观察器 {name}") - except Exception as e: - logger.error(f"{self.log_prefix} 观察器 {name} 构造失败: {e}") - - if self.observations: - logger.info(f"{self.log_prefix} 已注册观察器: {[o.__class__.__name__ for o in self.observations]}") - else: - logger.warning(f"{self.log_prefix} 没有注册任何观察器") - - def _register_default_processors(self): - """根据 self.enabled_processor_names 注册信息处理器""" - self.processors = [] # 清空已有的 - - for name in self.enabled_processor_names: # 'name' is "ChattingInfoProcessor", etc. 
- processor_info = PROCESSOR_CLASSES.get(name) # processor_info is (ProcessorClass, config_key) - if processor_info: - processor_actual_class = processor_info[0] # 获取实际的类定义 - # 根据处理器类名判断构造参数 - if name == "ChattingInfoProcessor": - self.processors.append(processor_actual_class()) - elif name == "WorkingMemoryProcessor": - self.processors.append(processor_actual_class(subheartflow_id=self.stream_id)) - else: - try: - self.processors.append(processor_actual_class()) # 尝试无参构造 - logger.debug(f"{self.log_prefix} 注册处理器 {name} (尝试无参构造).") - except TypeError: - logger.error( - f"{self.log_prefix} 处理器 {name} 构造失败。它可能需要参数(如 subheartflow_id)但未在注册逻辑中明确处理。" - ) - else: - logger.warning( - f"{self.log_prefix} 在 PROCESSOR_CLASSES 中未找到名为 '{name}' 的处理器定义,将跳过注册。" - ) - - if self.processors: - logger.info(f"{self.log_prefix} 已注册处理器: {[p.__class__.__name__ for p in self.processors]}") - else: - logger.warning(f"{self.log_prefix} 没有注册任何处理器。这可能是由于配置错误或所有处理器都被禁用了。") - async def start(self): """检查是否需要启动主循环,如果未激活则启动。""" - logger.debug(f"{self.log_prefix} 开始启动 HeartFChatting") # 如果循环已经激活,直接返回 if self._loop_active: @@ -210,8 +118,6 @@ class HeartFChatting: try: # 等待旧任务确实被取消 await asyncio.wait_for(self._loop_task, timeout=5.0) - except (asyncio.CancelledError, asyncio.TimeoutError): - pass # 忽略取消或超时错误 except Exception as e: logger.warning(f"{self.log_prefix} 等待旧任务取消时出错: {e}") self._loop_task = None # 清理旧任务引用 @@ -310,14 +216,11 @@ class HeartFChatting: logger.error(f"{self.log_prefix} 处理上下文时出错: {e}") # 为当前循环设置错误状态,防止后续重复报错 error_loop_info = { - "loop_observation_info": {}, - "loop_processor_info": {}, "loop_plan_info": { "action_result": { "action_type": "error", "action_data": {}, }, - "observed_messages": "", }, "loop_action_info": { "action_taken": False, @@ -335,14 +238,8 @@ class HeartFChatting: self._current_cycle_detail.set_loop_info(loop_info) - # 从observations列表中获取HFCloopObservation - hfcloop_observation = next( - (obs for obs in self.observations if isinstance(obs, HFCloopObservation)), None - ) - if hfcloop_observation: - hfcloop_observation.add_loop_info(self._current_cycle_detail) - else: - logger.warning(f"{self.log_prefix} 未找到HFCloopObservation实例") + + self.loop_info.add_loop_info(self._current_cycle_detail) self._current_cycle_detail.timers = cycle_timers @@ -391,15 +288,12 @@ class HeartFChatting: # 如果_current_cycle_detail存在但未完成,为其设置错误状态 if self._current_cycle_detail and not hasattr(self._current_cycle_detail, "end_time"): error_loop_info = { - "loop_observation_info": {}, - "loop_processor_info": {}, "loop_plan_info": { "action_result": { "action_type": "error", "action_data": {}, "reasoning": f"循环处理失败: {e}", }, - "observed_messages": "", }, "loop_action_info": { "action_taken": False, @@ -445,65 +339,10 @@ class HeartFChatting: if acquired and self._processing_lock.locked(): self._processing_lock.release() - async def _process_processors(self, observations: List[Observation]) -> tuple[List[InfoBase], Dict[str, float]]: - # 记录并行任务开始时间 - parallel_start_time = time.time() - logger.debug(f"{self.log_prefix} 开始信息处理器并行任务") - - processor_tasks = [] - task_to_name_map = {} - - for processor in self.processors: - processor_name = processor.__class__.log_prefix - - async def run_with_timeout(proc=processor): - return await proc.process_info(observations=observations) - - task = asyncio.create_task(run_with_timeout()) - - processor_tasks.append(task) - task_to_name_map[task] = processor_name - logger.debug(f"{self.log_prefix} 启动处理器任务: {processor_name}") - - pending_tasks = set(processor_tasks) - all_plan_info: 
List[InfoBase] = [] - - while pending_tasks: - done, pending_tasks = await asyncio.wait(pending_tasks, return_when=asyncio.FIRST_COMPLETED) - - for task in done: - processor_name = task_to_name_map[task] - task_completed_time = time.time() - duration_since_parallel_start = task_completed_time - parallel_start_time - - try: - result_list = await task - logger.debug(f"{self.log_prefix} 处理器 {processor_name} 已完成!") - if result_list is not None: - all_plan_info.extend(result_list) - else: - logger.warning(f"{self.log_prefix} 处理器 {processor_name} 返回了 None") - except Exception as e: - logger.error( - f"{self.log_prefix} 处理器 {processor_name} 执行失败,耗时 (自并行开始): {duration_since_parallel_start:.2f}秒. 错误: {e}", - exc_info=True, - ) - traceback.print_exc() - - - return all_plan_info - async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict: try: loop_start_time = time.time() - with Timer("观察", cycle_timers): - # 执行所有观察器的观察 - for observation in self.observations: - await observation.observe() - - loop_observation_info = { - "observations": self.observations, - } + await self.loop_info.observe() await self.relationship_builder.build_relation() @@ -513,37 +352,18 @@ class HeartFChatting: try: # 调用完整的动作修改流程 await self.action_modifier.modify_actions( - observations=self.observations, + loop_info = self.loop_info, mode="focus", ) - - await self.action_observation.observe() - self.observations.append(self.action_observation) - logger.debug(f"{self.log_prefix} 动作修改完成") except Exception as e: logger.error(f"{self.log_prefix} 动作修改失败: {e}") # 继续执行,不中断流程 - - try: - all_plan_info = await self._process_processors(self.observations) - except Exception as e: - logger.error(f"{self.log_prefix} 信息处理器失败: {e}") - # 设置默认值以继续执行 - all_plan_info = [] - - loop_processor_info = { - "all_plan_info": all_plan_info, - } - - logger.debug(f"{self.log_prefix} 并行阶段完成,准备进入规划器,plan_info数量: {len(all_plan_info)}") - with Timer("规划器", cycle_timers): - plan_result = await self.action_planner.plan(all_plan_info, loop_start_time) + plan_result = await self.action_planner.plan() loop_plan_info = { "action_result": plan_result.get("action_result", {}), - "observed_messages": plan_result.get("observed_messages", ""), } action_type, action_data, reasoning = ( @@ -551,6 +371,8 @@ class HeartFChatting: plan_result.get("action_result", {}).get("action_data", {}), plan_result.get("action_result", {}).get("reasoning", "未提供理由"), ) + + action_data["loop_start_time"] = loop_start_time if action_type == "reply": action_str = "回复" @@ -559,7 +381,7 @@ class HeartFChatting: else: action_str = action_type - logger.debug(f"{self.log_prefix} 麦麦想要:'{action_str}'") + logger.debug(f"{self.log_prefix} 麦麦想要:'{action_str}',理由是:{reasoning}") # 动作执行计时 with Timer("动作执行", cycle_timers): @@ -575,8 +397,6 @@ class HeartFChatting: } loop_info = { - "loop_observation_info": loop_observation_info, - "loop_processor_info": loop_processor_info, "loop_plan_info": loop_plan_info, "loop_action_info": loop_action_info, } @@ -587,11 +407,8 @@ class HeartFChatting: logger.error(f"{self.log_prefix} FOCUS聊天处理失败: {e}") logger.error(traceback.format_exc()) return { - "loop_observation_info": {}, - "loop_processor_info": {}, "loop_plan_info": { "action_result": {"action_type": "error", "action_data": {}, "reasoning": f"处理失败: {e}"}, - "observed_messages": "", }, "loop_action_info": {"action_taken": False, "reply_text": "", "command": "", "taken_time": time.time()}, } @@ -636,7 +453,7 @@ class HeartFChatting: return False, "", "" if not action_handler: 
- logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}, 原因: {reasoning}") + logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}") return False, "", "" # 处理动作并获取结果 diff --git a/src/chat/focus_chat/hfc_performance_logger.py b/src/chat/focus_chat/hfc_performance_logger.py index 7ae3ea2de..88b4c66a3 100644 --- a/src/chat/focus_chat/hfc_performance_logger.py +++ b/src/chat/focus_chat/hfc_performance_logger.py @@ -41,7 +41,6 @@ class HFCPerformanceLogger: "action_type": cycle_data.get("action_type", "unknown"), "total_time": cycle_data.get("total_time", 0), "step_times": cycle_data.get("step_times", {}), - "processor_time_costs": cycle_data.get("processor_time_costs", {}), # 前处理器时间 "reasoning": cycle_data.get("reasoning", ""), "success": cycle_data.get("success", False), } diff --git a/src/chat/focus_chat/hfc_utils.py b/src/chat/focus_chat/hfc_utils.py index 0e7fe6a2c..7eeb9a7ab 100644 --- a/src/chat/focus_chat/hfc_utils.py +++ b/src/chat/focus_chat/hfc_utils.py @@ -5,7 +5,6 @@ from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.message import UserInfo from src.common.logger import get_logger import json -import os from typing import Dict, Any logger = get_logger(__name__) @@ -24,9 +23,6 @@ class CycleDetail: self.end_time: Optional[float] = None self.timers: Dict[str, float] = {} - # 新字段 - self.loop_observation_info: Dict[str, Any] = {} - self.loop_processor_info: Dict[str, Any] = {} # 前处理器信息 self.loop_plan_info: Dict[str, Any] = {} self.loop_action_info: Dict[str, Any] = {} @@ -79,8 +75,6 @@ class CycleDetail: "end_time": self.end_time, "timers": self.timers, "thinking_id": self.thinking_id, - "loop_observation_info": convert_to_serializable(self.loop_observation_info), - "loop_processor_info": convert_to_serializable(self.loop_processor_info), "loop_plan_info": convert_to_serializable(self.loop_plan_info), "loop_action_info": convert_to_serializable(self.loop_action_info), } @@ -100,41 +94,12 @@ class CycleDetail: or "group" ) - # current_time_minute = time.strftime("%Y%m%d_%H%M", time.localtime()) - - # try: - # self.log_cycle_to_file( - # log_dir + self.prefix + f"/{current_time_minute}_cycle_" + str(self.cycle_id) + ".json" - # ) - # except Exception as e: - # logger.warning(f"写入文件日志,可能是群名称包含非法字符: {e}") - - def log_cycle_to_file(self, file_path: str): - """将循环信息写入文件""" - # 如果目录不存在,则创建目 - dir_name = os.path.dirname(file_path) - # 去除特殊字符,保留字母、数字、下划线、中划线和中文 - dir_name = "".join( - char for char in dir_name if char.isalnum() or char in ["_", "-", "/"] or "\u4e00" <= char <= "\u9fff" - ) - # print("dir_name:", dir_name) - if dir_name and not os.path.exists(dir_name): - os.makedirs(dir_name, exist_ok=True) - # 写入文件 - - file_path = os.path.join(dir_name, os.path.basename(file_path)) - # print("file_path:", file_path) - with open(file_path, "a", encoding="utf-8") as f: - f.write(json.dumps(self.to_dict(), ensure_ascii=False) + "\n") - def set_thinking_id(self, thinking_id: str): """设置思考消息ID""" self.thinking_id = thinking_id def set_loop_info(self, loop_info: Dict[str, Any]): """设置循环信息""" - self.loop_observation_info = loop_info["loop_observation_info"] - self.loop_processor_info = loop_info["loop_processor_info"] self.loop_plan_info = loop_info["loop_plan_info"] self.loop_action_info = loop_info["loop_action_info"] diff --git a/src/chat/focus_chat/hfc_version_manager.py b/src/chat/focus_chat/hfc_version_manager.py index 91a3f51be..c41dff2a8 100644 --- a/src/chat/focus_chat/hfc_version_manager.py +++ b/src/chat/focus_chat/hfc_version_manager.py @@ -20,7 
+20,7 @@ class HFCVersionManager: """HFC版本号管理器""" # 默认版本号 - DEFAULT_VERSION = "v5.0.0" + DEFAULT_VERSION = "v6.0.0" # 当前运行时版本号 _current_version: Optional[str] = None diff --git a/src/chat/focus_chat/info/action_info.py b/src/chat/focus_chat/info/action_info.py deleted file mode 100644 index 8c97029d0..000000000 --- a/src/chat/focus_chat/info/action_info.py +++ /dev/null @@ -1,83 +0,0 @@ -from typing import Dict, Optional, Any, List -from dataclasses import dataclass -from .info_base import InfoBase - - -@dataclass -class ActionInfo(InfoBase): - """动作信息类 - - 用于管理和记录动作的变更信息,包括需要添加或移除的动作。 - 继承自 InfoBase 类,使用字典存储具体数据。 - - Attributes: - type (str): 信息类型标识符,固定为 "action" - - Data Fields: - add_actions (List[str]): 需要添加的动作列表 - remove_actions (List[str]): 需要移除的动作列表 - reason (str): 变更原因说明 - """ - - type: str = "action" - - def get_type(self) -> str: - """获取信息类型""" - return self.type - - def get_data(self) -> Dict[str, Any]: - """获取信息数据""" - return self.data - - def set_action_changes(self, action_changes: Dict[str, List[str]]) -> None: - """设置动作变更信息 - - Args: - action_changes (Dict[str, List[str]]): 包含要增加和删除的动作列表 - { - "add": ["action1", "action2"], - "remove": ["action3"] - } - """ - self.data["add_actions"] = action_changes.get("add", []) - self.data["remove_actions"] = action_changes.get("remove", []) - - def set_reason(self, reason: str) -> None: - """设置变更原因 - - Args: - reason (str): 动作变更的原因说明 - """ - self.data["reason"] = reason - - def get_add_actions(self) -> List[str]: - """获取需要添加的动作列表 - - Returns: - List[str]: 需要添加的动作列表 - """ - return self.data.get("add_actions", []) - - def get_remove_actions(self) -> List[str]: - """获取需要移除的动作列表 - - Returns: - List[str]: 需要移除的动作列表 - """ - return self.data.get("remove_actions", []) - - def get_reason(self) -> Optional[str]: - """获取变更原因 - - Returns: - Optional[str]: 动作变更的原因说明,如果未设置则返回 None - """ - return self.data.get("reason") - - def has_changes(self) -> bool: - """检查是否有动作变更 - - Returns: - bool: 如果有任何动作需要添加或移除则返回True - """ - return bool(self.get_add_actions() or self.get_remove_actions()) diff --git a/src/chat/focus_chat/info/cycle_info.py b/src/chat/focus_chat/info/cycle_info.py deleted file mode 100644 index 3701aa153..000000000 --- a/src/chat/focus_chat/info/cycle_info.py +++ /dev/null @@ -1,157 +0,0 @@ -from typing import Dict, Optional, Any -from dataclasses import dataclass -from .info_base import InfoBase - - -@dataclass -class CycleInfo(InfoBase): - """循环信息类 - - 用于记录和管理心跳循环的相关信息,包括循环ID、时间信息、动作信息等。 - 继承自 InfoBase 类,使用字典存储具体数据。 - - Attributes: - type (str): 信息类型标识符,固定为 "cycle" - - Data Fields: - cycle_id (str): 当前循环的唯一标识符 - start_time (str): 循环开始的时间 - end_time (str): 循环结束的时间 - action (str): 在循环中采取的动作 - action_data (Dict[str, Any]): 动作相关的详细数据 - reason (str): 触发循环的原因 - observe_info (str): 当前的回复信息 - """ - - type: str = "cycle" - - def get_type(self) -> str: - """获取信息类型""" - return self.type - - def get_data(self) -> Dict[str, str]: - """获取信息数据""" - return self.data - - def get_info(self, key: str) -> Optional[str]: - """获取特定属性的信息 - - Args: - key: 要获取的属性键名 - - Returns: - 属性值,如果键不存在则返回 None - """ - return self.data.get(key) - - def set_cycle_id(self, cycle_id: str) -> None: - """设置循环ID - - Args: - cycle_id (str): 循环的唯一标识符 - """ - self.data["cycle_id"] = cycle_id - - def set_start_time(self, start_time: str) -> None: - """设置开始时间 - - Args: - start_time (str): 循环开始的时间,建议使用标准时间格式 - """ - self.data["start_time"] = start_time - - def set_end_time(self, end_time: str) -> None: - """设置结束时间 - - Args: - end_time (str): 循环结束的时间,建议使用标准时间格式 - """ - 
self.data["end_time"] = end_time - - def set_action(self, action: str) -> None: - """设置采取的动作 - - Args: - action (str): 在循环中执行的动作名称 - """ - self.data["action"] = action - - def set_action_data(self, action_data: Dict[str, Any]) -> None: - """设置动作数据 - - Args: - action_data (Dict[str, Any]): 动作相关的详细数据,将被转换为字符串存储 - """ - self.data["action_data"] = str(action_data) - - def set_reason(self, reason: str) -> None: - """设置原因 - - Args: - reason (str): 触发循环的原因说明 - """ - self.data["reason"] = reason - - def set_observe_info(self, observe_info: str) -> None: - """设置回复信息 - - Args: - observe_info (str): 当前的回复信息 - """ - self.data["observe_info"] = observe_info - - def get_cycle_id(self) -> Optional[str]: - """获取循环ID - - Returns: - Optional[str]: 循环的唯一标识符,如果未设置则返回 None - """ - return self.get_info("cycle_id") - - def get_start_time(self) -> Optional[str]: - """获取开始时间 - - Returns: - Optional[str]: 循环开始的时间,如果未设置则返回 None - """ - return self.get_info("start_time") - - def get_end_time(self) -> Optional[str]: - """获取结束时间 - - Returns: - Optional[str]: 循环结束的时间,如果未设置则返回 None - """ - return self.get_info("end_time") - - def get_action(self) -> Optional[str]: - """获取采取的动作 - - Returns: - Optional[str]: 在循环中执行的动作名称,如果未设置则返回 None - """ - return self.get_info("action") - - def get_action_data(self) -> Optional[str]: - """获取动作数据 - - Returns: - Optional[str]: 动作相关的详细数据(字符串形式),如果未设置则返回 None - """ - return self.get_info("action_data") - - def get_reason(self) -> Optional[str]: - """获取原因 - - Returns: - Optional[str]: 触发循环的原因说明,如果未设置则返回 None - """ - return self.get_info("reason") - - def get_observe_info(self) -> Optional[str]: - """获取回复信息 - - Returns: - Optional[str]: 当前的回复信息,如果未设置则返回 None - """ - return self.get_info("observe_info") diff --git a/src/chat/focus_chat/info/info_base.py b/src/chat/focus_chat/info/info_base.py deleted file mode 100644 index 53ad30230..000000000 --- a/src/chat/focus_chat/info/info_base.py +++ /dev/null @@ -1,69 +0,0 @@ -from typing import Dict, Optional, Any, List -from dataclasses import dataclass, field - - -@dataclass -class InfoBase: - """信息基类 - - 这是一个基础信息类,用于存储和管理各种类型的信息数据。 - 所有具体的信息类都应该继承自这个基类。 - - Attributes: - type (str): 信息类型标识符,默认为 "base" - data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典, - 支持存储字符串、字典、列表等嵌套数据结构 - """ - - type: str = "base" - data: Dict[str, Any] = field(default_factory=dict) - processed_info: str = "" - - def get_type(self) -> str: - """获取信息类型 - - Returns: - str: 当前信息对象的类型标识符 - """ - return self.type - - def get_data(self) -> Dict[str, Any]: - """获取所有信息数据 - - Returns: - Dict[str, Any]: 包含所有信息数据的字典 - """ - return self.data - - def get_info(self, key: str) -> Optional[Any]: - """获取特定属性的信息 - - Args: - key: 要获取的属性键名 - - Returns: - Optional[Any]: 属性值,如果键不存在则返回 None - """ - return self.data.get(key) - - def get_info_list(self, key: str) -> List[Any]: - """获取特定属性的信息列表 - - Args: - key: 要获取的属性键名 - - Returns: - List[Any]: 属性值列表,如果键不存在则返回空列表 - """ - value = self.data.get(key) - if isinstance(value, list): - return value - return [] - - def get_processed_info(self) -> str: - """获取处理后的信息 - - Returns: - str: 处理后的信息字符串 - """ - return self.processed_info diff --git a/src/chat/focus_chat/info/obs_info.py b/src/chat/focus_chat/info/obs_info.py deleted file mode 100644 index 9cc1e1e9b..000000000 --- a/src/chat/focus_chat/info/obs_info.py +++ /dev/null @@ -1,165 +0,0 @@ -from typing import Dict, Optional -from dataclasses import dataclass -from .info_base import InfoBase - - -@dataclass -class ObsInfo(InfoBase): - """OBS信息类 - - 用于记录和管理OBS相关的信息,包括说话消息、截断后的说话消息和聊天类型。 - 继承自 InfoBase 
类,使用字典存储具体数据。 - - Attributes: - type (str): 信息类型标识符,固定为 "obs" - - Data Fields: - talking_message (str): 说话消息内容 - talking_message_str_truncate (str): 截断后的说话消息内容 - talking_message_str_short (str): 简短版本的说话消息内容(使用最新一半消息) - talking_message_str_truncate_short (str): 截断简短版本的说话消息内容(使用最新一半消息) - chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) - """ - - type: str = "obs" - - def set_talking_message(self, message: str) -> None: - """设置说话消息 - - Args: - message (str): 说话消息内容 - """ - self.data["talking_message"] = message - - def set_talking_message_str_truncate(self, message: str) -> None: - """设置截断后的说话消息 - - Args: - message (str): 截断后的说话消息内容 - """ - self.data["talking_message_str_truncate"] = message - - def set_talking_message_str_short(self, message: str) -> None: - """设置简短版本的说话消息 - - Args: - message (str): 简短版本的说话消息内容 - """ - self.data["talking_message_str_short"] = message - - def set_talking_message_str_truncate_short(self, message: str) -> None: - """设置截断简短版本的说话消息 - - Args: - message (str): 截断简短版本的说话消息内容 - """ - self.data["talking_message_str_truncate_short"] = message - - def set_previous_chat_info(self, message: str) -> None: - """设置之前聊天信息 - - Args: - message (str): 之前聊天信息内容 - """ - self.data["previous_chat_info"] = message - - def set_chat_type(self, chat_type: str) -> None: - """设置聊天类型 - - Args: - chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) - """ - if chat_type not in ["private", "group", "other"]: - chat_type = "other" - self.data["chat_type"] = chat_type - - def set_chat_target(self, chat_target: str) -> None: - """设置聊天目标 - - Args: - chat_target (str): 聊天目标,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他) - """ - self.data["chat_target"] = chat_target - - def set_chat_id(self, chat_id: str) -> None: - """设置聊天ID - - Args: - chat_id (str): 聊天ID - """ - self.data["chat_id"] = chat_id - - def get_chat_id(self) -> Optional[str]: - """获取聊天ID - - Returns: - Optional[str]: 聊天ID,如果未设置则返回 None - """ - return self.get_info("chat_id") - - def get_talking_message(self) -> Optional[str]: - """获取说话消息 - - Returns: - Optional[str]: 说话消息内容,如果未设置则返回 None - """ - return self.get_info("talking_message") - - def get_talking_message_str_truncate(self) -> Optional[str]: - """获取截断后的说话消息 - - Returns: - Optional[str]: 截断后的说话消息内容,如果未设置则返回 None - """ - return self.get_info("talking_message_str_truncate") - - def get_talking_message_str_short(self) -> Optional[str]: - """获取简短版本的说话消息 - - Returns: - Optional[str]: 简短版本的说话消息内容,如果未设置则返回 None - """ - return self.get_info("talking_message_str_short") - - def get_talking_message_str_truncate_short(self) -> Optional[str]: - """获取截断简短版本的说话消息 - - Returns: - Optional[str]: 截断简短版本的说话消息内容,如果未设置则返回 None - """ - return self.get_info("talking_message_str_truncate_short") - - def get_chat_type(self) -> str: - """获取聊天类型 - - Returns: - str: 聊天类型,默认为 "other" - """ - return self.get_info("chat_type") or "other" - - def get_type(self) -> str: - """获取信息类型 - - Returns: - str: 当前信息对象的类型标识符 - """ - return self.type - - def get_data(self) -> Dict[str, str]: - """获取所有信息数据 - - Returns: - Dict[str, str]: 包含所有信息数据的字典 - """ - return self.data - - def get_info(self, key: str) -> Optional[str]: - """获取特定属性的信息 - - Args: - key: 要获取的属性键名 - - Returns: - Optional[str]: 属性值,如果键不存在则返回 None - """ - return self.data.get(key) diff --git a/src/chat/focus_chat/info/workingmemory_info.py b/src/chat/focus_chat/info/workingmemory_info.py deleted file mode 100644 index 0a3282edf..000000000 --- a/src/chat/focus_chat/info/workingmemory_info.py +++ /dev/null @@ -1,86 +0,0 @@ -from typing 
import Dict, Optional, List -from dataclasses import dataclass -from .info_base import InfoBase - - -@dataclass -class WorkingMemoryInfo(InfoBase): - type: str = "workingmemory" - - processed_info: str = "" - - def set_talking_message(self, message: str) -> None: - """设置说话消息 - - Args: - message (str): 说话消息内容 - """ - self.data["talking_message"] = message - - def set_working_memory(self, working_memory: List[str]) -> None: - """设置工作记忆列表 - - Args: - working_memory (List[str]): 工作记忆内容列表 - """ - self.data["working_memory"] = working_memory - - def add_working_memory(self, working_memory: str) -> None: - """添加一条工作记忆 - - Args: - working_memory (str): 工作记忆内容,格式为"记忆要点:xxx" - """ - working_memory_list = self.data.get("working_memory", []) - working_memory_list.append(working_memory) - self.data["working_memory"] = working_memory_list - - def get_working_memory(self) -> List[str]: - """获取所有工作记忆 - - Returns: - List[str]: 工作记忆内容列表,每条记忆格式为"记忆要点:xxx" - """ - return self.data.get("working_memory", []) - - def get_type(self) -> str: - """获取信息类型 - - Returns: - str: 当前信息对象的类型标识符 - """ - return self.type - - def get_data(self) -> Dict[str, List[str]]: - """获取所有信息数据 - - Returns: - Dict[str, List[str]]: 包含所有信息数据的字典 - """ - return self.data - - def get_info(self, key: str) -> Optional[List[str]]: - """获取特定属性的信息 - - Args: - key: 要获取的属性键名 - - Returns: - Optional[List[str]]: 属性值,如果键不存在则返回 None - """ - return self.data.get(key) - - def get_processed_info(self) -> str: - """获取处理后的信息 - - Returns: - str: 处理后的信息数据,所有记忆要点按行拼接 - """ - all_memory = self.get_working_memory() - memory_str = "" - for memory in all_memory: - memory_str += f"{memory}\n" - - self.processed_info = memory_str - - return self.processed_info diff --git a/src/chat/focus_chat/info_processors/base_processor.py b/src/chat/focus_chat/info_processors/base_processor.py deleted file mode 100644 index 26396580c..000000000 --- a/src/chat/focus_chat/info_processors/base_processor.py +++ /dev/null @@ -1,51 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Any -from src.chat.focus_chat.info.info_base import InfoBase -from src.chat.focus_chat.observation.observation import Observation -from src.common.logger import get_logger - -logger = get_logger("base_processor") - - -class BaseProcessor(ABC): - """信息处理器基类 - - 所有具体的信息处理器都应该继承这个基类,并实现process_info方法。 - 支持处理InfoBase和Observation类型的输入。 - """ - - log_prefix = "Base信息处理器" - - @abstractmethod - def __init__(self): - """初始化处理器""" - - @abstractmethod - async def process_info( - self, - observations: List[Observation] = None, - **kwargs: Any, - ) -> List[InfoBase]: - """处理信息对象的抽象方法 - - Args: - infos: InfoBase对象列表 - observations: 可选的Observation对象列表 - **kwargs: 其他可选参数 - - Returns: - List[InfoBase]: 处理后的InfoBase实例列表 - """ - pass - - def _create_processed_item(self, info_type: str, info_data: Any) -> dict: - """创建处理后的信息项 - - Args: - info_type: 信息类型 - info_data: 信息数据 - - Returns: - dict: 处理后的信息项 - """ - return {"type": info_type, "id": f"info_{info_type}", "content": info_data, "ttl": 3} diff --git a/src/chat/focus_chat/info_processors/chattinginfo_processor.py b/src/chat/focus_chat/info_processors/chattinginfo_processor.py deleted file mode 100644 index a4aea17c4..000000000 --- a/src/chat/focus_chat/info_processors/chattinginfo_processor.py +++ /dev/null @@ -1,142 +0,0 @@ -from typing import List, Any -from src.chat.focus_chat.info.obs_info import ObsInfo -from src.chat.focus_chat.observation.observation import Observation -from src.chat.focus_chat.info.info_base import InfoBase -from .base_processor 
import BaseProcessor -from src.common.logger import get_logger -from src.chat.focus_chat.observation.chatting_observation import ChattingObservation -from datetime import datetime -from src.llm_models.utils_model import LLMRequest -from src.config.config import global_config - -logger = get_logger("processor") - - -class ChattingInfoProcessor(BaseProcessor): - """观察处理器 - - 用于处理Observation对象,将其转换为ObsInfo对象。 - """ - - log_prefix = "聊天信息处理" - - def __init__(self): - """初始化观察处理器""" - super().__init__() - # TODO: API-Adapter修改标记 - self.model_summary = LLMRequest( - model=global_config.model.utils_small, - temperature=0.7, - request_type="focus.observation.chat", - ) - - async def process_info( - self, - observations: List[Observation] = None, - **kwargs: Any, - ) -> List[InfoBase]: - """处理Observation对象 - - Args: - infos: InfoBase对象列表 - observations: 可选的Observation对象列表 - **kwargs: 其他可选参数 - - Returns: - List[InfoBase]: 处理后的ObsInfo实例列表 - """ - # print(f"observations: {observations}") - processed_infos = [] - - # 处理Observation对象 - if observations: - for obs in observations: - # print(f"obs: {obs}") - if isinstance(obs, ChattingObservation): - obs_info = ObsInfo() - - # 设置聊天ID - if hasattr(obs, "chat_id"): - obs_info.set_chat_id(obs.chat_id) - - # 设置说话消息 - if hasattr(obs, "talking_message_str"): - # print(f"设置说话消息:obs.talking_message_str: {obs.talking_message_str}") - obs_info.set_talking_message(obs.talking_message_str) - - # 设置截断后的说话消息 - if hasattr(obs, "talking_message_str_truncate"): - # print(f"设置截断后的说话消息:obs.talking_message_str_truncate: {obs.talking_message_str_truncate}") - obs_info.set_talking_message_str_truncate(obs.talking_message_str_truncate) - - # 设置简短版本的说话消息 - if hasattr(obs, "talking_message_str_short"): - obs_info.set_talking_message_str_short(obs.talking_message_str_short) - - # 设置截断简短版本的说话消息 - if hasattr(obs, "talking_message_str_truncate_short"): - obs_info.set_talking_message_str_truncate_short(obs.talking_message_str_truncate_short) - - if hasattr(obs, "mid_memory_info"): - # print(f"设置之前聊天信息:obs.mid_memory_info: {obs.mid_memory_info}") - obs_info.set_previous_chat_info(obs.mid_memory_info) - - # 设置聊天类型 - is_group_chat = obs.is_group_chat - if is_group_chat: - chat_type = "group" - else: - chat_type = "private" - if hasattr(obs, "chat_target_info") and obs.chat_target_info: - obs_info.set_chat_target(obs.chat_target_info.get("person_name", "某人")) - obs_info.set_chat_type(chat_type) - - # logger.debug(f"聊天信息处理器处理后的信息: {obs_info}") - - processed_infos.append(obs_info) - - return processed_infos - - async def chat_compress(self, obs: ChattingObservation): - log_msg = "" - if obs.compressor_prompt: - summary = "" - try: - summary_result, _ = await self.model_summary.generate_response_async(obs.compressor_prompt) - summary = "没有主题的闲聊" - if summary_result: - summary = summary_result - except Exception as e: - log_msg = f"总结主题失败 for chat {obs.chat_id}: {e}" - logger.error(log_msg) - else: - log_msg = f"chat_compress 完成 for chat {obs.chat_id}, summary: {summary}" - logger.info(log_msg) - - mid_memory = { - "id": str(int(datetime.now().timestamp())), - "theme": summary, - "messages": obs.oldest_messages, # 存储原始消息对象 - "readable_messages": obs.oldest_messages_str, - # "timestamps": oldest_timestamps, - "chat_id": obs.chat_id, - "created_at": datetime.now().timestamp(), - } - - obs.mid_memories.append(mid_memory) - if len(obs.mid_memories) > obs.max_mid_memory_len: - obs.mid_memories.pop(0) # 移除最旧的 - - mid_memory_str = "之前聊天的内容概述是:\n" - for mid_memory_item in obs.mid_memories: # 
重命名循环变量以示区分 - time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60) - mid_memory_str += ( - f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n" - ) - obs.mid_memory_info = mid_memory_str - - obs.compressor_prompt = "" - obs.oldest_messages = [] - obs.oldest_messages_str = "" - - return log_msg diff --git a/src/chat/focus_chat/observation/actions_observation.py b/src/chat/focus_chat/observation/actions_observation.py deleted file mode 100644 index 125032140..000000000 --- a/src/chat/focus_chat/observation/actions_observation.py +++ /dev/null @@ -1,46 +0,0 @@ -# 定义了来自外部世界的信息 -# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 -from datetime import datetime -from src.common.logger import get_logger -from src.chat.planner_actions.action_manager import ActionManager - -logger = get_logger("observation") - - -# 特殊的观察,专门用于观察动作 -# 所有观察的基类 -class ActionObservation: - def __init__(self, observe_id): - self.observe_info = "" - self.observe_id = observe_id - self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 - self.action_manager: ActionManager = None - - self.all_actions = {} - self.all_using_actions = {} - - def get_observe_info(self): - return self.observe_info - - def set_action_manager(self, action_manager: ActionManager): - self.action_manager = action_manager - self.all_actions = self.action_manager.get_registered_actions() - - async def observe(self): - action_info_block = "" - self.all_using_actions = self.action_manager.get_using_actions() - for action_name, action_info in self.all_using_actions.items(): - action_info_block += f"\n{action_name}: {action_info.get('description', '')}" - action_info_block += "\n注意,除了上面动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n" - - self.observe_info = action_info_block - - def to_dict(self) -> dict: - """将观察对象转换为可序列化的字典""" - return { - "observe_info": self.observe_info, - "observe_id": self.observe_id, - "last_observe_time": self.last_observe_time, - "all_actions": self.all_actions, - "all_using_actions": self.all_using_actions, - } diff --git a/src/chat/focus_chat/observation/chatting_observation.py b/src/chat/focus_chat/observation/chatting_observation.py deleted file mode 100644 index 201e313fa..000000000 --- a/src/chat/focus_chat/observation/chatting_observation.py +++ /dev/null @@ -1,183 +0,0 @@ -from datetime import datetime -from src.config.config import global_config -from src.chat.utils.chat_message_builder import ( - get_raw_msg_before_timestamp_with_chat, - build_readable_messages, - get_raw_msg_by_timestamp_with_chat, - num_new_messages_since, - get_person_id_list, -) -from src.chat.utils.prompt_builder import global_prompt_manager, Prompt -from src.chat.focus_chat.observation.observation import Observation -from src.common.logger import get_logger -from src.chat.utils.utils import get_chat_type_and_target_info - -logger = get_logger("observation") - -# 定义提示模板 -Prompt( - """这是{chat_type_description},请总结以下聊天记录的主题: -{chat_logs} -请概括这段聊天记录的主题和主要内容 -主题:简短的概括,包括时间,人物和事件,不要超过20个字 -内容:具体的信息内容,包括人物、事件和信息,不要超过200个字,不要分点。 - -请用json格式返回,格式如下: -{{ - "theme": "主题,例如 2025-06-14 10:00:00 群聊 麦麦 和 网友 讨论了 游戏 的话题", - "content": "内容,可以是对聊天记录的概括,也可以是聊天记录的详细内容" -}} -""", - "chat_summary_prompt", -) - - -class ChattingObservation(Observation): - def __init__(self, chat_id): - super().__init__(chat_id) - self.chat_id = chat_id - self.platform = "qq" - - self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_id) - - self.talking_message = [] - self.talking_message_str = "" - 
self.talking_message_str_truncate = "" - self.talking_message_str_short = "" - self.talking_message_str_truncate_short = "" - self.name = global_config.bot.nickname - self.nick_name = global_config.bot.alias_names - self.max_now_obs_len = global_config.chat.max_context_size - self.overlap_len = global_config.focus_chat.compressed_length - self.person_list = [] - self.compressor_prompt = "" - self.oldest_messages = [] - self.oldest_messages_str = "" - - self.last_observe_time = datetime.now().timestamp() - initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10) - initial_messages_short = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 5) - self.last_observe_time = initial_messages[-1]["time"] if initial_messages else self.last_observe_time - self.talking_message = initial_messages - self.talking_message_short = initial_messages_short - self.talking_message_str = build_readable_messages(self.talking_message, show_actions=True) - self.talking_message_str_truncate = build_readable_messages( - self.talking_message, show_actions=True, truncate=True - ) - self.talking_message_str_short = build_readable_messages(self.talking_message_short, show_actions=True) - self.talking_message_str_truncate_short = build_readable_messages( - self.talking_message_short, show_actions=True, truncate=True - ) - - def to_dict(self) -> dict: - """将观察对象转换为可序列化的字典""" - return { - "chat_id": self.chat_id, - "platform": self.platform, - "is_group_chat": self.is_group_chat, - "chat_target_info": self.chat_target_info, - "talking_message_str": self.talking_message_str, - "talking_message_str_truncate": self.talking_message_str_truncate, - "talking_message_str_short": self.talking_message_str_short, - "talking_message_str_truncate_short": self.talking_message_str_truncate_short, - "name": self.name, - "nick_name": self.nick_name, - "last_observe_time": self.last_observe_time, - } - - def get_observe_info(self, ids=None): - return self.talking_message_str - - async def observe(self): - # 自上一次观察的新消息 - new_messages_list = get_raw_msg_by_timestamp_with_chat( - chat_id=self.chat_id, - timestamp_start=self.last_observe_time, - timestamp_end=datetime.now().timestamp(), - limit=self.max_now_obs_len, - limit_mode="latest", - ) - - # print(f"new_messages_list: {new_messages_list}") - - last_obs_time_mark = self.last_observe_time - if new_messages_list: - self.last_observe_time = new_messages_list[-1]["time"] - self.talking_message.extend(new_messages_list) - - if len(self.talking_message) > self.max_now_obs_len: - # 计算需要移除的消息数量,保留最新的 max_now_obs_len 条 - messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len - oldest_messages = self.talking_message[:messages_to_remove_count] - self.talking_message = self.talking_message[messages_to_remove_count:] - - # 构建压缩提示 - oldest_messages_str = build_readable_messages( - messages=oldest_messages, timestamp_mode="normal_no_YMD", read_mark=0, show_actions=True - ) - - # 根据聊天类型选择提示模板 - prompt_template_name = "chat_summary_prompt" - if self.is_group_chat: - chat_type_description = "qq群聊的聊天记录" - else: - chat_target_name = "对方" - if self.chat_target_info: - chat_target_name = ( - self.chat_target_info.get("person_name") - or self.chat_target_info.get("user_nickname") - or chat_target_name - ) - chat_type_description = f"你和{chat_target_name}的私聊记录" - - prompt = await global_prompt_manager.format_prompt( - prompt_template_name, - chat_type_description=chat_type_description, - chat_logs=oldest_messages_str, - ) - 
- self.compressor_prompt = prompt - - # 构建当前消息 - self.talking_message_str = build_readable_messages( - messages=self.talking_message, - timestamp_mode="lite", - read_mark=last_obs_time_mark, - show_actions=True, - ) - self.talking_message_str_truncate = build_readable_messages( - messages=self.talking_message, - timestamp_mode="normal_no_YMD", - read_mark=last_obs_time_mark, - truncate=True, - show_actions=True, - ) - - # 构建简短版本 - 使用最新一半的消息 - half_count = len(self.talking_message) // 2 - recent_messages = self.talking_message[-half_count:] if half_count > 0 else self.talking_message - - self.talking_message_str_short = build_readable_messages( - messages=recent_messages, - timestamp_mode="lite", - read_mark=last_obs_time_mark, - show_actions=True, - ) - self.talking_message_str_truncate_short = build_readable_messages( - messages=recent_messages, - timestamp_mode="normal_no_YMD", - read_mark=last_obs_time_mark, - truncate=True, - show_actions=True, - ) - - self.person_list = await get_person_id_list(self.talking_message) - - # logger.debug( - # f"Chat {self.chat_id} - 现在聊天内容:{self.talking_message_str}" - # ) - - async def has_new_messages_since(self, timestamp: float) -> bool: - """检查指定时间戳之后是否有新消息""" - count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp) - return count > 0 diff --git a/src/chat/focus_chat/observation/observation.py b/src/chat/focus_chat/observation/observation.py deleted file mode 100644 index 272f43d99..000000000 --- a/src/chat/focus_chat/observation/observation.py +++ /dev/null @@ -1,25 +0,0 @@ -# 定义了来自外部世界的信息 -# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 -from datetime import datetime -from src.common.logger import get_logger - -logger = get_logger("observation") - - -# 所有观察的基类 -class Observation: - def __init__(self, observe_id): - self.observe_info = "" - self.observe_id = observe_id - self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间 - - def to_dict(self) -> dict: - """将观察对象转换为可序列化的字典""" - return { - "observe_info": self.observe_info, - "observe_id": self.observe_id, - "last_observe_time": self.last_observe_time, - } - - async def observe(self): - pass diff --git a/src/chat/focus_chat/observation/working_observation.py b/src/chat/focus_chat/observation/working_observation.py deleted file mode 100644 index 6052a120a..000000000 --- a/src/chat/focus_chat/observation/working_observation.py +++ /dev/null @@ -1,34 +0,0 @@ -# 定义了来自外部世界的信息 -# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体 -from datetime import datetime -from src.common.logger import get_logger -from src.chat.focus_chat.working_memory.working_memory import WorkingMemory -from src.chat.focus_chat.working_memory.memory_item import MemoryItem -from typing import List -# Import the new utility function - -logger = get_logger("observation") - - -# 所有观察的基类 -class WorkingMemoryObservation: - def __init__(self, observe_id): - self.observe_info = "" - self.observe_id = observe_id - self.last_observe_time = datetime.now().timestamp() - - self.working_memory = WorkingMemory(chat_id=observe_id) - - self.retrieved_working_memory = [] - - def get_observe_info(self): - return self.working_memory - - def add_retrieved_working_memory(self, retrieved_working_memory: List[MemoryItem]): - self.retrieved_working_memory.append(retrieved_working_memory) - - def get_retrieved_working_memory(self): - return self.retrieved_working_memory - - async def observe(self): - pass diff --git a/src/chat/heart_flow/background_tasks.py b/src/chat/heart_flow/background_tasks.py deleted file mode 100644 index b24dad32b..000000000 --- 
a/src/chat/heart_flow/background_tasks.py +++ /dev/null @@ -1,173 +0,0 @@ -import asyncio -import traceback -from typing import Optional, Coroutine, Callable, Any, List -from src.common.logger import get_logger -from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager -from src.config.config import global_config - -logger = get_logger("background_tasks") - - -# 新增私聊激活检查间隔 -PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS = 5 # 与兴趣评估类似,设为5秒 - -CLEANUP_INTERVAL_SECONDS = 1200 - - -async def _run_periodic_loop( - task_name: str, interval: int, task_func: Callable[..., Coroutine[Any, Any, None]], **kwargs -): - """周期性任务主循环""" - while True: - start_time = asyncio.get_event_loop().time() - # logger.debug(f"开始执行后台任务: {task_name}") - - try: - await task_func(**kwargs) # 执行实际任务 - except asyncio.CancelledError: - logger.info(f"任务 {task_name} 已取消") - break - except Exception as e: - logger.error(f"任务 {task_name} 执行出错: {e}") - logger.error(traceback.format_exc()) - - # 计算并执行间隔等待 - elapsed = asyncio.get_event_loop().time() - start_time - sleep_time = max(0, interval - elapsed) - # if sleep_time < 0.1: # 任务超时处理, DEBUG 时可能干扰断点 - # logger.warning(f"任务 {task_name} 超时执行 ({elapsed:.2f}s > {interval}s)") - await asyncio.sleep(sleep_time) - - logger.debug(f"任务循环结束: {task_name}") # 调整日志信息 - - -class BackgroundTaskManager: - """管理 Heartflow 的后台周期性任务。""" - - def __init__( - self, - subheartflow_manager: SubHeartflowManager, - ): - self.subheartflow_manager = subheartflow_manager - - # Task references - self._cleanup_task: Optional[asyncio.Task] = None - self._hf_judge_state_update_task: Optional[asyncio.Task] = None - self._private_chat_activation_task: Optional[asyncio.Task] = None # 新增私聊激活任务引用 - self._tasks: List[Optional[asyncio.Task]] = [] # Keep track of all tasks - - async def start_tasks(self): - """启动所有后台任务 - - 功能说明: - - 启动核心后台任务: 状态更新、清理、日志记录、兴趣评估和随机停用 - - 每个任务启动前检查是否已在运行 - - 将任务引用保存到任务列表 - """ - - task_configs = [] - - # 根据 chat_mode 条件添加其他任务 - if not (global_config.chat.chat_mode == "normal"): - task_configs.extend( - [ - ( - self._run_cleanup_cycle, - "info", - f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s", - "_cleanup_task", - ), - # 新增私聊激活任务配置 - ( - # Use lambda to pass the interval to the runner function - lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS), - "debug", - f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s", - "_private_chat_activation_task", - ), - ] - ) - - # 统一启动所有任务 - for task_func, log_level, log_msg, task_attr_name in task_configs: - # 检查任务变量是否存在且未完成 - current_task_var = getattr(self, task_attr_name) - if current_task_var is None or current_task_var.done(): - new_task = asyncio.create_task(task_func()) - setattr(self, task_attr_name, new_task) # 更新任务变量 - if new_task not in self._tasks: # 避免重复添加 - self._tasks.append(new_task) - - # 根据配置记录不同级别的日志 - getattr(logger, log_level)(log_msg) - else: - logger.warning(f"{task_attr_name}任务已在运行") - - async def stop_tasks(self): - """停止所有后台任务。 - - 该方法会: - 1. 遍历所有后台任务并取消未完成的任务 - 2. 等待所有取消操作完成 - 3. 
清空任务列表 - """ - logger.info("正在停止所有后台任务...") - cancelled_count = 0 - - # 第一步:取消所有运行中的任务 - for task in self._tasks: - if task and not task.done(): - task.cancel() # 发送取消请求 - cancelled_count += 1 - - # 第二步:处理取消结果 - if cancelled_count > 0: - logger.debug(f"正在等待{cancelled_count}个任务完成取消...") - # 使用gather等待所有取消操作完成,忽略异常 - await asyncio.gather(*[t for t in self._tasks if t and t.cancelled()], return_exceptions=True) - logger.info(f"成功取消{cancelled_count}个后台任务") - else: - logger.info("没有需要取消的后台任务") - - # 第三步:清空任务列表 - self._tasks = [] # 重置任务列表 - - # 状态转换处理 - - async def _perform_cleanup_work(self): - """执行子心流清理任务 - 1. 获取需要清理的不活跃子心流列表 - 2. 逐个停止这些子心流 - 3. 记录清理结果 - """ - # 获取需要清理的子心流列表(包含ID和原因) - flows_to_stop = self.subheartflow_manager.get_inactive_subheartflows() - - if not flows_to_stop: - return # 没有需要清理的子心流直接返回 - - logger.info(f"准备删除 {len(flows_to_stop)} 个不活跃(1h)子心流") - stopped_count = 0 - - # 逐个停止子心流 - for flow_id in flows_to_stop: - success = await self.subheartflow_manager.delete_subflow(flow_id) - if success: - stopped_count += 1 - logger.debug(f"[清理任务] 已停止子心流 {flow_id}") - - # 记录最终清理结果 - logger.info(f"[清理任务] 清理完成, 共停止 {stopped_count}/{len(flows_to_stop)} 个子心流") - - async def _run_cleanup_cycle(self): - await _run_periodic_loop( - task_name="Subflow Cleanup", interval=CLEANUP_INTERVAL_SECONDS, task_func=self._perform_cleanup_work - ) - - # 新增私聊激活任务运行器 - async def _run_private_chat_activation_cycle(self, interval: int): - await _run_periodic_loop( - task_name="Private Chat Activation Check", - interval=interval, - task_func=self.subheartflow_manager.sbhf_absent_private_into_focus, - ) diff --git a/src/chat/heart_flow/heartflow.py b/src/chat/heart_flow/heartflow.py index c8c5d1295..7ab71fc39 100644 --- a/src/chat/heart_flow/heartflow.py +++ b/src/chat/heart_flow/heartflow.py @@ -1,84 +1,56 @@ from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState from src.common.logger import get_logger -from typing import Any, Optional, List -from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager -from src.chat.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager - +from typing import Any, Optional +from typing import Dict +from src.chat.message_receive.chat_stream import get_chat_manager logger = get_logger("heartflow") class Heartflow: - """主心流协调器,负责初始化并协调各个子系统: - - 状态管理 (MaiState) - - 子心流管理 (SubHeartflow) - - 后台任务 (BackgroundTaskManager) - """ + """主心流协调器,负责初始化并协调聊天""" def __init__(self): - # 子心流管理 (在初始化时传入 current_state) - self.subheartflow_manager: SubHeartflowManager = SubHeartflowManager() - - # 后台任务管理器 (整合所有定时任务) - self.background_task_manager: BackgroundTaskManager = BackgroundTaskManager( - subheartflow_manager=self.subheartflow_manager, - ) + self.subheartflows: Dict[Any, "SubHeartflow"] = {} async def get_or_create_subheartflow(self, subheartflow_id: Any) -> Optional["SubHeartflow"]: - """获取或创建一个新的SubHeartflow实例 - 委托给 SubHeartflowManager""" - # 不再需要传入 self.current_state - return await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id) + """获取或创建一个新的SubHeartflow实例""" + if subheartflow_id in self.subheartflows: + subflow = self.subheartflows.get(subheartflow_id) + if subflow: + return subflow + + try: + new_subflow = SubHeartflow( + subheartflow_id, + ) + + await new_subflow.initialize() + + # 注册子心流 + self.subheartflows[subheartflow_id] = new_subflow + heartflow_name = get_chat_manager().get_stream_name(subheartflow_id) or subheartflow_id + logger.info(f"[{heartflow_name}] 开始接收消息") + + return new_subflow + except 
Exception as e: + logger.error(f"创建子心流 {subheartflow_id} 失败: {e}", exc_info=True) + return None + async def force_change_subheartflow_status(self, subheartflow_id: str, status: ChatState) -> None: """强制改变子心流的状态""" # 这里的 message 是可选的,可能是一个消息对象,也可能是其他类型的数据 - return await self.subheartflow_manager.force_change_state(subheartflow_id, status) - - async def api_get_all_states(self): - """获取所有状态""" - return await self.interest_logger.api_get_all_states() - - async def api_get_subheartflow_cycle_info(self, subheartflow_id: str, history_len: int) -> Optional[dict]: - """获取子心流的循环信息""" - subheartflow = await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id) - if not subheartflow: - logger.warning(f"尝试获取不存在的子心流 {subheartflow_id} 的周期信息") - return None - heartfc_instance = subheartflow.heart_fc_instance - if not heartfc_instance: - logger.warning(f"子心流 {subheartflow_id} 没有心流实例,无法获取周期信息") - return None - - return heartfc_instance.get_cycle_history(last_n=history_len) - - async def api_get_normal_chat_replies(self, subheartflow_id: str, limit: int = 10) -> Optional[List[dict]]: - """获取子心流的NormalChat回复记录 - - Args: - subheartflow_id: 子心流ID - limit: 最大返回数量,默认10条 - - Returns: - Optional[List[dict]]: 回复记录列表,如果子心流不存在则返回None - """ - subheartflow = await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id) - if not subheartflow: - logger.warning(f"尝试获取不存在的子心流 {subheartflow_id} 的NormalChat回复记录") - return None - - return subheartflow.get_normal_chat_recent_replies(limit) - - async def heartflow_start_working(self): - """启动后台任务""" - await self.background_task_manager.start_tasks() - logger.info("[Heartflow] 后台任务已启动") - - # 根本不会用到这个函数吧,那样麦麦直接死了 - async def stop_working(self): - """停止所有任务和子心流""" - logger.info("[Heartflow] 正在停止任务和子心流...") - await self.background_task_manager.stop_tasks() - await self.subheartflow_manager.deactivate_all_subflows() - logger.info("[Heartflow] 所有任务和子心流已停止") + return await self.force_change_state(subheartflow_id, status) + + async def force_change_state(self, subflow_id: Any, target_state: ChatState) -> bool: + """强制改变指定子心流的状态""" + subflow = self.subheartflows.get(subflow_id) + if not subflow: + logger.warning(f"[强制状态转换]尝试转换不存在的子心流{subflow_id} 到 {target_state.value}") + return False + await subflow.change_chat_state(target_state) + logger.info(f"[强制状态转换]子心流 {subflow_id} 已转换到 {target_state.value}") + return True heartflow = Heartflow() diff --git a/src/chat/heart_flow/heartflow_message_processor.py b/src/chat/heart_flow/heartflow_message_processor.py index 56f4a73e2..f68139058 100644 --- a/src/chat/heart_flow/heartflow_message_processor.py +++ b/src/chat/heart_flow/heartflow_message_processor.py @@ -10,29 +10,13 @@ from src.common.logger import get_logger import re import math import traceback -from typing import Optional, Tuple +from typing import Tuple from src.person_info.relationship_manager import get_relationship_manager -# from ..message_receive.message_buffer import message_buffer logger = get_logger("chat") - -async def _handle_error(error: Exception, context: str, message: Optional[MessageRecv] = None) -> None: - """统一的错误处理函数 - - Args: - error: 捕获到的异常 - context: 错误发生的上下文描述 - message: 可选的消息对象,用于记录相关消息内容 - """ - logger.error(f"{context}: {error}") - logger.error(traceback.format_exc()) - if message and hasattr(message, "raw_message"): - logger.error(f"相关消息原始内容: {message.raw_message}") - - async def _process_relationship(message: MessageRecv) -> None: """处理用户关系逻辑 @@ -149,4 +133,5 @@ class HeartFCMessageReceiver: await _process_relationship(message) 
except Exception as e: - await _handle_error(e, "消息处理失败", message) + logger.error(f"消息处理失败: {e}") + print(traceback.format_exc()) diff --git a/src/chat/heart_flow/sub_heartflow.py b/src/chat/heart_flow/sub_heartflow.py index 6dee805a0..51b663dfe 100644 --- a/src/chat/heart_flow/sub_heartflow.py +++ b/src/chat/heart_flow/sub_heartflow.py @@ -44,10 +44,6 @@ class SubHeartflow: # 兴趣消息集合 self.interest_dict: Dict[str, tuple[MessageRecv, float, bool]] = {} - # 活动状态管理 - self.should_stop = False # 停止标志 - self.task: Optional[asyncio.Task] = None # 后台任务 - # focus模式退出冷却时间管理 self.last_focus_exit_time: float = 0 # 上次退出focus模式的时间 @@ -211,10 +207,6 @@ class SubHeartflow: await asyncio.wait_for(self.heart_fc_instance.start(), timeout=15.0) logger.info(f"{log_prefix} HeartFChatting 循环已启动。") return True - except asyncio.TimeoutError: - logger.error(f"{log_prefix} 启动现有 HeartFChatting 循环超时") - # 超时时清理实例,准备重新创建 - self.heart_fc_instance = None except Exception as e: logger.error(f"{log_prefix} 尝试启动现有 HeartFChatting 循环时出错: {e}") logger.error(traceback.format_exc()) @@ -231,7 +223,6 @@ class SubHeartflow: logger.debug(f"{log_prefix} 创建新的 HeartFChatting 实例") self.heart_fc_instance = HeartFChatting( chat_id=self.subheartflow_id, - # observations=self.observations, on_stop_focus_chat=self._handle_stop_focus_chat_request, ) @@ -241,10 +232,6 @@ class SubHeartflow: logger.debug(f"{log_prefix} 麦麦已成功进入专注聊天模式 (新实例已启动)。") return True - except asyncio.TimeoutError: - logger.error(f"{log_prefix} 创建或启动新 HeartFChatting 实例超时") - self.heart_fc_instance = None # 超时时清理实例 - return False except Exception as e: logger.error(f"{log_prefix} 创建或启动 HeartFChatting 实例时出错: {e}") logger.error(traceback.format_exc()) @@ -255,8 +242,6 @@ class SubHeartflow: logger.error(f"{self.log_prefix} _start_heart_fc_chat 执行时出错: {e}") logger.error(traceback.format_exc()) return False - finally: - logger.debug(f"{self.log_prefix} _start_heart_fc_chat 完成") async def change_chat_state(self, new_state: ChatState) -> None: """ @@ -312,25 +297,6 @@ class SubHeartflow: f"{log_prefix} 尝试将状态从 {current_state.value} 变为 {new_state.value},但未成功或未执行更改。" ) - - def get_normal_chat_last_speak_time(self) -> float: - if self.normal_chat_instance: - return self.normal_chat_instance.last_speak_time - return 0 - - def get_normal_chat_recent_replies(self, limit: int = 10) -> List[dict]: - """获取NormalChat实例的最近回复记录 - - Args: - limit: 最大返回数量,默认10条 - - Returns: - List[dict]: 最近的回复记录列表,如果没有NormalChat实例则返回空列表 - """ - if self.normal_chat_instance: - return self.normal_chat_instance.get_recent_replies(limit) - return [] - def add_message_to_normal_chat_cache(self, message: MessageRecv, interest_value: float, is_mentioned: bool): self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned) # 如果字典长度超过10,删除最旧的消息 @@ -338,55 +304,6 @@ class SubHeartflow: oldest_key = next(iter(self.interest_dict)) self.interest_dict.pop(oldest_key) - def get_normal_chat_action_manager(self): - """获取NormalChat的ActionManager实例 - - Returns: - ActionManager: NormalChat的ActionManager实例,如果不存在则返回None - """ - if self.normal_chat_instance: - return self.normal_chat_instance.get_action_manager() - return None - - async def get_full_state(self) -> dict: - """获取子心流的完整状态,包括兴趣、思维和聊天状态。""" - return { - "interest_state": "interest_state", - "chat_state": self.chat_state.chat_status.value, - "chat_state_changed_time": self.chat_state_changed_time, - } - - async def shutdown(self): - """安全地关闭子心流及其管理的任务""" - if self.should_stop: - logger.info(f"{self.log_prefix} 子心流已在关闭过程中。") - return - - 
logger.info(f"{self.log_prefix} 开始关闭子心流...") - self.should_stop = True # 标记为停止,让后台任务退出 - - # 使用新的停止方法 - await self._stop_normal_chat() - await self._stop_heart_fc_chat() - - # 取消可能存在的旧后台任务 (self.task) - if self.task and not self.task.done(): - logger.debug(f"{self.log_prefix} 取消子心流主任务 (Shutdown)...") - self.task.cancel() - try: - await asyncio.wait_for(self.task, timeout=1.0) # 给点时间响应取消 - except asyncio.CancelledError: - logger.debug(f"{self.log_prefix} 子心流主任务已取消 (Shutdown)。") - except asyncio.TimeoutError: - logger.warning(f"{self.log_prefix} 等待子心流主任务取消超时 (Shutdown)。") - except Exception as e: - logger.error(f"{self.log_prefix} 等待子心流主任务取消时发生错误 (Shutdown): {e}") - - self.task = None # 清理任务引用 - self.chat_state.chat_status = ChatState.ABSENT # 状态重置为不参与 - - logger.info(f"{self.log_prefix} 子心流关闭完成。") - def is_in_focus_cooldown(self) -> bool: """检查是否在focus模式的冷却期内 diff --git a/src/chat/heart_flow/subheartflow_manager.py b/src/chat/heart_flow/subheartflow_manager.py deleted file mode 100644 index 587234cba..000000000 --- a/src/chat/heart_flow/subheartflow_manager.py +++ /dev/null @@ -1,337 +0,0 @@ -import asyncio -import time -from typing import Dict, Any, Optional, List -from src.common.logger import get_logger -from src.chat.message_receive.chat_stream import get_chat_manager -from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState - - -# 初始化日志记录器 - -logger = get_logger("subheartflow_manager") - -# 子心流管理相关常量 -INACTIVE_THRESHOLD_SECONDS = 3600 # 子心流不活跃超时时间(秒) -NORMAL_CHAT_TIMEOUT_SECONDS = 30 * 60 # 30分钟 - - -async def _try_set_subflow_absent_internal(subflow: "SubHeartflow", log_prefix: str) -> bool: - """ - 尝试将给定的子心流对象状态设置为 ABSENT (内部方法,不处理锁)。 - - Args: - subflow: 子心流对象。 - log_prefix: 用于日志记录的前缀 (例如 "[子心流管理]" 或 "[停用]")。 - - Returns: - bool: 如果状态成功变为 ABSENT 或原本就是 ABSENT,返回 True;否则返回 False。 - """ - flow_id = subflow.subheartflow_id - stream_name = get_chat_manager().get_stream_name(flow_id) or flow_id - - if subflow.chat_state.chat_status != ChatState.ABSENT: - logger.debug(f"{log_prefix} 设置 {stream_name} 状态为 ABSENT") - try: - await subflow.change_chat_state(ChatState.ABSENT) - # 再次检查以确认状态已更改 (change_chat_state 内部应确保) - if subflow.chat_state.chat_status == ChatState.ABSENT: - return True - else: - logger.warning( - f"{log_prefix} 调用 change_chat_state 后,{stream_name} 状态仍为 {subflow.chat_state.chat_status.value}" - ) - return False - except Exception as e: - logger.error(f"{log_prefix} 设置 {stream_name} 状态为 ABSENT 时失败: {e}", exc_info=True) - return False - else: - logger.debug(f"{log_prefix} {stream_name} 已是 ABSENT 状态") - return True # 已经是目标状态,视为成功 - - -class SubHeartflowManager: - """管理所有活跃的 SubHeartflow 实例。""" - - def __init__(self): - self.subheartflows: Dict[Any, "SubHeartflow"] = {} - self._lock = asyncio.Lock() # 用于保护 self.subheartflows 的访问 - - async def force_change_state(self, subflow_id: Any, target_state: ChatState) -> bool: - """强制改变指定子心流的状态""" - async with self._lock: - subflow = self.subheartflows.get(subflow_id) - if not subflow: - logger.warning(f"[强制状态转换]尝试转换不存在的子心流{subflow_id} 到 {target_state.value}") - return False - await subflow.change_chat_state(target_state) - logger.info(f"[强制状态转换]子心流 {subflow_id} 已转换到 {target_state.value}") - return True - - def get_all_subheartflows(self) -> List["SubHeartflow"]: - """获取所有当前管理的 SubHeartflow 实例列表 (快照)。""" - return list(self.subheartflows.values()) - - async def get_or_create_subheartflow(self, subheartflow_id: Any) -> Optional["SubHeartflow"]: - """获取或创建指定ID的子心流实例 - - Args: - subheartflow_id: 子心流唯一标识符 - mai_states 参数已被移除,使用 
self.mai_state_info - - Returns: - 成功返回SubHeartflow实例,失败返回None - """ - async with self._lock: - # 检查是否已存在该子心流 - if subheartflow_id in self.subheartflows: - subflow = self.subheartflows[subheartflow_id] - if subflow.should_stop: - logger.warning(f"尝试获取已停止的子心流 {subheartflow_id},正在重新激活") - subflow.should_stop = False # 重置停止标志 - return subflow - - try: - new_subflow = SubHeartflow( - subheartflow_id, - ) - - # 然后再进行异步初始化,此时 SubHeartflow 内部若需启动 HeartFChatting,就能拿到 observation - await new_subflow.initialize() - - # 注册子心流 - self.subheartflows[subheartflow_id] = new_subflow - heartflow_name = get_chat_manager().get_stream_name(subheartflow_id) or subheartflow_id - logger.info(f"[{heartflow_name}] 开始接收消息") - - return new_subflow - except Exception as e: - logger.error(f"创建子心流 {subheartflow_id} 失败: {e}", exc_info=True) - return None - - async def sleep_subheartflow(self, subheartflow_id: Any, reason: str) -> bool: - """停止指定的子心流并将其状态设置为 ABSENT""" - log_prefix = "[子心流管理]" - async with self._lock: # 加锁以安全访问字典 - subheartflow = self.subheartflows.get(subheartflow_id) - - stream_name = get_chat_manager().get_stream_name(subheartflow_id) or subheartflow_id - logger.info(f"{log_prefix} 正在停止 {stream_name}, 原因: {reason}") - - # 调用内部方法处理状态变更 - success = await _try_set_subflow_absent_internal(subheartflow, log_prefix) - - return success - # 锁在此处自动释放 - - def get_inactive_subheartflows(self, max_age_seconds=INACTIVE_THRESHOLD_SECONDS): - """识别并返回需要清理的不活跃(处于ABSENT状态超过一小时)子心流(id, 原因)""" - _current_time = time.time() - flows_to_stop = [] - - for subheartflow_id, subheartflow in list(self.subheartflows.items()): - state = subheartflow.chat_state.chat_status - if state != ChatState.ABSENT: - continue - subheartflow.update_last_chat_state_time() - _absent_last_time = subheartflow.chat_state_last_time - flows_to_stop.append(subheartflow_id) - - return flows_to_stop - - async def deactivate_all_subflows(self): - """将所有子心流的状态更改为 ABSENT (例如主状态变为OFFLINE时调用)""" - log_prefix = "[停用]" - changed_count = 0 - processed_count = 0 - - async with self._lock: # 获取锁以安全迭代 - # 使用 list() 创建一个当前值的快照,防止在迭代时修改字典 - flows_to_update = list(self.subheartflows.values()) - processed_count = len(flows_to_update) - if not flows_to_update: - logger.debug(f"{log_prefix} 无活跃子心流,无需操作") - return - - for subflow in flows_to_update: - # 记录原始状态,以便统计实际改变的数量 - original_state_was_absent = subflow.chat_state.chat_status == ChatState.ABSENT - - success = await _try_set_subflow_absent_internal(subflow, log_prefix) - - # 如果成功设置为 ABSENT 且原始状态不是 ABSENT,则计数 - if success and not original_state_was_absent: - if subflow.chat_state.chat_status == ChatState.ABSENT: - changed_count += 1 - else: - # 这种情况理论上不应发生,如果内部方法返回 True 的话 - stream_name = ( - get_chat_manager().get_stream_name(subflow.subheartflow_id) or subflow.subheartflow_id - ) - logger.warning(f"{log_prefix} 内部方法声称成功但 {stream_name} 状态未变为 ABSENT。") - # 锁在此处自动释放 - - logger.info( - f"{log_prefix} 完成,共处理 {processed_count} 个子心流,成功将 {changed_count} 个非 ABSENT 子心流的状态更改为 ABSENT。" - ) - - # async def sbhf_normal_into_focus(self): - # """评估子心流兴趣度,满足条件则提升到FOCUSED状态(基于start_hfc_probability)""" - # try: - # for sub_hf in list(self.subheartflows.values()): - # flow_id = sub_hf.subheartflow_id - # stream_name = get_chat_manager().get_stream_name(flow_id) or flow_id - - # # 跳过已经是FOCUSED状态的子心流 - # if sub_hf.chat_state.chat_status == ChatState.FOCUSED: - # continue - - # if sub_hf.interest_chatting.start_hfc_probability == 0: - # continue - # else: - # logger.debug( - # f"{stream_name},现在状态: 
{sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}" - # ) - - # if random.random() >= sub_hf.interest_chatting.start_hfc_probability: - # continue - - # # 获取最新状态并执行提升 - # current_subflow = self.subheartflows.get(flow_id) - # if not current_subflow: - # continue - - # logger.info( - # f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})" - # ) - - # # 执行状态提升 - # await current_subflow.change_chat_state(ChatState.FOCUSED) - - # except Exception as e: - # logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True) - - async def sbhf_focus_into_normal(self, subflow_id: Any): - """ - 接收来自 HeartFChatting 的请求,将特定子心流的状态转换为 NORMAL。 - 通常在连续多次 "no_reply" 后被调用。 - 对于私聊和群聊,都转换为 NORMAL。 - - Args: - subflow_id: 需要转换状态的子心流 ID。 - """ - async with self._lock: - subflow = self.subheartflows.get(subflow_id) - if not subflow: - logger.warning(f"[状态转换请求] 尝试转换不存在的子心流 {subflow_id} 到 NORMAL") - return - - stream_name = get_chat_manager().get_stream_name(subflow_id) or subflow_id - current_state = subflow.chat_state.chat_status - - if current_state == ChatState.FOCUSED: - target_state = ChatState.NORMAL - log_reason = "转为NORMAL" - - logger.info( - f"[状态转换请求] 接收到请求,将 {stream_name} (当前: {current_state.value}) 尝试转换为 {target_state.value} ({log_reason})" - ) - try: - # 从HFC到CHAT时,清空兴趣字典 - subflow.interest_dict.clear() - await subflow.change_chat_state(target_state) - final_state = subflow.chat_state.chat_status - if final_state == target_state: - logger.debug(f"[状态转换请求] {stream_name} 状态已成功转换为 {final_state.value}") - else: - logger.warning( - f"[状态转换请求] 尝试将 {stream_name} 转换为 {target_state.value} 后,状态实际为 {final_state.value}" - ) - except Exception as e: - logger.error( - f"[状态转换请求] 转换 {stream_name} 到 {target_state.value} 时出错: {e}", exc_info=True - ) - elif current_state == ChatState.ABSENT: - logger.debug(f"[状态转换请求] {stream_name} 处于 ABSENT 状态,尝试转为 NORMAL") - await subflow.change_chat_state(ChatState.NORMAL) - else: - logger.debug(f"[状态转换请求] {stream_name} 当前状态为 {current_state.value},无需转换") - - async def delete_subflow(self, subheartflow_id: Any): - """删除指定的子心流。""" - async with self._lock: - subflow = self.subheartflows.pop(subheartflow_id, None) - if subflow: - logger.info(f"正在删除 SubHeartflow: {subheartflow_id}...") - try: - # 调用 shutdown 方法确保资源释放 - await subflow.shutdown() - logger.info(f"SubHeartflow {subheartflow_id} 已成功删除。") - except Exception as e: - logger.error(f"删除 SubHeartflow {subheartflow_id} 时出错: {e}", exc_info=True) - else: - logger.warning(f"尝试删除不存在的 SubHeartflow: {subheartflow_id}") - - # --- 新增:处理私聊从 ABSENT 直接到 FOCUSED 的逻辑 --- # - async def sbhf_absent_private_into_focus(self): - """检查 ABSENT 状态的私聊子心流是否有新活动,若有则直接转换为 FOCUSED。""" - log_prefix_task = "[私聊激活检查]" - transitioned_count = 0 - checked_count = 0 - - async with self._lock: - # --- 筛选出所有 ABSENT 状态的私聊子心流 --- # - eligible_subflows = [ - hf - for hf in self.subheartflows.values() - if hf.chat_state.chat_status == ChatState.ABSENT and not hf.is_group_chat - ] - checked_count = len(eligible_subflows) - - if not eligible_subflows: - # logger.debug(f"{log_prefix_task} 没有 ABSENT 状态的私聊子心流可以评估。") - return - - # --- 遍历评估每个符合条件的私聊 --- # - for sub_hf in eligible_subflows: - flow_id = sub_hf.subheartflow_id - stream_name = get_chat_manager().get_stream_name(flow_id) or flow_id - log_prefix = f"[{stream_name}]({log_prefix_task})" - - try: - # --- 检查是否有新活动 --- # - observation = sub_hf._get_primary_observation() # 获取主要观察者 - is_active = False - if observation: - # 检查自上次状态变为 ABSENT 后是否有新消息 - # 使用 
chat_state_changed_time 可能更精确 - # 加一点点缓冲时间(例如 1 秒)以防时间戳完全相等 - timestamp_to_check = sub_hf.chat_state_changed_time - 1 - has_new = await observation.has_new_messages_since(timestamp_to_check) - if has_new: - is_active = True - logger.debug(f"{log_prefix} 检测到新消息,标记为活跃。") - else: - logger.warning(f"{log_prefix} 无法获取主要观察者来检查活动状态。") - - # --- 如果活跃,则尝试转换 --- # - if is_active: - await sub_hf.change_chat_state(ChatState.FOCUSED) - # 确认转换成功 - if sub_hf.chat_state.chat_status == ChatState.FOCUSED: - transitioned_count += 1 - logger.info(f"{log_prefix} 成功进入 FOCUSED 状态。") - else: - logger.warning( - f"{log_prefix} 尝试进入 FOCUSED 状态失败。当前状态: {sub_hf.chat_state.chat_status.value}" - ) - # else: # 不活跃,无需操作 - # logger.debug(f"{log_prefix} 未检测到新活动,保持 ABSENT。") - - except Exception as e: - logger.error(f"{log_prefix} 检查私聊活动或转换状态时出错: {e}", exc_info=True) - - # --- 循环结束后记录总结日志 --- # - if transitioned_count > 0: - logger.debug( - f"{log_prefix_task} 完成,共检查 {checked_count} 个私聊,{transitioned_count} 个转换为 FOCUSED。" - ) diff --git a/src/chat/memory_system/memory_activator.py b/src/chat/memory_system/memory_activator.py index eb783d483..8640f2a88 100644 --- a/src/chat/memory_system/memory_activator.py +++ b/src/chat/memory_system/memory_activator.py @@ -80,12 +80,6 @@ class MemoryActivator: async def activate_memory_with_chat_history(self, target_message, chat_history_prompt) -> List[Dict]: """ 激活记忆 - - Args: - observations: 现有的进行观察后的 观察列表 - - Returns: - List[Dict]: 激活的记忆列表 """ # 如果记忆系统被禁用,直接返回空列表 if not global_config.memory.enable_memory: diff --git a/src/chat/message_receive/__init__.py b/src/chat/message_receive/__init__.py index a900de6b4..d01bea726 100644 --- a/src/chat/message_receive/__init__.py +++ b/src/chat/message_receive/__init__.py @@ -1,6 +1,6 @@ from src.chat.emoji_system.emoji_manager import get_emoji_manager from src.chat.message_receive.chat_stream import get_chat_manager -from src.chat.message_receive.message_sender import message_manager +from src.chat.message_receive.normal_message_sender import message_manager from src.chat.message_receive.storage import MessageStorage diff --git a/src/chat/message_receive/message_sender.py b/src/chat/message_receive/normal_message_sender.py similarity index 100% rename from src/chat/message_receive/message_sender.py rename to src/chat/message_receive/normal_message_sender.py diff --git a/src/chat/focus_chat/heartFC_sender.py b/src/chat/message_receive/uni_message_sender.py similarity index 100% rename from src/chat/focus_chat/heartFC_sender.py rename to src/chat/message_receive/uni_message_sender.py diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 5e6b14f63..a1f1e1bdf 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -12,7 +12,7 @@ from src.chat.utils.timer_calculator import Timer from src.common.message_repository import count_messages from src.chat.utils.prompt_builder import global_prompt_manager from ..message_receive.message import MessageSending, MessageRecv, MessageThinking, MessageSet -from src.chat.message_receive.message_sender import message_manager +from src.chat.message_receive.normal_message_sender import message_manager from src.chat.normal_chat.willing.willing_manager import get_willing_manager from src.chat.planner_actions.action_manager import ActionManager from src.person_info.relationship_builder_manager import relationship_builder_manager diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py index 
44acabf9c..439558dd1 100644 --- a/src/chat/planner_actions/action_modifier.py +++ b/src/chat/planner_actions/action_modifier.py @@ -1,7 +1,6 @@ from typing import List, Optional, Any, Dict -from src.chat.focus_chat.observation.observation import Observation from src.common.logger import get_logger -from src.chat.focus_chat.observation.hfcloop_observation import HFCloopObservation +from src.chat.focus_chat.focus_loop_info import FocusLoopInfo from src.chat.message_receive.chat_stream import get_chat_manager from src.config.config import global_config from src.llm_models.utils_model import LLMRequest @@ -44,8 +43,8 @@ class ActionModifier: async def modify_actions( self, + loop_info = None, mode: str = "focus", - observations: Optional[List[Observation]] = None, message_content: str = "", ): """ @@ -83,13 +82,10 @@ class ActionModifier: chat_content = chat_content + "\n" + f"现在,最新的消息是:{message_content}" # === 第一阶段:传统观察处理 === - if observations: - for obs in observations: - if isinstance(obs, HFCloopObservation): - # 获取适用于FOCUS模式的动作 - removals_from_loop = await self.analyze_loop_actions(obs) - if removals_from_loop: - removals_s1.extend(removals_from_loop) + if loop_info: + removals_from_loop = await self.analyze_loop_actions(loop_info) + if removals_from_loop: + removals_s1.extend(removals_from_loop) # 检查动作的关联类型 chat_context = self.chat_stream.context @@ -466,7 +462,7 @@ class ActionModifier: logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}") return False - async def analyze_loop_actions(self, obs: HFCloopObservation) -> List[tuple[str, str]]: + async def analyze_loop_actions(self, obs: FocusLoopInfo) -> List[tuple[str, str]]: """分析最近的循环内容并决定动作的移除 Returns: diff --git a/src/chat/planner_actions/planner_focus.py b/src/chat/planner_actions/planner_focus.py index c52b8b486..2aef5f429 100644 --- a/src/chat/planner_actions/planner_focus.py +++ b/src/chat/planner_actions/planner_focus.py @@ -1,18 +1,18 @@ import json # <--- 确保导入 json import traceback -from typing import List, Dict, Any, Optional +from typing import Dict, Any, Optional from rich.traceback import install from src.llm_models.utils_model import LLMRequest from src.config.config import global_config -from src.chat.focus_chat.info.info_base import InfoBase -from src.chat.focus_chat.info.obs_info import ObsInfo -from src.chat.focus_chat.info.action_info import ActionInfo from src.common.logger import get_logger from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.planner_actions.action_manager import ActionManager from json_repair import repair_json from src.chat.utils.utils import get_chat_type_and_target_info from datetime import datetime +from src.chat.message_receive.chat_stream import get_chat_manager +from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat +import time logger = get_logger("planner") @@ -38,23 +38,6 @@ def init_prompt(): "simple_planner_prompt", ) - Prompt( - """ -{time_block} -{indentify_block} -你现在需要根据聊天内容,选择的合适的action来参与聊天。 -{chat_context_description},以下是具体的聊天内容: -{chat_content_block} -{moderation_prompt} -现在请你选择合适的action: - -{action_options_text} - -请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容: -""", - "simple_planner_prompt_private", - ) - Prompt( """ 动作:{action_name} @@ -69,8 +52,10 @@ def init_prompt(): class ActionPlanner: - def __init__(self, log_prefix: str, action_manager: ActionManager): - self.log_prefix = log_prefix + def __init__(self, chat_id: str, action_manager: ActionManager): + 
self.chat_id = chat_id + self.log_prefix = f"[{get_chat_manager().get_stream_name(chat_id) or chat_id}]" + self.action_manager = action_manager # LLM规划器配置 self.planner_llm = LLMRequest( @@ -82,17 +67,12 @@ class ActionPlanner: model=global_config.model.utils_small, request_type="focus.planner", # 用于动作规划 ) + + self.last_obs_time_mark = 0.0 - async def plan( - self, all_plan_info: List[InfoBase],loop_start_time: float - ) -> Dict[str, Any]: + async def plan(self) -> Dict[str, Any]: """ 规划器 (Planner): 使用LLM根据上下文决定做出什么动作。 - - 参数: - all_plan_info: 所有计划信息 - running_memorys: 回忆信息 - loop_start_time: 循环开始时间 """ action = "no_reply" # 默认动作 @@ -100,42 +80,36 @@ class ActionPlanner: action_data = {} try: - # 获取观察信息 - extra_info: list[str] = [] - - extra_info = [] - observed_messages = [] - observed_messages_str = "" - chat_type = "group" is_group_chat = True - chat_id = None # 添加chat_id变量 + + message_list_before_now = get_raw_msg_before_timestamp_with_chat( + chat_id=self.chat_id, + timestamp=time.time(), + limit=global_config.chat.max_context_size, + ) - for info in all_plan_info: - if isinstance(info, ObsInfo): - observed_messages = info.get_talking_message() - observed_messages_str = info.get_talking_message_str_truncate_short() - chat_type = info.get_chat_type() - is_group_chat = chat_type == "group" - # 从ObsInfo中获取chat_id - chat_id = info.get_chat_id() - else: - extra_info.append(info.get_processed_info()) + chat_context = build_readable_messages( + messages=message_list_before_now, + timestamp_mode="normal_no_YMD", + read_mark=self.last_obs_time_mark, + truncate=True, + show_actions=True, + ) + + self.last_obs_time_mark = time.time() # 获取聊天类型和目标信息 chat_target_info = None - if chat_id: - try: - # 重新获取更准确的聊天信息 - is_group_chat_updated, chat_target_info = get_chat_type_and_target_info(chat_id) - # 如果获取成功,更新is_group_chat - if is_group_chat_updated is not None: - is_group_chat = is_group_chat_updated - logger.debug( - f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}" - ) - except Exception as e: - logger.warning(f"{self.log_prefix}获取聊天目标信息失败: {e}") - chat_target_info = None + + try: + # 重新获取更准确的聊天信息 + is_group_chat, chat_target_info = get_chat_type_and_target_info(self.chat_id) + logger.debug( + f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}" + ) + except Exception as e: + logger.warning(f"{self.log_prefix}获取聊天目标信息失败: {e}") + chat_target_info = None # 获取经过modify_actions处理后的最终可用动作集 # 注意:动作的激活判定现在在主循环的modify_actions中完成 @@ -164,14 +138,13 @@ class ActionPlanner: ) return { "action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning}, - "observed_messages": observed_messages, } # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- prompt = await self.build_planner_prompt( is_group_chat=is_group_chat, # <-- Pass HFC state chat_target_info=chat_target_info, # <-- 传递获取到的聊天目标信息 - observed_messages_str=observed_messages_str, # <-- Pass local variable + observed_messages_str=chat_context, # <-- Pass local variable current_available_actions=current_available_actions, # <-- Pass determined actions ) @@ -228,9 +201,6 @@ class ActionPlanner: if key not in ["action", "reasoning"]: action_data[key] = value - action_data["loop_start_time"] = loop_start_time - - # 对于reply动作不需要额外处理,因为相关字段已经在上面的循环中添加到action_data if extracted_action not in current_available_actions: logger.warning( @@ -265,7 +235,6 @@ class ActionPlanner: plan_result = { "action_result": action_result, - "observed_messages": observed_messages, "action_prompt": prompt, } @@ -276,7 
+245,7 @@ class ActionPlanner: is_group_chat: bool, # Now passed as argument chat_target_info: Optional[dict], # Now passed as argument observed_messages_str: str, - current_available_actions: Dict[str, ActionInfo], + current_available_actions, ) -> str: """构建 Planner LLM 的提示词 (获取模板并填充数据)""" try: @@ -295,11 +264,9 @@ class ActionPlanner: chat_content_block = "你还未开始聊天" action_options_block = "" - # 根据聊天类型选择不同的动作prompt模板 - action_template_name = "action_prompt_private" if not is_group_chat else "action_prompt" for using_actions_name, using_actions_info in current_available_actions.items(): - using_action_prompt = await global_prompt_manager.get_prompt_async(action_template_name) + if using_actions_info["parameters"]: param_text = "\n" @@ -314,22 +281,13 @@ class ActionPlanner: require_text += f"- {require_item}\n" require_text = require_text.rstrip("\n") - # 根据模板类型决定是否包含description参数 - if action_template_name == "action_prompt_private": - # 私聊模板不包含description参数 - using_action_prompt = using_action_prompt.format( - action_name=using_actions_name, - action_parameters=param_text, - action_require=require_text, - ) - else: - # 群聊模板包含description参数 - using_action_prompt = using_action_prompt.format( - action_name=using_actions_name, - action_description=using_actions_info["description"], - action_parameters=param_text, - action_require=require_text, - ) + using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt") + using_action_prompt = using_action_prompt.format( + action_name=using_actions_name, + action_description=using_actions_info["description"], + action_parameters=param_text, + action_require=require_text, + ) action_options_block += using_action_prompt @@ -347,9 +305,7 @@ class ActionPlanner: bot_core_personality = global_config.personality.personality_core indentify_block = f"你的名字是{bot_name}{bot_nickname},你{bot_core_personality}:" - # 根据聊天类型选择不同的prompt模板 - template_name = "simple_planner_prompt_private" if not is_group_chat else "simple_planner_prompt" - planner_prompt_template = await global_prompt_manager.get_prompt_async(template_name) + planner_prompt_template = await global_prompt_manager.get_prompt_async("simple_planner_prompt") prompt = planner_prompt_template.format( time_block=time_block, chat_context_description=chat_context_description, diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 62ff926f5..befa22230 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -9,7 +9,7 @@ from src.common.logger import get_logger from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.utils.timer_calculator import Timer # <--- Import Timer -from src.chat.focus_chat.heartFC_sender import HeartFCSender +from src.chat.message_receive.uni_message_sender import HeartFCSender from src.chat.utils.utils import get_chat_type_and_target_info from src.chat.message_receive.chat_stream import ChatStream from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp diff --git a/src/chat/utils/statistic.py b/src/chat/utils/statistic.py index bb3f53a1a..25d231c01 100644 --- a/src/chat/utils/statistic.py +++ b/src/chat/utils/statistic.py @@ -1243,7 +1243,7 @@ class StatisticOutputTask(AsyncTask): focus_chat_rows = "" if stat_data[FOCUS_AVG_TIMES_BY_CHAT_ACTION]: # 获取前三个阶段(不包括执行动作) - basic_stages = ["观察", "并行调整动作、处理", "规划器"] + basic_stages = ["观察", "规划器"] existing_basic_stages = [] for stage in basic_stages: # 检查是否有任何聊天流在这个阶段有数据 @@ 
-1352,7 +1352,7 @@ class StatisticOutputTask(AsyncTask): focus_action_stage_rows = "" if stat_data[FOCUS_AVG_TIMES_BY_ACTION]: # 获取所有阶段(按固定顺序) - stage_order = ["观察", "并行调整动作、处理", "规划器", "执行动作"] + stage_order = ["观察", "规划器", "执行动作"] all_stages = [] for stage in stage_order: if any(stage in stage_times for stage_times in stat_data[FOCUS_AVG_TIMES_BY_ACTION].values()): @@ -1618,7 +1618,7 @@ class StatisticOutputTask(AsyncTask): focus_version_stage_rows = "" if stat_data[FOCUS_AVG_TIMES_BY_VERSION]: # 基础三个阶段 - basic_stages = ["观察", "并行调整动作、处理", "规划器"] + basic_stages = ["观察", "规划器"] # 获取所有action类型用于执行时间列 all_action_types_for_exec = set() diff --git a/src/chat/focus_chat/working_memory/memory_item.py b/src/chat/working_memory/memory_item.py similarity index 100% rename from src/chat/focus_chat/working_memory/memory_item.py rename to src/chat/working_memory/memory_item.py diff --git a/src/chat/focus_chat/working_memory/memory_manager.py b/src/chat/working_memory/memory_manager.py similarity index 100% rename from src/chat/focus_chat/working_memory/memory_manager.py rename to src/chat/working_memory/memory_manager.py diff --git a/src/chat/focus_chat/working_memory/working_memory.py b/src/chat/working_memory/working_memory.py similarity index 100% rename from src/chat/focus_chat/working_memory/working_memory.py rename to src/chat/working_memory/working_memory.py diff --git a/src/chat/focus_chat/info_processors/working_memory_processor.py b/src/chat/working_memory/working_memory_processor.py similarity index 98% rename from src/chat/focus_chat/info_processors/working_memory_processor.py rename to src/chat/working_memory/working_memory_processor.py index ad2c88876..562278462 100644 --- a/src/chat/focus_chat/info_processors/working_memory_processor.py +++ b/src/chat/working_memory/working_memory_processor.py @@ -7,7 +7,6 @@ import traceback from src.common.logger import get_logger from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.message_receive.chat_stream import get_chat_manager -from .base_processor import BaseProcessor from typing import List from src.chat.focus_chat.observation.working_observation import WorkingMemoryObservation from src.chat.focus_chat.working_memory.working_memory import WorkingMemory @@ -44,12 +43,10 @@ def init_prompt(): Prompt(memory_proces_prompt, "prompt_memory_proces") -class WorkingMemoryProcessor(BaseProcessor): +class WorkingMemoryProcessor: log_prefix = "工作记忆" def __init__(self, subheartflow_id: str): - super().__init__() - self.subheartflow_id = subheartflow_id self.llm_model = LLMRequest( diff --git a/src/common/logger.py b/src/common/logger.py index c0fa7be2d..6be06d241 100644 --- a/src/common/logger.py +++ b/src/common/logger.py @@ -352,7 +352,6 @@ MODULE_COLORS = { "heartflow_utils": "\033[38;5;219m", # 浅粉色 "sub_heartflow": "\033[38;5;207m", # 粉紫色 "subheartflow_manager": "\033[38;5;201m", # 深粉色 - "observation": "\033[38;5;141m", # 紫色 "background_tasks": "\033[38;5;240m", # 灰色 "chat_message": "\033[38;5;45m", # 青色 "chat_stream": "\033[38;5;51m", # 亮青色 diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 1c28ab7c8..e8ecb2885 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -295,20 +295,12 @@ class NormalChatConfig(ConfigBase): class FocusChatConfig(ConfigBase): """专注聊天配置类""" - compressed_length: int = 5 - """心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5""" - - compress_length_limit: int = 5 - """最多压缩份数,超过该数值的压缩上下文会被删除""" - think_interval: float = 1 """思考间隔(秒)""" 
consecutive_replies: float = 1 """连续回复能力,值越高,麦麦连续回复的概率越高""" - working_memory_processor: bool = False - """是否启用工作记忆处理器""" @dataclass diff --git a/src/experimental/PFC/message_sender.py b/src/experimental/PFC/message_sender.py index 841ebe450..d0816d8b5 100644 --- a/src/experimental/PFC/message_sender.py +++ b/src/experimental/PFC/message_sender.py @@ -5,7 +5,7 @@ from src.chat.message_receive.chat_stream import ChatStream from src.chat.message_receive.message import Message from maim_message import UserInfo, Seg from src.chat.message_receive.message import MessageSending, MessageSet -from src.chat.message_receive.message_sender import message_manager +from src.chat.message_receive.normal_message_sender import message_manager from src.chat.message_receive.storage import MessageStorage from src.config.config import global_config from rich.traceback import install diff --git a/src/main.py b/src/main.py index 768913c4b..fae064773 100644 --- a/src/main.py +++ b/src/main.py @@ -10,8 +10,7 @@ from src.manager.mood_manager import MoodPrintTask, MoodUpdateTask from src.chat.emoji_system.emoji_manager import get_emoji_manager from src.chat.normal_chat.willing.willing_manager import get_willing_manager from src.chat.message_receive.chat_stream import get_chat_manager -from src.chat.heart_flow.heartflow import heartflow -from src.chat.message_receive.message_sender import message_manager +from src.chat.message_receive.normal_message_sender import message_manager from src.chat.message_receive.storage import MessageStorage from src.config.config import global_config from src.chat.message_receive.bot import chat_bot @@ -142,10 +141,6 @@ class MainSystem: await message_manager.start() logger.info("全局消息管理器启动成功") - # 启动心流系统主循环 - asyncio.create_task(heartflow.heartflow_start_working()) - logger.info("心流系统启动成功") - init_time = int(1000 * (time.time() - init_start_time)) logger.info(f"初始化完成,神经元放电{init_time}次") except Exception as e: diff --git a/src/plugin_system/apis/chat_api.py b/src/plugin_system/apis/chat_api.py index 23a5a3be0..b56142a47 100644 --- a/src/plugin_system/apis/chat_api.py +++ b/src/plugin_system/apis/chat_api.py @@ -17,7 +17,6 @@ from src.common.logger import get_logger # 导入依赖 from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager -from src.chat.focus_chat.info.obs_info import ObsInfo logger = get_logger("chat_api") @@ -193,39 +192,6 @@ class ChatManager: logger.error(f"[ChatAPI] 获取聊天流信息失败: {e}") return {} - @staticmethod - def get_recent_messages_from_obs(observations: List[Any], count: int = 5) -> List[Dict[str, Any]]: - """从观察对象获取最近的消息 - - Args: - observations: 观察对象列表 - count: 要获取的消息数量 - - Returns: - List[Dict]: 消息列表,每个消息包含发送者、内容等信息 - """ - messages = [] - - try: - if observations and len(observations) > 0: - obs = observations[0] - if hasattr(obs, "get_talking_message"): - obs: ObsInfo - raw_messages = obs.get_talking_message() - # 转换为简化格式 - for msg in raw_messages[-count:]: - simple_msg = { - "sender": msg.get("sender", "未知"), - "content": msg.get("content", ""), - "timestamp": msg.get("timestamp", 0), - } - messages.append(simple_msg) - logger.debug(f"[ChatAPI] 获取到 {len(messages)} 条最近消息") - except Exception as e: - logger.error(f"[ChatAPI] 获取最近消息失败: {e}") - - return messages - @staticmethod def get_streams_summary() -> Dict[str, int]: """获取聊天流统计摘要 diff --git a/src/plugin_system/apis/send_api.py b/src/plugin_system/apis/send_api.py index c0486e164..7a6bd1be1 100644 --- a/src/plugin_system/apis/send_api.py +++ b/src/plugin_system/apis/send_api.py @@ -28,7 +28,7 @@ 
from src.common.logger import get_logger # 导入依赖 from src.chat.message_receive.chat_stream import get_chat_manager -from src.chat.focus_chat.heartFC_sender import HeartFCSender +from src.chat.message_receive.uni_message_sender import HeartFCSender from src.chat.message_receive.message import MessageSending, MessageRecv from src.chat.utils.chat_message_builder import get_raw_msg_before_timestamp_with_chat from src.person_info.person_info import get_person_info_manager diff --git a/src/plugin_system/base/base_action.py b/src/plugin_system/base/base_action.py index a68091b96..cc5cbc261 100644 --- a/src/plugin_system/base/base_action.py +++ b/src/plugin_system/base/base_action.py @@ -44,7 +44,6 @@ class BaseAction(ABC): reasoning: 执行该动作的理由 cycle_timers: 计时器字典 thinking_id: 思考ID - observations: 观察列表 expressor: 表达器对象 replyer: 回复器对象 chat_stream: 聊天流对象 diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 478d62ed8..e269cdddf 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "3.4.0" +version = "3.5.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -61,12 +61,15 @@ enable_relationship = true # 是否启用关系系统 relation_frequency = 1 # 关系频率,麦麦构建关系的速度,仅在normal_chat模式下有效 [chat] #麦麦的聊天通用设置 -chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,自动auto:在普通模式和专注模式之间自动切换 -# chat_mode = "focus" -# chat_mode = "auto" +chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,在普通模式和专注模式之间自动切换 +auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天 +exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天 +# 普通模式下,麦麦会针对感兴趣的消息进行回复,token消耗量较低 +# 专注模式下,麦麦会进行主动的观察,并给出回复,token消耗量略高,但是回复时机更准确 +# 自动模式下,麦麦会根据消息内容自动切换到专注模式或普通模式 max_context_size = 18 # 上下文长度 - +thinking_timeout = 20 # 麦麦一次回复最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢) replyer_random_probability = 0.5 # 首要replyer模型被选择的概率 talk_frequency = 1 # 麦麦回复频率,越高,麦麦回复越频繁 @@ -96,13 +99,6 @@ talk_frequency_adjust = [ # - 时间支持跨天,例如 "00:10,0.3" 表示从凌晨0:10开始使用频率0.3 # - 系统会自动将 "platform:id:type" 转换为内部的哈希chat_id进行匹配 -auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天 -exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天 -# 普通模式下,麦麦会针对感兴趣的消息进行回复,token消耗量较低 -# 专注模式下,麦麦会进行主动的观察和回复,并给出回复,token消耗量较高 -# 自动模式下,麦麦会根据消息内容自动切换到专注模式或普通模式 - -thinking_timeout = 30 # 麦麦一次回复最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢) [message_receive] # 以下是消息过滤,可以根据规则过滤特定消息,将不会读取这些消息 @@ -127,9 +123,6 @@ enable_planner = true # 是否启用动作规划器(与focus_chat共享actions [focus_chat] #专注聊天 think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗 consecutive_replies = 1 # 连续回复能力,值越高,麦麦连续回复的概率越高 -compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5 -compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除 -working_memory_processor = false # 是否启用工作记忆处理器,消耗量大 [tool] enable_in_normal_chat = false # 是否在普通聊天中启用工具 From 18778d2dc7296e6e3e2ea1a96b6c6fdbad4ab7b6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 12:17:38 +0000 Subject: [PATCH 49/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/api/apiforgui.py | 1 - src/chat/focus_chat/focus_loop_info.py | 3 +- src/chat/focus_chat/heartFC_chat.py | 14 +++----- src/chat/focus_chat/hfc_utils.py | 1 - src/chat/heart_flow/chat_state_info.py | 1 + src/chat/heart_flow/heartflow.py | 4 +-- .../heart_flow/heartflow_message_processor.py | 1 + src/chat/planner_actions/action_modifier.py | 35 
+++++++++---------- src/chat/planner_actions/planner_focus.py | 15 +++----- src/chat/utils/utils.py | 3 +- src/config/official_configs.py | 1 - 11 files changed, 34 insertions(+), 45 deletions(-) diff --git a/src/api/apiforgui.py b/src/api/apiforgui.py index 01685939e..058c6fc96 100644 --- a/src/api/apiforgui.py +++ b/src/api/apiforgui.py @@ -19,7 +19,6 @@ async def forced_change_subheartflow_status(subheartflow_id: str, status: ChatSt return False - async def get_all_states(): """获取所有状态""" all_states = await heartflow.api_get_all_states() diff --git a/src/chat/focus_chat/focus_loop_info.py b/src/chat/focus_chat/focus_loop_info.py index 2389f10c9..342368df7 100644 --- a/src/chat/focus_chat/focus_loop_info.py +++ b/src/chat/focus_chat/focus_loop_info.py @@ -76,7 +76,6 @@ class FocusLoopInfo: else: cycle_info_block = "\n" - # 获取history_loop中最新添加的 if self.history_loop: last_loop = self.history_loop[0] @@ -89,4 +88,4 @@ class FocusLoopInfo: else: cycle_info_block += f"距离你上一次阅读消息并思考和规划,已经过去了{time_diff}秒\n" else: - cycle_info_block += "你还没看过消息\n" \ No newline at end of file + cycle_info_block += "你还没看过消息\n" diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index ac95a984b..e0d679e0f 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -26,6 +26,7 @@ install(extra_lines=3) logger = get_logger("hfc") # Logger Name Changed + class HeartFChatting: """ 管理一个连续的Focus Chat循环 @@ -63,10 +64,7 @@ class HeartFChatting: self.loop_info: FocusLoopInfo = FocusLoopInfo(observe_id=self.stream_id) self.action_manager = ActionManager() - self.action_planner = ActionPlanner( - chat_id = self.stream_id, - action_manager=self.action_manager - ) + self.action_planner = ActionPlanner(chat_id=self.stream_id, action_manager=self.action_manager) self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id) self._processing_lock = asyncio.Lock() @@ -238,7 +236,6 @@ class HeartFChatting: self._current_cycle_detail.set_loop_info(loop_info) - self.loop_info.add_loop_info(self._current_cycle_detail) self._current_cycle_detail.timers = cycle_timers @@ -253,7 +250,6 @@ class HeartFChatting: formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒" timer_strings.append(f"{name}: {formatted_time}") - logger.info( f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考," f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, " @@ -275,7 +271,7 @@ class HeartFChatting: self.performance_logger.record_cycle(cycle_performance_data) except Exception as perf_e: logger.warning(f"{self.log_prefix} 记录性能数据失败: {perf_e}") - + await asyncio.sleep(global_config.focus_chat.think_interval) except asyncio.CancelledError: @@ -352,7 +348,7 @@ class HeartFChatting: try: # 调用完整的动作修改流程 await self.action_modifier.modify_actions( - loop_info = self.loop_info, + loop_info=self.loop_info, mode="focus", ) except Exception as e: @@ -371,7 +367,7 @@ class HeartFChatting: plan_result.get("action_result", {}).get("action_data", {}), plan_result.get("action_result", {}).get("reasoning", "未提供理由"), ) - + action_data["loop_start_time"] = loop_start_time if action_type == "reply": diff --git a/src/chat/focus_chat/hfc_utils.py b/src/chat/focus_chat/hfc_utils.py index 7eeb9a7ab..11b04c801 100644 --- a/src/chat/focus_chat/hfc_utils.py +++ b/src/chat/focus_chat/hfc_utils.py @@ -104,7 +104,6 @@ class CycleDetail: self.loop_action_info = loop_info["loop_action_info"] - async def 
create_empty_anchor_message( platform: str, group_info: dict, chat_stream: ChatStream ) -> Optional[MessageRecv]: diff --git a/src/chat/heart_flow/chat_state_info.py b/src/chat/heart_flow/chat_state_info.py index 320093533..db4c2d5c7 100644 --- a/src/chat/heart_flow/chat_state_info.py +++ b/src/chat/heart_flow/chat_state_info.py @@ -7,6 +7,7 @@ class ChatState(enum.Enum): NORMAL = "随便水群" FOCUSED = "认真水群" + class ChatStateInfo: def __init__(self): self.chat_status: ChatState = ChatState.NORMAL diff --git a/src/chat/heart_flow/heartflow.py b/src/chat/heart_flow/heartflow.py index 7ab71fc39..f0e01e838 100644 --- a/src/chat/heart_flow/heartflow.py +++ b/src/chat/heart_flow/heartflow.py @@ -3,6 +3,7 @@ from src.common.logger import get_logger from typing import Any, Optional from typing import Dict from src.chat.message_receive.chat_stream import get_chat_manager + logger = get_logger("heartflow") @@ -36,12 +37,11 @@ class Heartflow: logger.error(f"创建子心流 {subheartflow_id} 失败: {e}", exc_info=True) return None - async def force_change_subheartflow_status(self, subheartflow_id: str, status: ChatState) -> None: """强制改变子心流的状态""" # 这里的 message 是可选的,可能是一个消息对象,也可能是其他类型的数据 return await self.force_change_state(subheartflow_id, status) - + async def force_change_state(self, subflow_id: Any, target_state: ChatState) -> bool: """强制改变指定子心流的状态""" subflow = self.subheartflows.get(subflow_id) diff --git a/src/chat/heart_flow/heartflow_message_processor.py b/src/chat/heart_flow/heartflow_message_processor.py index f68139058..66ddf362e 100644 --- a/src/chat/heart_flow/heartflow_message_processor.py +++ b/src/chat/heart_flow/heartflow_message_processor.py @@ -17,6 +17,7 @@ from src.person_info.relationship_manager import get_relationship_manager logger = get_logger("chat") + async def _process_relationship(message: MessageRecv) -> None: """处理用户关系逻辑 diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py index 439558dd1..a2e0066cf 100644 --- a/src/chat/planner_actions/action_modifier.py +++ b/src/chat/planner_actions/action_modifier.py @@ -43,7 +43,7 @@ class ActionModifier: async def modify_actions( self, - loop_info = None, + loop_info=None, mode: str = "focus", message_content: str = "", ): @@ -60,10 +60,10 @@ class ActionModifier: removals_s1 = [] removals_s2 = [] - + self.action_manager.restore_actions() all_actions = self.action_manager.get_using_actions_for_mode(mode) - + message_list_before_now_half = get_raw_msg_before_timestamp_with_chat( chat_id=self.chat_stream.stream_id, timestamp=time.time(), @@ -77,7 +77,7 @@ class ActionModifier: read_mark=0.0, show_actions=True, ) - + if message_content: chat_content = chat_content + "\n" + f"现在,最新的消息是:{message_content}" @@ -99,14 +99,13 @@ class ActionModifier: self.action_manager.remove_action_from_using(action_name) logger.debug(f"{self.log_prefix}阶段一移除动作: {action_name},原因: {reason}") - # === 第二阶段:激活类型判定 === if chat_content is not None: logger.debug(f"{self.log_prefix}开始激活类型判定阶段") # 获取当前使用的动作集(经过第一阶段处理) current_using_actions = self.action_manager.get_using_actions_for_mode(mode) - + # 获取因激活类型判定而需要移除的动作 removals_s2 = await self._get_deactivated_actions_by_type( current_using_actions, @@ -118,7 +117,7 @@ class ActionModifier: for action_name, reason in removals_s2: self.action_manager.remove_action_from_using(action_name) logger.debug(f"{self.log_prefix}阶段二移除动作: {action_name},原因: {reason}") - + # === 统一日志记录 === all_removals = removals_s1 + removals_s2 if all_removals: @@ -136,11 +135,9 @@ class ActionModifier: 
associated_types_str = ", ".join(data["associated_types"]) reason = f"适配器不支持(需要: {associated_types_str})" type_mismatched_actions.append((action_name, reason)) - logger.debug( - f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}" - ) + logger.debug(f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}") return type_mismatched_actions - + async def _get_deactivated_actions_by_type( self, actions_with_info: Dict[str, Any], @@ -161,7 +158,7 @@ class ActionModifier: # 分类处理不同激活类型的actions llm_judge_actions = {} - + actions_to_check = list(actions_with_info.items()) random.shuffle(actions_to_check) @@ -188,7 +185,7 @@ class ActionModifier: elif activation_type == "llm_judge": llm_judge_actions[action_name] = action_info - + else: logger.warning(f"{self.log_prefix}未知的激活类型: {activation_type},跳过处理") @@ -512,21 +509,23 @@ class ActionModifier: # 如果最近sec_thres_reply_num次都是reply,40%概率移除 removal_probability = 0.4 / global_config.focus_chat.consecutive_replies if random.random() < removal_probability: - reason = f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + reason = ( + f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + ) removals.append(("reply", reason)) elif len(last_max_reply_num) >= one_thres_reply_num and all(last_max_reply_num[-one_thres_reply_num:]): # 如果最近one_thres_reply_num次都是reply,20%概率移除 removal_probability = 0.2 / global_config.focus_chat.consecutive_replies if random.random() < removal_probability: - reason = f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + reason = ( + f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)" + ) removals.append(("reply", reason)) else: logger.debug(f"{self.log_prefix}连续回复检测:无需移除reply动作,最近回复模式正常") return removals - - def get_available_actions_count(self) -> int: """获取当前可用动作数量(排除默认的no_action)""" current_actions = self.action_manager.get_using_actions_for_mode("normal") @@ -540,4 +539,4 @@ class ActionModifier: if available_count == 0: logger.debug(f"{self.log_prefix} 没有可用动作,跳过规划") return True - return False \ No newline at end of file + return False diff --git a/src/chat/planner_actions/planner_focus.py b/src/chat/planner_actions/planner_focus.py index 2aef5f429..0f5e84097 100644 --- a/src/chat/planner_actions/planner_focus.py +++ b/src/chat/planner_actions/planner_focus.py @@ -55,7 +55,7 @@ class ActionPlanner: def __init__(self, chat_id: str, action_manager: ActionManager): self.chat_id = chat_id self.log_prefix = f"[{get_chat_manager().get_stream_name(chat_id) or chat_id}]" - + self.action_manager = action_manager # LLM规划器配置 self.planner_llm = LLMRequest( @@ -67,7 +67,7 @@ class ActionPlanner: model=global_config.model.utils_small, request_type="focus.planner", # 用于动作规划 ) - + self.last_obs_time_mark = 0.0 async def plan(self) -> Dict[str, Any]: @@ -81,7 +81,7 @@ class ActionPlanner: try: is_group_chat = True - + message_list_before_now = get_raw_msg_before_timestamp_with_chat( chat_id=self.chat_id, timestamp=time.time(), @@ -95,7 +95,7 @@ class ActionPlanner: truncate=True, show_actions=True, ) - + self.last_obs_time_mark = time.time() # 获取聊天类型和目标信息 @@ -104,9 +104,7 @@ class ActionPlanner: try: # 重新获取更准确的聊天信息 is_group_chat, chat_target_info = get_chat_type_and_target_info(self.chat_id) - logger.debug( - f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}" - ) + logger.debug(f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}") except Exception as e: logger.warning(f"{self.log_prefix}获取聊天目标信息失败: 
{e}") chat_target_info = None @@ -201,7 +199,6 @@ class ActionPlanner: if key not in ["action", "reasoning"]: action_data[key] = value - if extracted_action not in current_available_actions: logger.warning( f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" @@ -266,8 +263,6 @@ class ActionPlanner: action_options_block = "" for using_actions_name, using_actions_info in current_available_actions.items(): - - if using_actions_info["parameters"]: param_text = "\n" for param_name, param_description in using_actions_info["parameters"].items(): diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py index d4bb5b17a..6bf776202 100644 --- a/src/chat/utils/utils.py +++ b/src/chat/utils/utils.py @@ -642,6 +642,7 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal" # 只返回时分秒格式,喵~ return time.strftime("%H:%M:%S", time.localtime(timestamp)) + def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]: """ 获取聊天类型(是否群聊)和私聊对象信息。 @@ -706,4 +707,4 @@ def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]: logger.error(f"获取聊天类型和目标信息时出错 for {chat_id}: {e}", exc_info=True) # Keep defaults on error - return is_group_chat, chat_target_info \ No newline at end of file + return is_group_chat, chat_target_info diff --git a/src/config/official_configs.py b/src/config/official_configs.py index a18f4a99b..335b95c77 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -300,7 +300,6 @@ class FocusChatConfig(ConfigBase): """连续回复能力,值越高,麦麦连续回复的概率越高""" - @dataclass class ExpressionConfig(ConfigBase): """表达配置类""" From 42a68a29c3010fa298e4e5426d1f58364a1a0893 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 21:45:40 +0800 Subject: [PATCH 50/63] =?UTF-8?q?merge=EF=BC=9A=E5=90=88=E5=B9=B6focus?= =?UTF-8?q?=E5=92=8Cnormal=E7=9A=84planner?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/normal_chat/normal_chat.py | 5 +- .../{planner_focus.py => planner.py} | 146 ++++----- src/chat/planner_actions/planner_normal.py | 306 ------------------ template/bot_config_template.toml | 2 +- 4 files changed, 67 insertions(+), 392 deletions(-) rename src/chat/planner_actions/{planner_focus.py => planner.py} (73%) delete mode 100644 src/chat/planner_actions/planner_normal.py diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 07b4f2908..7f1788591 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -19,6 +19,7 @@ from src.person_info.relationship_builder_manager import relationship_builder_ma from .priority_manager import PriorityManager import traceback from src.chat.planner_actions.planner_normal import NormalChatPlanner +from src.chat.planner_actions.planner_focus import ActionPlanner from src.chat.planner_actions.action_modifier import ActionModifier from src.chat.utils.utils import get_chat_type_and_target_info @@ -70,7 +71,7 @@ class NormalChat: # Planner相关初始化 self.action_manager = ActionManager() - self.planner = NormalChatPlanner(self.stream_name, self.action_manager) + self.planner = ActionPlanner(self.stream_id, self.action_manager, mode="normal") self.action_modifier = ActionModifier(self.action_manager, self.stream_id) self.enable_planner = global_config.normal_chat.enable_planner # 从配置中读取是否启用planner @@ -525,7 +526,7 @@ class NormalChat: return no_action # 执行规划 - plan_result = 
await self.planner.plan(message) + plan_result = await self.planner.plan() action_type = plan_result["action_result"]["action_type"] action_data = plan_result["action_result"]["action_data"] reasoning = plan_result["action_result"]["reasoning"] diff --git a/src/chat/planner_actions/planner_focus.py b/src/chat/planner_actions/planner.py similarity index 73% rename from src/chat/planner_actions/planner_focus.py rename to src/chat/planner_actions/planner.py index 0f5e84097..2c2fcf007 100644 --- a/src/chat/planner_actions/planner_focus.py +++ b/src/chat/planner_actions/planner.py @@ -29,13 +29,15 @@ def init_prompt(): {chat_content_block} {moderation_prompt} -现在请你根据聊天内容选择合适的action: - +现在请你根据{by_what}选择合适的action: +{no_action_block} {action_options_text} +你必须从上面列出的可用action中选择一个,并说明原因。 + 请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容: """, - "simple_planner_prompt", + "planner_prompt", ) Prompt( @@ -52,20 +54,15 @@ def init_prompt(): class ActionPlanner: - def __init__(self, chat_id: str, action_manager: ActionManager): + def __init__(self, chat_id: str, action_manager: ActionManager, mode: str = "focus"): self.chat_id = chat_id self.log_prefix = f"[{get_chat_manager().get_stream_name(chat_id) or chat_id}]" - + self.mode = mode self.action_manager = action_manager # LLM规划器配置 self.planner_llm = LLMRequest( model=global_config.model.planner, - request_type="focus.planner", # 用于动作规划 - ) - - self.utils_llm = LLMRequest( - model=global_config.model.utils_small, - request_type="focus.planner", # 用于动作规划 + request_type=f"{self.mode}.planner", # 用于动作规划 ) self.last_obs_time_mark = 0.0 @@ -82,37 +79,10 @@ class ActionPlanner: try: is_group_chat = True - message_list_before_now = get_raw_msg_before_timestamp_with_chat( - chat_id=self.chat_id, - timestamp=time.time(), - limit=global_config.chat.max_context_size, - ) + is_group_chat, chat_target_info = get_chat_type_and_target_info(self.chat_id) + logger.debug(f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}") - chat_context = build_readable_messages( - messages=message_list_before_now, - timestamp_mode="normal_no_YMD", - read_mark=self.last_obs_time_mark, - truncate=True, - show_actions=True, - ) - - self.last_obs_time_mark = time.time() - - # 获取聊天类型和目标信息 - chat_target_info = None - - try: - # 重新获取更准确的聊天信息 - is_group_chat, chat_target_info = get_chat_type_and_target_info(self.chat_id) - logger.debug(f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}") - except Exception as e: - logger.warning(f"{self.log_prefix}获取聊天目标信息失败: {e}") - chat_target_info = None - - # 获取经过modify_actions处理后的最终可用动作集 - # 注意:动作的激活判定现在在主循环的modify_actions中完成 - # 使用Focus模式过滤动作 - current_available_actions_dict = self.action_manager.get_using_actions_for_mode("focus") + current_available_actions_dict = self.action_manager.get_using_actions_for_mode(self.mode) # 获取完整的动作信息 all_registered_actions = self.action_manager.get_registered_actions() @@ -130,7 +100,6 @@ class ActionPlanner: action = "no_reply" reasoning = "没有可用的动作" if not current_available_actions else "只有no_reply动作可用,跳过规划" logger.info(f"{self.log_prefix}{reasoning}") - self.action_manager.restore_actions() logger.debug( f"{self.log_prefix}[focus]沉默后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}" ) @@ -142,14 +111,12 @@ class ActionPlanner: prompt = await self.build_planner_prompt( is_group_chat=is_group_chat, # <-- Pass HFC state chat_target_info=chat_target_info, # <-- 传递获取到的聊天目标信息 - observed_messages_str=chat_context, # <-- Pass local variable 
current_available_actions=current_available_actions, # <-- Pass determined actions ) # --- 调用 LLM (普通文本生成) --- llm_content = None try: - prompt = f"{prompt}" llm_content, (reasoning_content, _) = await self.planner_llm.generate_response_async(prompt=prompt) logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}") @@ -164,34 +131,21 @@ class ActionPlanner: if llm_content: try: - fixed_json_string = repair_json(llm_content) - if isinstance(fixed_json_string, str): - try: - parsed_json = json.loads(fixed_json_string) - except json.JSONDecodeError as decode_error: - logger.error(f"JSON解析错误: {str(decode_error)}") - parsed_json = {} - else: - # 如果repair_json直接返回了字典对象,直接使用 - parsed_json = fixed_json_string + parsed_json = json.loads(repair_json(llm_content)) - # 处理repair_json可能返回列表的情况 if isinstance(parsed_json, list): if parsed_json: - # 取列表中最后一个元素(通常是最完整的) parsed_json = parsed_json[-1] logger.warning(f"{self.log_prefix}LLM返回了多个JSON对象,使用最后一个: {parsed_json}") else: parsed_json = {} - # 确保parsed_json是字典 if not isinstance(parsed_json, dict): logger.error(f"{self.log_prefix}解析后的JSON不是字典类型: {type(parsed_json)}") parsed_json = {} - # 提取决策,提供默认值 - extracted_action = parsed_json.get("action", "no_reply") - extracted_reasoning = "" + action = parsed_json.get("action", "no_reply") + reasoning = parsed_json.get("reasoning", "未提供原因") # 将所有其他属性添加到action_data action_data = {} @@ -199,16 +153,16 @@ class ActionPlanner: if key not in ["action", "reasoning"]: action_data[key] = value - if extracted_action not in current_available_actions: + if action == "no_action": + action = "no_reply" + reasoning = "决定不使用额外动作" + + if action not in current_available_actions and action != "no_action": logger.warning( - f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" + f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" ) action = "no_reply" - reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}" - else: - # 动作有效且可用 - action = extracted_action - reasoning = extracted_reasoning + reasoning = f"LLM 返回了当前不可用的动作 '{action}' (可用: {list(current_available_actions.keys())})。原始理由: {reasoning}" except Exception as json_e: logger.warning(f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. 
LLM原始输出: '{llm_content}'") @@ -222,13 +176,19 @@ class ActionPlanner: action = "no_reply" reasoning = f"Planner 内部处理错误: {outer_e}" - # 恢复到默认动作集 - self.action_manager.restore_actions() - logger.debug( - f"{self.log_prefix}规划后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}" - ) - action_result = {"action_type": action, "action_data": action_data, "reasoning": reasoning} + is_parallel = False + if action in current_available_actions: + action_info = current_available_actions[action] + is_parallel = action_info.get("parallel_action", False) + + action_result = { + "action_type": action, + "action_data": action_data, + "reasoning": reasoning, + "timestamp": time.time(), + "is_parallel": is_parallel, + } plan_result = { "action_result": action_result, @@ -241,11 +201,36 @@ class ActionPlanner: self, is_group_chat: bool, # Now passed as argument chat_target_info: Optional[dict], # Now passed as argument - observed_messages_str: str, current_available_actions, ) -> str: """构建 Planner LLM 的提示词 (获取模板并填充数据)""" try: + message_list_before_now = get_raw_msg_before_timestamp_with_chat( + chat_id=self.chat_id, + timestamp=time.time(), + limit=global_config.chat.max_context_size, + ) + + chat_content_block = build_readable_messages( + messages=message_list_before_now, + timestamp_mode="normal_no_YMD", + read_mark=self.last_obs_time_mark, + truncate=True, + show_actions=True, + ) + + self.last_obs_time_mark = time.time() + + + if self.mode == "focus": + by_what = "聊天内容" + no_action_block = "" + else: + by_what = "聊天内容和用户的最新消息" + no_action_block = """重要说明: +- 'no_action' 表示只进行普通聊天回复,不执行任何额外动作 +- 其他action表示在普通回复的基础上,执行相应的额外动作""" + chat_context_description = "你现在正在一个群聊中" chat_target_name = None # Only relevant for private if not is_group_chat and chat_target_info: @@ -254,11 +239,6 @@ class ActionPlanner: ) chat_context_description = f"你正在和 {chat_target_name} 私聊" - chat_content_block = "" - if observed_messages_str: - chat_content_block = f"\n{observed_messages_str}" - else: - chat_content_block = "你还未开始聊天" action_options_block = "" @@ -286,10 +266,8 @@ class ActionPlanner: action_options_block += using_action_prompt - # moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。" - moderation_prompt_block = "" + moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。" - # 获取当前时间 time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}" bot_name = global_config.bot.nickname @@ -300,11 +278,13 @@ class ActionPlanner: bot_core_personality = global_config.personality.personality_core indentify_block = f"你的名字是{bot_name}{bot_nickname},你{bot_core_personality}:" - planner_prompt_template = await global_prompt_manager.get_prompt_async("simple_planner_prompt") + planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt") prompt = planner_prompt_template.format( time_block=time_block, + by_what=by_what, chat_context_description=chat_context_description, chat_content_block=chat_content_block, + no_action_block=no_action_block, action_options_text=action_options_block, moderation_prompt=moderation_prompt_block, indentify_block=indentify_block, diff --git a/src/chat/planner_actions/planner_normal.py b/src/chat/planner_actions/planner_normal.py deleted file mode 100644 index fce446b58..000000000 --- a/src/chat/planner_actions/planner_normal.py +++ /dev/null @@ -1,306 +0,0 @@ -import json -from typing import Dict, Any -from rich.traceback import install -from src.llm_models.utils_model import LLMRequest -from src.config.config import 
global_config -from src.common.logger import get_logger -from src.chat.utils.prompt_builder import Prompt, global_prompt_manager -from src.individuality.individuality import get_individuality -from src.chat.planner_actions.action_manager import ActionManager -from src.chat.message_receive.message import MessageThinking -from json_repair import repair_json -from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat -import time -import traceback - -logger = get_logger("normal_chat_planner") - -install(extra_lines=3) - - -def init_prompt(): - Prompt( - """ -你的自我认知是: -{self_info_block} -请记住你的性格,身份和特点。 - -你是群内的一员,你现在正在参与群内的闲聊,以下是群内的聊天内容: -{chat_context} - -基于以上聊天上下文和用户的最新消息,选择最合适的action。 - -注意,除了下面动作选项之外,你在聊天中不能做其他任何事情,这是你能力的边界,现在请你选择合适的action: - -{action_options_text} - -重要说明: -- "no_action" 表示只进行普通聊天回复,不执行任何额外动作 -- 其他action表示在普通回复的基础上,执行相应的额外动作 - -你必须从上面列出的可用action中选择一个,并说明原因。 -{moderation_prompt} - -请以动作的输出要求,以严格的 JSON 格式输出,且仅包含 JSON 内容。不要有任何其他文字或解释: -""", - "normal_chat_planner_prompt", - ) - - Prompt( - """ -动作:{action_name} -动作描述:{action_description} -{action_require} -{{ - "action": "{action_name}",{action_parameters} -}} -""", - "normal_chat_action_prompt", - ) - - -class NormalChatPlanner: - def __init__(self, log_prefix: str, action_manager: ActionManager): - self.log_prefix = log_prefix - # LLM规划器配置 - self.planner_llm = LLMRequest( - model=global_config.model.planner, - request_type="normal.planner", # 用于normal_chat动作规划 - ) - - self.action_manager = action_manager - - async def plan(self, message: MessageThinking) -> Dict[str, Any]: - """ - Normal Chat 规划器: 使用LLM根据上下文决定做出什么动作。 - - 参数: - message: 思考消息对象 - sender_name: 发送者名称 - """ - - action = "no_action" # 默认动作改为no_action - reasoning = "规划器初始化默认" - action_data = {} - - try: - # 设置默认值 - nickname_str = "" - for nicknames in global_config.bot.alias_names: - nickname_str += f"{nicknames}," - name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。" - - personality_block = get_individuality().get_personality_prompt(x_person=2, level=2) - identity_block = get_individuality().get_identity_prompt(x_person=2, level=2) - - self_info = name_block + personality_block + identity_block - - # 获取当前可用的动作,使用Normal模式过滤 - current_available_actions = self.action_manager.get_using_actions_for_mode("normal") - - # 注意:动作的激活判定现在在 normal_chat_action_modifier 中完成 - # 这里直接使用经过 action_modifier 处理后的最终动作集 - # 符合职责分离原则:ActionModifier负责动作管理,Planner专注于决策 - - # 如果没有可用动作,直接返回no_action - if not current_available_actions: - logger.debug(f"{self.log_prefix}规划器: 没有可用动作,返回no_action") - return { - "action_result": { - "action_type": action, - "action_data": action_data, - "reasoning": reasoning, - "is_parallel": True, - }, - "chat_context": "", - "action_prompt": "", - } - - # 构建normal_chat的上下文 (使用与normal_chat相同的prompt构建方法) - message_list_before_now = get_raw_msg_before_timestamp_with_chat( - chat_id=message.chat_stream.stream_id, - timestamp=time.time(), - limit=global_config.chat.max_context_size, - ) - - chat_context = build_readable_messages( - message_list_before_now, - replace_bot_name=True, - merge_messages=False, - timestamp_mode="relative", - read_mark=0.0, - show_actions=True, - ) - - # 构建planner的prompt - prompt = await self.build_planner_prompt( - self_info_block=self_info, - chat_context=chat_context, - current_available_actions=current_available_actions, - ) - - if not prompt: - logger.warning(f"{self.log_prefix}规划器: 构建提示词失败") - return { - "action_result": { - "action_type": action, - 
"action_data": action_data, - "reasoning": reasoning, - "is_parallel": False, - }, - "chat_context": chat_context, - "action_prompt": "", - } - - # 使用LLM生成动作决策 - try: - content, (reasoning_content, model_name) = await self.planner_llm.generate_response_async(prompt) - - logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}") - logger.info(f"{self.log_prefix}规划器原始响应: {content}") - if reasoning_content: - logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}") - - # 解析JSON响应 - try: - # 尝试修复JSON - fixed_json = repair_json(content) - action_result = json.loads(fixed_json) - - action = action_result.get("action", "no_action") - reasoning = action_result.get("reasoning", "未提供原因") - - # 提取其他参数作为action_data - action_data = {k: v for k, v in action_result.items() if k not in ["action", "reasoning"]} - - # 验证动作是否在可用动作列表中,或者是特殊动作 - if action not in current_available_actions: - logger.warning(f"{self.log_prefix}规划器选择了不可用的动作: {action}, 回退到no_action") - action = "no_action" - reasoning = f"选择的动作{action}不在可用列表中,回退到no_action" - action_data = {} - - except json.JSONDecodeError as e: - logger.warning(f"{self.log_prefix}规划器JSON解析失败: {e}, 内容: {content}") - action = "no_action" - reasoning = "JSON解析失败,使用默认动作" - action_data = {} - - except Exception as e: - logger.error(f"{self.log_prefix}规划器LLM调用失败: {e}") - action = "no_action" - reasoning = "LLM调用失败,使用默认动作" - action_data = {} - - except Exception as outer_e: - logger.error(f"{self.log_prefix}规划器异常: {outer_e}") - # 设置异常时的默认值 - current_available_actions = {} - chat_context = "无法获取聊天上下文" - prompt = "" - action = "no_action" - reasoning = "规划器出现异常,使用默认动作" - action_data = {} - - # 检查动作是否支持并行执行 - is_parallel = False - if action in current_available_actions: - action_info = current_available_actions[action] - is_parallel = action_info.get("parallel_action", False) - - logger.debug( - f"{self.log_prefix}规划器决策动作:{action}, 动作信息: '{action_data}', 理由: {reasoning}, 并行执行: {is_parallel}" - ) - - # 恢复到默认动作集 - self.action_manager.restore_actions() - logger.debug( - f"{self.log_prefix}规划后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}" - ) - - # 构建 action 记录 - action_record = { - "action_type": action, - "action_data": action_data, - "reasoning": reasoning, - "timestamp": time.time(), - "model_name": model_name if "model_name" in locals() else None, - } - - action_result = { - "action_type": action, - "action_data": action_data, - "reasoning": reasoning, - "is_parallel": is_parallel, - "action_record": json.dumps(action_record, ensure_ascii=False), - } - - plan_result = { - "action_result": action_result, - "chat_context": chat_context, - "action_prompt": prompt, - } - - return plan_result - - async def build_planner_prompt( - self, - self_info_block: str, - chat_context: str, - current_available_actions: Dict[str, Any], - ) -> str: - """构建 Normal Chat Planner LLM 的提示词""" - try: - # 构建动作选项文本 - action_options_text = "" - - for action_name, action_info in current_available_actions.items(): - action_description = action_info.get("description", "") - action_parameters = action_info.get("parameters", {}) - action_require = action_info.get("require", []) - - if action_parameters: - param_text = "\n" - # print(action_parameters) - for param_name, param_description in action_parameters.items(): - param_text += f' "{param_name}":"{param_description}"\n' - param_text = param_text.rstrip("\n") - else: - param_text = "" - - require_text = "" - for require_item in action_require: - require_text += f"- {require_item}\n" - require_text = require_text.rstrip("\n") - - # 
构建单个动作的提示 - action_prompt = await global_prompt_manager.format_prompt( - "normal_chat_action_prompt", - action_name=action_name, - action_description=action_description, - action_parameters=param_text, - action_require=require_text, - ) - action_options_text += action_prompt + "\n\n" - - # 审核提示 - moderation_prompt = "请确保你的回复符合平台规则,避免不当内容。" - - # 使用模板构建最终提示词 - prompt = await global_prompt_manager.format_prompt( - "normal_chat_planner_prompt", - self_info_block=self_info_block, - action_options_text=action_options_text, - moderation_prompt=moderation_prompt, - chat_context=chat_context, - ) - - return prompt - - except Exception as e: - logger.error(f"{self.log_prefix}构建Planner提示词失败: {e}") - traceback.print_exc() - return "" - - -init_prompt() diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index e269cdddf..b8781cea9 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -61,7 +61,7 @@ enable_relationship = true # 是否启用关系系统 relation_frequency = 1 # 关系频率,麦麦构建关系的速度,仅在normal_chat模式下有效 [chat] #麦麦的聊天通用设置 -chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,在普通模式和专注模式之间自动切换 +chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,auto模式:在普通模式和专注模式之间自动切换 auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天 exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天 # 普通模式下,麦麦会针对感兴趣的消息进行回复,token消耗量较低 From 1518251cc312f03bb2f646d9ee5810f14505c9ff Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 13:45:57 +0000 Subject: [PATCH 51/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/normal_chat/normal_chat.py | 1 - src/chat/planner_actions/planner.py | 7 ++----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 7f1788591..6a8c73b34 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -18,7 +18,6 @@ from src.chat.planner_actions.action_manager import ActionManager from src.person_info.relationship_builder_manager import relationship_builder_manager from .priority_manager import PriorityManager import traceback -from src.chat.planner_actions.planner_normal import NormalChatPlanner from src.chat.planner_actions.planner_focus import ActionPlanner from src.chat.planner_actions.action_modifier import ActionModifier diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 2c2fcf007..11d69935e 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -156,7 +156,7 @@ class ActionPlanner: if action == "no_action": action = "no_reply" reasoning = "决定不使用额外动作" - + if action not in current_available_actions and action != "no_action": logger.warning( f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" @@ -176,7 +176,6 @@ class ActionPlanner: action = "no_reply" reasoning = f"Planner 内部处理错误: {outer_e}" - is_parallel = False if action in current_available_actions: action_info = current_available_actions[action] @@ -220,7 +219,6 @@ class ActionPlanner: ) self.last_obs_time_mark = time.time() - if self.mode == "focus": by_what = "聊天内容" @@ -230,7 +228,7 @@ class ActionPlanner: no_action_block = """重要说明: - 'no_action' 表示只进行普通聊天回复,不执行任何额外动作 - 其他action表示在普通回复的基础上,执行相应的额外动作""" - + chat_context_description = 
"你现在正在一个群聊中" chat_target_name = None # Only relevant for private if not is_group_chat and chat_target_info: @@ -239,7 +237,6 @@ class ActionPlanner: ) chat_context_description = f"你正在和 {chat_target_name} 私聊" - action_options_block = "" for using_actions_name, using_actions_info in current_available_actions.items(): From 318543036e6212d771e97af6a37d2b9e7b221355 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 21:58:23 +0800 Subject: [PATCH 52/63] =?UTF-8?q?better=EF=BC=9B=E7=A7=BB=E9=99=A4?= =?UTF-8?q?=E6=97=A0=E7=94=A8=E5=86=85=E5=AE=B9=EF=BC=8C=E7=8B=AC=E7=AB=8B?= =?UTF-8?q?mute=20action?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/focus_chat/heartFC_chat.py | 7 +- src/chat/focus_chat/hfc_performance_logger.py | 6 +- src/chat/focus_chat/hfc_version_manager.py | 185 ------ src/chat/normal_chat/normal_chat.py | 27 +- .../built_in/mute_plugin/_manifest.json | 19 - src/plugins/built_in/mute_plugin/plugin.py | 565 ------------------ 6 files changed, 7 insertions(+), 802 deletions(-) delete mode 100644 src/chat/focus_chat/hfc_version_manager.py delete mode 100644 src/plugins/built_in/mute_plugin/_manifest.json delete mode 100644 src/plugins/built_in/mute_plugin/plugin.py diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index e0d679e0f..13b5cc834 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -10,12 +10,11 @@ from src.chat.utils.prompt_builder import global_prompt_manager from src.common.logger import get_logger from src.chat.utils.timer_calculator import Timer from src.chat.focus_chat.focus_loop_info import FocusLoopInfo -from src.chat.planner_actions.planner_focus import ActionPlanner +from src.chat.planner_actions.planner import ActionPlanner from src.chat.planner_actions.action_modifier import ActionModifier from src.chat.planner_actions.action_manager import ActionManager from src.config.config import global_config from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger -from src.chat.focus_chat.hfc_version_manager import get_hfc_version from src.person_info.relationship_builder_manager import relationship_builder_manager from src.chat.focus_chat.hfc_utils import CycleDetail @@ -87,8 +86,8 @@ class HeartFChatting: # 初始化性能记录器 # 如果没有指定版本号,则使用全局版本管理器的版本号 - actual_version = get_hfc_version() - self.performance_logger = HFCPerformanceLogger(chat_id, actual_version) + + self.performance_logger = HFCPerformanceLogger(chat_id) logger.info( f"{self.log_prefix} HeartFChatting 初始化完成,消息疲惫阈值: {self._message_threshold}条(基于exit_focus_threshold={global_config.chat.exit_focus_threshold}计算,仅在auto模式下生效)" diff --git a/src/chat/focus_chat/hfc_performance_logger.py b/src/chat/focus_chat/hfc_performance_logger.py index 88b4c66a3..64e65ff85 100644 --- a/src/chat/focus_chat/hfc_performance_logger.py +++ b/src/chat/focus_chat/hfc_performance_logger.py @@ -11,11 +11,11 @@ class HFCPerformanceLogger: """HFC性能记录管理器""" # 版本号常量,可在启动时修改 - INTERNAL_VERSION = "v1.0.0" + INTERNAL_VERSION = "v7.0.0" - def __init__(self, chat_id: str, version: str = None): + def __init__(self, chat_id: str): self.chat_id = chat_id - self.version = version or self.INTERNAL_VERSION + self.version = self.INTERNAL_VERSION self.log_dir = Path("log/hfc_loop") self.session_start_time = datetime.now() diff --git a/src/chat/focus_chat/hfc_version_manager.py b/src/chat/focus_chat/hfc_version_manager.py deleted file mode 100644 index c41dff2a8..000000000 
--- a/src/chat/focus_chat/hfc_version_manager.py +++ /dev/null @@ -1,185 +0,0 @@ -""" -HFC性能记录版本号管理器 - -用于管理HFC性能记录的内部版本号,支持: -1. 默认版本号设置 -2. 启动时版本号配置 -3. 版本号验证和格式化 -""" - -import os -import re -from datetime import datetime -from typing import Optional -from src.common.logger import get_logger - -logger = get_logger("hfc_version") - - -class HFCVersionManager: - """HFC版本号管理器""" - - # 默认版本号 - DEFAULT_VERSION = "v6.0.0" - - # 当前运行时版本号 - _current_version: Optional[str] = None - - @classmethod - def set_version(cls, version: str) -> bool: - """ - 设置当前运行时版本号 - - 参数: - version: 版本号字符串,格式如 v1.0.0 或 1.0.0 - - 返回: - bool: 设置是否成功 - """ - try: - validated_version = cls._validate_version(version) - if validated_version: - cls._current_version = validated_version - logger.info(f"HFC性能记录版本已设置为: {validated_version}") - return True - else: - logger.warning(f"无效的版本号格式: {version}") - return False - except Exception as e: - logger.error(f"设置版本号失败: {e}") - return False - - @classmethod - def get_version(cls) -> str: - """ - 获取当前版本号 - - 返回: - str: 当前版本号 - """ - if cls._current_version: - return cls._current_version - - # 尝试从环境变量获取 - env_version = os.getenv("HFC_PERFORMANCE_VERSION") - if env_version: - if cls.set_version(env_version): - return cls._current_version - - # 返回默认版本号 - return cls.DEFAULT_VERSION - - @classmethod - def auto_generate_version(cls, base_version: str = None) -> str: - """ - 自动生成版本号(基于时间戳) - - 参数: - base_version: 基础版本号,如果不提供则使用默认版本 - - 返回: - str: 生成的版本号 - """ - if not base_version: - base_version = cls.DEFAULT_VERSION - - # 提取基础版本号的主要部分 - base_match = re.match(r"v?(\d+\.\d+)", base_version) - if base_match: - base_part = base_match.group(1) - else: - base_part = "1.0" - - # 添加时间戳 - timestamp = datetime.now().strftime("%Y%m%d_%H%M") - generated_version = f"v{base_part}.{timestamp}" - - cls.set_version(generated_version) - logger.info(f"自动生成版本号: {generated_version}") - - return generated_version - - @classmethod - def _validate_version(cls, version: str) -> Optional[str]: - """ - 验证版本号格式 - - 参数: - version: 待验证的版本号 - - 返回: - Optional[str]: 验证后的版本号,失败返回None - """ - if not version or not isinstance(version, str): - return None - - version = version.strip() - - # 支持的格式: - # v1.0.0, 1.0.0, v1.0, 1.0, v1.0.0.20241222_1530 等 - patterns = [ - r"^v?(\d+\.\d+\.\d+)$", # v1.0.0 或 1.0.0 - r"^v?(\d+\.\d+)$", # v1.0 或 1.0 - r"^v?(\d+\.\d+\.\d+\.\w+)$", # v1.0.0.build 或 1.0.0.build - r"^v?(\d+\.\d+\.\w+)$", # v1.0.build 或 1.0.build - ] - - for pattern in patterns: - match = re.match(pattern, version) - if match: - # 确保版本号以v开头 - if not version.startswith("v"): - version = "v" + version - return version - - return None - - @classmethod - def reset_version(cls): - """重置版本号为默认值""" - cls._current_version = None - logger.info("HFC版本号已重置为默认值") - - @classmethod - def get_version_info(cls) -> dict: - """ - 获取版本信息 - - 返回: - dict: 版本相关信息 - """ - current = cls.get_version() - return { - "current_version": current, - "default_version": cls.DEFAULT_VERSION, - "is_custom": current != cls.DEFAULT_VERSION, - "env_version": os.getenv("HFC_PERFORMANCE_VERSION"), - "timestamp": datetime.now().isoformat(), - } - - -# 全局函数,方便使用 -def set_hfc_version(version: str) -> bool: - """设置HFC性能记录版本号""" - return HFCVersionManager.set_version(version) - - -def get_hfc_version() -> str: - """获取当前HFC性能记录版本号""" - return HFCVersionManager.get_version() - - -def auto_generate_hfc_version(base_version: str = None) -> str: - """自动生成HFC版本号""" - return HFCVersionManager.auto_generate_version(base_version) - - -def reset_hfc_version(): - 
"""重置HFC版本号""" - HFCVersionManager.reset_version() - - -# 在模块加载时显示当前版本信息 -if __name__ != "__main__": - current_version = HFCVersionManager.get_version() - logger.debug(f"HFC性能记录模块已加载,当前版本: {current_version}") diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 6a8c73b34..e69e2a562 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -18,7 +18,7 @@ from src.chat.planner_actions.action_manager import ActionManager from src.person_info.relationship_builder_manager import relationship_builder_manager from .priority_manager import PriorityManager import traceback -from src.chat.planner_actions.planner_focus import ActionPlanner +from src.chat.planner_actions.planner import ActionPlanner from src.chat.planner_actions.action_modifier import ActionModifier from src.chat.utils.utils import get_chat_type_and_target_info @@ -773,14 +773,9 @@ class NormalChat: # 尝试优雅取消任务 task_to_cancel.cancel() - # 不等待任务完成,让它自然结束 - # 这样可以避免等待过程中的潜在递归问题 - # 异步清理思考消息,不阻塞当前流程 asyncio.create_task(self._cleanup_thinking_messages_async()) - logger.debug(f"[{self.stream_name}] 聊天任务停止完成") - async def _cleanup_thinking_messages_async(self): """异步清理思考消息,避免阻塞主流程""" try: @@ -799,26 +794,6 @@ class NormalChat: logger.error(f"[{self.stream_name}] 异步清理思考消息时出错: {e}") # 不打印完整栈跟踪,避免日志污染 - # 获取最近回复记录的方法 - def get_recent_replies(self, limit: int = 10) -> List[dict]: - """获取最近的回复记录 - - Args: - limit: 最大返回数量,默认10条 - - Returns: - List[dict]: 最近的回复记录列表,每项包含: - time: 回复时间戳 - user_message: 用户消息内容 - user_info: 用户信息(user_id, user_nickname) - response: 回复内容 - is_mentioned: 是否被提及(@) - is_reference_reply: 是否为引用回复 - timing: 各阶段耗时 - """ - # 返回最近的limit条记录,按时间倒序排列 - return sorted(self.recent_replies[-limit:], key=lambda x: x["time"], reverse=True) - def adjust_reply_frequency(self): """ 根据预设规则动态调整回复意愿(willing_amplifier)。 diff --git a/src/plugins/built_in/mute_plugin/_manifest.json b/src/plugins/built_in/mute_plugin/_manifest.json deleted file mode 100644 index f990ba44e..000000000 --- a/src/plugins/built_in/mute_plugin/_manifest.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "manifest_version": 1, - "name": "群聊禁言管理插件 (Mute Plugin)", - "version": "3.0.0", - "description": "群聊禁言管理插件,提供智能禁言功能", - "author": { - "name": "MaiBot开发团队", - "url": "https://github.com/MaiM-with-u" - }, - "license": "GPL-v3.0-or-later", - "host_application": { - "min_version": "0.8.0", - "max_version": "0.8.10" - }, - "keywords": ["mute", "ban", "moderation", "admin", "management", "group"], - "categories": ["Moderation", "Group Management", "Admin Tools"], - "default_locale": "zh-CN", - "locales_path": "_locales" -} \ No newline at end of file diff --git a/src/plugins/built_in/mute_plugin/plugin.py b/src/plugins/built_in/mute_plugin/plugin.py deleted file mode 100644 index 43f5f81c4..000000000 --- a/src/plugins/built_in/mute_plugin/plugin.py +++ /dev/null @@ -1,565 +0,0 @@ -""" -禁言插件 - -提供智能禁言功能的群聊管理插件。 - -功能特性: -- 智能LLM判定:根据聊天内容智能判断是否需要禁言 -- 灵活的时长管理:支持自定义禁言时长限制 -- 模板化消息:支持自定义禁言提示消息 -- 参数验证:完整的输入参数验证和错误处理 -- 配置文件支持:所有设置可通过配置文件调整 -- 权限管理:支持用户权限和群组权限控制 - -包含组件: -- 智能禁言Action - 基于LLM判断是否需要禁言(支持群组权限控制) -- 禁言命令Command - 手动执行禁言操作(支持用户权限控制) -""" - -from typing import List, Tuple, Type, Optional -import random - -# 导入新插件系统 -from src.plugin_system.base.base_plugin import BasePlugin -from src.plugin_system.base.base_plugin import register_plugin -from src.plugin_system.base.base_action import BaseAction -from src.plugin_system.base.base_command import BaseCommand -from src.plugin_system.base.component_types import 
ComponentInfo, ActionActivationType, ChatMode -from src.plugin_system.base.config_types import ConfigField -from src.common.logger import get_logger - -# 导入配置API(可选的简便方法) -from src.plugin_system.apis import person_api, generator_api - -logger = get_logger("mute_plugin") - - -# ===== Action组件 ===== - - -class MuteAction(BaseAction): - """智能禁言Action - 基于LLM智能判断是否需要禁言""" - - # 激活设置 - focus_activation_type = ActionActivationType.LLM_JUDGE # Focus模式使用LLM判定,确保谨慎 - normal_activation_type = ActionActivationType.KEYWORD # Normal模式使用关键词激活,快速响应 - mode_enable = ChatMode.ALL - parallel_action = False - - # 动作基本信息 - action_name = "mute" - action_description = "智能禁言系统,基于LLM判断是否需要禁言" - - # 关键词设置(用于Normal模式) - activation_keywords = ["禁言", "mute", "ban", "silence"] - keyword_case_sensitive = False - - # LLM判定提示词(用于Focus模式) - llm_judge_prompt = """ -判定是否需要使用禁言动作的严格条件: - -使用禁言的情况: -1. 用户发送明显违规内容(色情、暴力、政治敏感等) -2. 恶意刷屏或垃圾信息轰炸 -3. 用户主动明确要求被禁言("禁言我"等) -4. 严重违反群规的行为 -5. 恶意攻击他人或群组管理 - -绝对不要使用的情况: -2. 情绪化表达但无恶意 -3. 开玩笑或调侃,除非过分 -4. 单纯的意见分歧或争论 - -""" - - # 动作参数定义 - action_parameters = { - "target": "禁言对象,必填,输入你要禁言的对象的名字,请仔细思考不要弄错禁言对象", - "duration": "禁言时长,必填,输入你要禁言的时长(秒),单位为秒,必须为数字", - "reason": "禁言理由,可选", - } - - # 动作使用场景 - action_require = [ - "当有人违反了公序良俗的内容", - "当有人刷屏时使用", - "当有人发了擦边,或者色情内容时使用", - "当有人要求禁言自己时使用", - "如果某人已经被禁言了,就不要再次禁言了,除非你想追加时间!!", - ] - - # 关联类型 - associated_types = ["text", "command"] - - def _check_group_permission(self) -> Tuple[bool, Optional[str]]: - """检查当前群是否有禁言动作权限 - - Returns: - Tuple[bool, Optional[str]]: (是否有权限, 错误信息) - """ - # 如果不是群聊,直接返回False - if not self.is_group: - return False, "禁言动作只能在群聊中使用" - - # 获取权限配置 - allowed_groups = self.get_config("permissions.allowed_groups", []) - - # 如果配置为空,表示不启用权限控制 - if not allowed_groups: - logger.info(f"{self.log_prefix} 群组权限未配置,允许所有群使用禁言动作") - return True, None - - # 检查当前群是否在允许列表中 - current_group_key = f"{self.platform}:{self.group_id}" - for allowed_group in allowed_groups: - if allowed_group == current_group_key: - logger.info(f"{self.log_prefix} 群组 {current_group_key} 有禁言动作权限") - return True, None - - logger.warning(f"{self.log_prefix} 群组 {current_group_key} 没有禁言动作权限") - return False, "当前群组没有使用禁言动作的权限" - - async def execute(self) -> Tuple[bool, Optional[str]]: - """执行智能禁言判定""" - logger.info(f"{self.log_prefix} 执行智能禁言动作") - - # 首先检查群组权限 - has_permission, permission_error = self._check_group_permission() - - # 获取参数 - target = self.action_data.get("target") - duration = self.action_data.get("duration") - reason = self.action_data.get("reason", "违反群规") - - # 参数验证 - if not target: - error_msg = "禁言目标不能为空" - logger.error(f"{self.log_prefix} {error_msg}") - await self.send_text("没有指定禁言对象呢~") - return False, error_msg - - if not duration: - error_msg = "禁言时长不能为空" - logger.error(f"{self.log_prefix} {error_msg}") - await self.send_text("没有指定禁言时长呢~") - return False, error_msg - - # 获取时长限制配置 - min_duration = self.get_config("mute.min_duration", 60) - max_duration = self.get_config("mute.max_duration", 2592000) - - # 验证时长格式并转换 - try: - duration_int = int(duration) - if duration_int <= 0: - error_msg = "禁言时长必须大于0" - logger.error(f"{self.log_prefix} {error_msg}") - await self.send_text("禁言时长必须是正数哦~") - return False, error_msg - - # 限制禁言时长范围 - if duration_int < min_duration: - duration_int = min_duration - logger.info(f"{self.log_prefix} 禁言时长过短,调整为{min_duration}秒") - elif duration_int > max_duration: - duration_int = max_duration - logger.info(f"{self.log_prefix} 禁言时长过长,调整为{max_duration}秒") - - except (ValueError, TypeError): - error_msg = f"禁言时长格式无效: 
{duration}" - logger.error(f"{self.log_prefix} {error_msg}") - # await self.send_text("禁言时长必须是数字哦~") - return False, error_msg - - # 获取用户ID - person_id = person_api.get_person_id_by_name(target) - user_id = await person_api.get_person_value(person_id, "user_id") - if not user_id: - error_msg = f"未找到用户 {target} 的ID" - await self.send_text(f"找不到 {target} 这个人呢~") - logger.error(f"{self.log_prefix} {error_msg}") - return False, error_msg - - # 格式化时长显示 - enable_formatting = self.get_config("mute.enable_duration_formatting", True) - time_str = self._format_duration(duration_int) if enable_formatting else f"{duration_int}秒" - - # 获取模板化消息 - message = self._get_template_message(target, time_str, reason) - - if not has_permission: - logger.warning(f"{self.log_prefix} 权限检查失败: {permission_error}") - result_status, result_message = await generator_api.rewrite_reply( - chat_stream=self.chat_stream, - reply_data={ - "raw_reply": "我想禁言{target},但是我没有权限", - "reason": "表达自己没有在这个群禁言的能力", - }, - ) - - if result_status: - for reply_seg in result_message: - data = reply_seg[1] - await self.send_text(data) - - await self.store_action_info( - action_build_into_prompt=True, - action_prompt_display=f"尝试禁言了用户 {target},但是没有权限,无法禁言", - action_done=True, - ) - - # 不发送错误消息,静默拒绝 - return False, permission_error - - result_status, result_message = await generator_api.rewrite_reply( - chat_stream=self.chat_stream, - reply_data={ - "raw_reply": message, - "reason": reason, - }, - ) - - if result_status: - for reply_seg in result_message: - data = reply_seg[1] - await self.send_text(data) - - # 发送群聊禁言命令 - success = await self.send_command( - command_name="GROUP_BAN", args={"qq_id": str(user_id), "duration": str(duration_int)}, storage_message=False - ) - - if success: - logger.info(f"{self.log_prefix} 成功发送禁言命令,用户 {target}({user_id}),时长 {duration_int} 秒") - # 存储动作信息 - await self.store_action_info( - action_build_into_prompt=True, - action_prompt_display=f"尝试禁言了用户 {target},时长 {time_str},原因:{reason}", - action_done=True, - ) - return True, f"成功禁言 {target},时长 {time_str}" - else: - error_msg = "发送禁言命令失败" - logger.error(f"{self.log_prefix} {error_msg}") - - await self.send_text("执行禁言动作失败") - return False, error_msg - - def _get_template_message(self, target: str, duration_str: str, reason: str) -> str: - """获取模板化的禁言消息""" - templates = self.get_config("mute.templates") - - template = random.choice(templates) - return template.format(target=target, duration=duration_str, reason=reason) - - def _format_duration(self, seconds: int) -> str: - """将秒数格式化为可读的时间字符串""" - if seconds < 60: - return f"{seconds}秒" - elif seconds < 3600: - minutes = seconds // 60 - remaining_seconds = seconds % 60 - if remaining_seconds > 0: - return f"{minutes}分{remaining_seconds}秒" - else: - return f"{minutes}分钟" - elif seconds < 86400: - hours = seconds // 3600 - remaining_minutes = (seconds % 3600) // 60 - if remaining_minutes > 0: - return f"{hours}小时{remaining_minutes}分钟" - else: - return f"{hours}小时" - else: - days = seconds // 86400 - remaining_hours = (seconds % 86400) // 3600 - if remaining_hours > 0: - return f"{days}天{remaining_hours}小时" - else: - return f"{days}天" - - -# ===== Command组件 ===== - - -class MuteCommand(BaseCommand): - """禁言命令 - 手动执行禁言操作""" - - # Command基本信息 - command_name = "mute_command" - command_description = "禁言命令,手动执行禁言操作" - - command_pattern = r"^/mute\s+(?P\S+)\s+(?P\d+)(?:\s+(?P.+))?$" - command_help = "禁言指定用户,用法:/mute <用户名> <时长(秒)> [理由]" - command_examples = ["/mute 用户名 300", "/mute 张三 600 刷屏", "/mute @某人 1800 违规内容"] - 
intercept_message = True # 拦截消息处理 - - def _check_user_permission(self) -> Tuple[bool, Optional[str]]: - """检查当前用户是否有禁言命令权限 - - Returns: - Tuple[bool, Optional[str]]: (是否有权限, 错误信息) - """ - # 获取当前用户信息 - chat_stream = self.message.chat_stream - if not chat_stream: - return False, "无法获取聊天流信息" - - current_platform = chat_stream.platform - current_user_id = str(chat_stream.user_info.user_id) - - # 获取权限配置 - allowed_users = self.get_config("permissions.allowed_users", []) - - # 如果配置为空,表示不启用权限控制 - if not allowed_users: - logger.info(f"{self.log_prefix} 用户权限未配置,允许所有用户使用禁言命令") - return True, None - - # 检查当前用户是否在允许列表中 - current_user_key = f"{current_platform}:{current_user_id}" - for allowed_user in allowed_users: - if allowed_user == current_user_key: - logger.info(f"{self.log_prefix} 用户 {current_user_key} 有禁言命令权限") - return True, None - - logger.warning(f"{self.log_prefix} 用户 {current_user_key} 没有禁言命令权限") - return False, "你没有使用禁言命令的权限" - - async def execute(self) -> Tuple[bool, Optional[str]]: - """执行禁言命令""" - try: - # 首先检查用户权限 - has_permission, permission_error = self._check_user_permission() - if not has_permission: - logger.error(f"{self.log_prefix} 权限检查失败: {permission_error}") - await self.send_text(f"❌ {permission_error}") - return False, permission_error - - target = self.matched_groups.get("target") - duration = self.matched_groups.get("duration") - reason = self.matched_groups.get("reason", "管理员操作") - - if not all([target, duration]): - await self.send_text("❌ 命令参数不完整,请检查格式") - return False, "参数不完整" - - # 获取时长限制配置 - min_duration = self.get_config("mute.min_duration", 60) - max_duration = self.get_config("mute.max_duration", 2592000) - - # 验证时长 - try: - duration_int = int(duration) - if duration_int <= 0: - await self.send_text("❌ 禁言时长必须大于0") - return False, "时长无效" - - # 限制禁言时长范围 - if duration_int < min_duration: - duration_int = min_duration - await self.send_text(f"⚠️ 禁言时长过短,调整为{min_duration}秒") - elif duration_int > max_duration: - duration_int = max_duration - await self.send_text(f"⚠️ 禁言时长过长,调整为{max_duration}秒") - - except ValueError: - await self.send_text("❌ 禁言时长必须是数字") - return False, "时长格式错误" - - # 获取用户ID - person_id = person_api.get_person_id_by_name(target) - user_id = await person_api.get_person_value(person_id, "user_id") - if not user_id or user_id == "unknown": - error_msg = f"未找到用户 {target} 的ID,请输入person_name进行禁言" - await self.send_text(f"❌ 找不到用户 {target} 的ID,请输入person_name进行禁言,而不是qq号或者昵称") - logger.error(f"{self.log_prefix} {error_msg}") - return False, error_msg - - # 格式化时长显示 - enable_formatting = self.get_config("mute.enable_duration_formatting", True) - time_str = self._format_duration(duration_int) if enable_formatting else f"{duration_int}秒" - - logger.info(f"{self.log_prefix} 执行禁言命令: {target}({user_id}) -> {time_str}") - - # 发送群聊禁言命令 - success = await self.send_command( - command_name="GROUP_BAN", - args={"qq_id": str(user_id), "duration": str(duration_int)}, - display_message=f"禁言了 {target} {time_str}", - ) - - if success: - # 获取并发送模板化消息 - message = self._get_template_message(target, time_str, reason) - await self.send_text(message) - - logger.info(f"{self.log_prefix} 成功禁言 {target}({user_id}),时长 {duration_int} 秒") - return True, f"成功禁言 {target},时长 {time_str}" - else: - await self.send_text("❌ 发送禁言命令失败") - return False, "发送禁言命令失败" - - except Exception as e: - logger.error(f"{self.log_prefix} 禁言命令执行失败: {e}") - await self.send_text(f"❌ 禁言命令错误: {str(e)}") - return False, str(e) - - def _get_template_message(self, target: str, duration_str: str, reason: str) -> str: - 
"""获取模板化的禁言消息""" - templates = self.get_config("mute.templates") - - template = random.choice(templates) - return template.format(target=target, duration=duration_str, reason=reason) - - def _format_duration(self, seconds: int) -> str: - """将秒数格式化为可读的时间字符串""" - if seconds < 60: - return f"{seconds}秒" - elif seconds < 3600: - minutes = seconds // 60 - remaining_seconds = seconds % 60 - if remaining_seconds > 0: - return f"{minutes}分{remaining_seconds}秒" - else: - return f"{minutes}分钟" - elif seconds < 86400: - hours = seconds // 3600 - remaining_minutes = (seconds % 3600) // 60 - if remaining_minutes > 0: - return f"{hours}小时{remaining_minutes}分钟" - else: - return f"{hours}小时" - else: - days = seconds // 86400 - remaining_hours = (seconds % 86400) // 3600 - if remaining_hours > 0: - return f"{days}天{remaining_hours}小时" - else: - return f"{days}天" - - -# ===== 插件主类 ===== - - -@register_plugin -class MutePlugin(BasePlugin): - """禁言插件 - - 提供智能禁言功能: - - 智能禁言Action:基于LLM判断是否需要禁言(支持群组权限控制) - - 禁言命令Command:手动执行禁言操作(支持用户权限控制) - """ - - # 插件基本信息 - plugin_name = "mute_plugin" # 内部标识符 - enable_plugin = True - config_file_name = "config.toml" - - # 配置节描述 - config_section_descriptions = { - "plugin": "插件基本信息配置", - "components": "组件启用控制", - "permissions": "权限管理配置", - "mute": "核心禁言功能配置", - "smart_mute": "智能禁言Action的专属配置", - "mute_command": "禁言命令Command的专属配置", - "logging": "日志记录相关配置", - } - - # 配置Schema定义 - config_schema = { - "plugin": { - "enabled": ConfigField(type=bool, default=False, description="是否启用插件"), - "config_version": ConfigField(type=str, default="0.0.2", description="配置文件版本"), - }, - "components": { - "enable_smart_mute": ConfigField(type=bool, default=True, description="是否启用智能禁言Action"), - "enable_mute_command": ConfigField( - type=bool, default=False, description="是否启用禁言命令Command(调试用)" - ), - }, - "permissions": { - "allowed_users": ConfigField( - type=list, - default=[], - description="允许使用禁言命令的用户列表,格式:['platform:user_id'],如['qq:123456789']。空列表表示不启用权限控制", - ), - "allowed_groups": ConfigField( - type=list, - default=[], - description="允许使用禁言动作的群组列表,格式:['platform:group_id'],如['qq:987654321']。空列表表示不启用权限控制", - ), - }, - "mute": { - "min_duration": ConfigField(type=int, default=60, description="最短禁言时长(秒)"), - "max_duration": ConfigField(type=int, default=2592000, description="最长禁言时长(秒),默认30天"), - "default_duration": ConfigField(type=int, default=300, description="默认禁言时长(秒),默认5分钟"), - "enable_duration_formatting": ConfigField( - type=bool, default=True, description="是否启用人性化的时长显示(如 '5分钟' 而非 '300秒')" - ), - "log_mute_history": ConfigField(type=bool, default=True, description="是否记录禁言历史(未来功能)"), - "templates": ConfigField( - type=list, - default=[ - "好的,禁言 {target} {duration},理由:{reason}", - "收到,对 {target} 执行禁言 {duration},因为{reason}", - "明白了,禁言 {target} {duration},原因是{reason}", - "哇哈哈哈哈哈,已禁言 {target} {duration},理由:{reason}", - "哎呦我去,对 {target} 执行禁言 {duration},因为{reason}", - "{target},你完蛋了,我要禁言你 {duration} 秒,原因:{reason}", - ], - description="成功禁言后发送的随机消息模板", - ), - "error_messages": ConfigField( - type=list, - default=[ - "没有指定禁言对象呢~", - "没有指定禁言时长呢~", - "禁言时长必须是正数哦~", - "禁言时长必须是数字哦~", - "找不到 {target} 这个人呢~", - "查找用户信息时出现问题~", - ], - description="执行禁言过程中发生错误时发送的随机消息模板", - ), - }, - "smart_mute": { - "strict_mode": ConfigField(type=bool, default=True, description="LLM判定的严格模式"), - "keyword_sensitivity": ConfigField( - type=str, default="normal", description="关键词激活的敏感度", choices=["low", "normal", "high"] - ), - "allow_parallel": ConfigField(type=bool, default=False, description="是否允许并行执行(暂未启用)"), - }, - 
"mute_command": { - "max_batch_size": ConfigField(type=int, default=5, description="最大批量禁言数量(未来功能)"), - "cooldown_seconds": ConfigField(type=int, default=3, description="命令冷却时间(秒)"), - }, - "logging": { - "level": ConfigField( - type=str, default="INFO", description="日志记录级别", choices=["DEBUG", "INFO", "WARNING", "ERROR"] - ), - "prefix": ConfigField(type=str, default="[MutePlugin]", description="日志记录前缀"), - "include_user_info": ConfigField(type=bool, default=True, description="日志中是否包含用户信息"), - "include_duration_info": ConfigField(type=bool, default=True, description="日志中是否包含禁言时长信息"), - }, - } - - def get_plugin_components(self) -> List[Tuple[ComponentInfo, Type]]: - """返回插件包含的组件列表""" - - # 从配置获取组件启用状态 - enable_smart_mute = self.get_config("components.enable_smart_mute", True) - enable_mute_command = self.get_config("components.enable_mute_command", True) - - components = [] - - # 添加智能禁言Action - if enable_smart_mute: - components.append((MuteAction.get_action_info(), MuteAction)) - - # 添加禁言命令Command - if enable_mute_command: - components.append((MuteCommand.get_command_info(), MuteCommand)) - - return components From fa1fb35504775d653c41d7cc5e51f750d57e5227 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 13:59:04 +0000 Subject: [PATCH 53/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/focus_chat/heartFC_chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index 13b5cc834..c52e637fc 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -86,7 +86,7 @@ class HeartFChatting: # 初始化性能记录器 # 如果没有指定版本号,则使用全局版本管理器的版本号 - + self.performance_logger = HFCPerformanceLogger(chat_id) logger.info( From a3a3d872fa418e0a9c1d27a6ed8a5213f6da18bd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 14:13:25 +0000 Subject: [PATCH 54/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/message_receive/storage.py | 21 +++++++++++---------- src/chat/utils/utils_image.py | 8 ++++---- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/src/chat/message_receive/storage.py b/src/chat/message_receive/storage.py index 23afe6c87..146a4372a 100644 --- a/src/chat/message_receive/storage.py +++ b/src/chat/message_receive/storage.py @@ -142,23 +142,24 @@ class MessageStorage: def replace_image_descriptions(text: str) -> str: """将[图片:描述]替换为[picid:image_id]""" # 先检查文本中是否有图片标记 - pattern = r'\[图片:([^\]]+)\]' + pattern = r"\[图片:([^\]]+)\]" matches = re.findall(pattern, text) - + if not matches: logger.debug("文本中没有图片标记,直接返回原文本") return text + def replace_match(match): description = match.group(1).strip() try: - image_record = (Images.select() - .where(Images.description == description) - .order_by(Images.timestamp.desc()) - .first()) + image_record = ( + Images.select().where(Images.description == description).order_by(Images.timestamp.desc()).first() + ) if image_record: return f"[picid:{image_record.image_id}]" - else: - return match.group(0) # 保持原样 - except Exception as e: + else: + return match.group(0) # 保持原样 + except Exception: return match.group(0) - return re.sub(r'\[图片:([^\]]+)\]', 
replace_match, text) + + return re.sub(r"\[图片:([^\]]+)\]", replace_match, text) diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py index eed65ad88..17cfb2323 100644 --- a/src/chat/utils/utils_image.py +++ b/src/chat/utils/utils_image.py @@ -187,12 +187,12 @@ class ImageManager: existing_image = Images.get_or_none(Images.emoji_hash == image_hash) if existing_image: # 更新计数 - if hasattr(existing_image, 'count') and existing_image.count is not None: + if hasattr(existing_image, "count") and existing_image.count is not None: existing_image.count += 1 else: existing_image.count = 1 existing_image.save() - + # 如果已有描述,直接返回 if existing_image.description: return f"[图片:{existing_image.description}]" @@ -229,9 +229,9 @@ class ImageManager: existing_image.path = file_path existing_image.description = description existing_image.timestamp = current_timestamp - if not hasattr(existing_image, 'image_id') or not existing_image.image_id: + if not hasattr(existing_image, "image_id") or not existing_image.image_id: existing_image.image_id = str(uuid.uuid4()) - if not hasattr(existing_image, 'vlm_processed') or existing_image.vlm_processed is None: + if not hasattr(existing_image, "vlm_processed") or existing_image.vlm_processed is None: existing_image.vlm_processed = True existing_image.save() else: From f001eb51fb6e2cc450cc0ecc76550dd9abed41cc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 14:15:13 +0000 Subject: [PATCH 55/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/llm_models/utils_model.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py index 2734fcea7..345e8ad1d 100644 --- a/src/llm_models/utils_model.py +++ b/src/llm_models/utils_model.py @@ -135,11 +135,10 @@ class LLMRequest: custom_params_str = model.get("custom_params", "{}") try: self.custom_params = json.loads(custom_params_str) - except json.JSONDecodeError as e: + except json.JSONDecodeError: logger.error(f"Invalid JSON in custom_params for model '{self.model_name}': {custom_params_str}") self.custom_params = {} - # 获取数据库实例 self._init_database() From e946a127a408e9dd116e038842e7bf472f73deb3 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 22:16:13 +0800 Subject: [PATCH 56/63] Update planner.py --- src/chat/planner_actions/planner.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 11d69935e..02a504a43 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -156,8 +156,7 @@ class ActionPlanner: if action == "no_action": action = "no_reply" reasoning = "决定不使用额外动作" - - if action not in current_available_actions and action != "no_action": + elif action not in current_available_actions: logger.warning( f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" ) From 9683fa8e54473edc0859f349c137962b647c6fbc Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 22:52:52 +0800 Subject: [PATCH 57/63] =?UTF-8?q?better=EF=BC=9A=E4=BC=98=E5=8C=96person?= =?UTF-8?q?=5Finfo=E7=9A=84=E4=BF=A1=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
src/person_info/relationship_manager.py | 46 ++++++++++++++++++++----- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py index 4b139a6d7..a60b424ea 100644 --- a/src/person_info/relationship_manager.py +++ b/src/person_info/relationship_manager.py @@ -124,6 +124,31 @@ class RelationshipManager: if not person_name or person_name == "none": return "" short_impression = await person_info_manager.get_value(person_id, "short_impression") + + current_points = await person_info_manager.get_value(person_id, "points") or [] + if isinstance(current_points, str): + try: + current_points = json.loads(current_points) + except json.JSONDecodeError: + logger.error(f"解析points JSON失败: {current_points}") + current_points = [] + elif not isinstance(current_points, list): + current_points = [] + + # 按时间排序forgotten_points + current_points.sort(key=lambda x: x[2]) + # 按权重加权随机抽取3个points,point[1]的值在1-10之间,权重越高被抽到概率越大 + if len(current_points) > 3: + # point[1] 取值范围1-10,直接作为权重 + weights = [max(1, min(10, int(point[1]))) for point in current_points] + points = random.choices(current_points, weights=weights, k=3) + else: + points = current_points + + # 构建points文本 + points_text = "\n".join( + [f"{point[2]}:{point[0]}\n" for point in points] + ) nickname_str = await person_info_manager.get_value(person_id, "nickname") platform = await person_info_manager.get_value(person_id, "platform") @@ -137,7 +162,10 @@ class RelationshipManager: relation_prompt = f"'{person_name}' ,ta在{platform}上的昵称是{nickname_str}。" if short_impression: - relation_prompt += f"你对ta的印象是:{short_impression}。" + relation_prompt += f"你对ta的印象是:{short_impression}。\n" + + if points_text: + relation_prompt += f"你记得ta最近做的事:{points_text}" return relation_prompt @@ -241,16 +269,16 @@ class RelationshipManager: "weight": 10 }}, {{ - "point": "我让{person_name}帮我写作业,他拒绝了", - "weight": 4 + "point": "我让{person_name}帮我写化学作业,他拒绝了,我感觉他对我有意见,或者ta不喜欢我", + "weight": 3 }}, {{ - "point": "{person_name}居然搞错了我的名字,生气了", + "point": "{person_name}居然搞错了我的名字,我感到生气了,之后不理ta了", "weight": 8 }}, {{ - "point": "{person_name}喜欢吃辣,我和她关系不错", - "weight": 8 + "point": "{person_name}喜欢吃辣,具体来说,没有辣的食物ta都不喜欢吃,可能是因为ta是湖南人。", + "weight": 7 }} }} @@ -456,7 +484,7 @@ class RelationshipManager: 你对{person_name}的了解是: {compressed_summary} -请你用一句话概括你对{person_name}的了解。突出: +请你概括你对{person_name}的了解。突出: 1.对{person_name}的直观印象 2.{global_config.bot.nickname}与{person_name}的关系 3.{person_name}的关键信息 @@ -487,8 +515,8 @@ class RelationshipManager: 2. 
**好感度 (liking_value)**: 0-100的整数,表示这些信息让你对ta的喜。 - 0: 非常厌恶 - 25: 有点反感 - - 50: 中立/无感 - - 75: 有点喜欢 + - 50: 中立/无感(或者文本中无法明显看出) + - 75: 喜欢这个人 - 100: 非常喜欢/开心对这个人 请严格按照json格式输出,不要有其他多余内容: From c50f2c14ad43d878be11aaaa549c91b82ac462e0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 14:53:11 +0000 Subject: [PATCH 58/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/person_info/relationship_manager.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py index a60b424ea..6a25f8716 100644 --- a/src/person_info/relationship_manager.py +++ b/src/person_info/relationship_manager.py @@ -124,7 +124,7 @@ class RelationshipManager: if not person_name or person_name == "none": return "" short_impression = await person_info_manager.get_value(person_id, "short_impression") - + current_points = await person_info_manager.get_value(person_id, "points") or [] if isinstance(current_points, str): try: @@ -134,7 +134,7 @@ class RelationshipManager: current_points = [] elif not isinstance(current_points, list): current_points = [] - + # 按时间排序forgotten_points current_points.sort(key=lambda x: x[2]) # 按权重加权随机抽取3个points,point[1]的值在1-10之间,权重越高被抽到概率越大 @@ -146,9 +146,7 @@ class RelationshipManager: points = current_points # 构建points文本 - points_text = "\n".join( - [f"{point[2]}:{point[0]}\n" for point in points] - ) + points_text = "\n".join([f"{point[2]}:{point[0]}\n" for point in points]) nickname_str = await person_info_manager.get_value(person_id, "nickname") platform = await person_info_manager.get_value(person_id, "platform") @@ -163,7 +161,7 @@ class RelationshipManager: if short_impression: relation_prompt += f"你对ta的印象是:{short_impression}。\n" - + if points_text: relation_prompt += f"你记得ta最近做的事:{points_text}" From 0e982ebcab335986bee3b2aab25829f7288a01c6 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 23:18:12 +0800 Subject: [PATCH 59/63] =?UTF-8?q?better=EF=BC=9A=E4=BC=98=E5=8C=96?= =?UTF-8?q?=E5=85=B3=E7=B3=BBprompt=EF=BC=8C=E5=9B=9E=E9=80=80utils?= =?UTF-8?q?=E7=9A=84=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/llm_models/utils_model.py | 42 +++++++++++++------ src/person_info/relationship_fetcher.py | 55 +++++++++++++++++++++++-- src/person_info/relationship_manager.py | 1 + 3 files changed, 82 insertions(+), 16 deletions(-) diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py index 345e8ad1d..1077cfa09 100644 --- a/src/llm_models/utils_model.py +++ b/src/llm_models/utils_model.py @@ -124,20 +124,14 @@ class LLMRequest: self.model_name: str = model["name"] self.params = kwargs - self.enable_thinking = model.get("enable_thinking", None) + self.enable_thinking = model.get("enable_thinking", False) self.temp = model.get("temp", 0.7) - self.thinking_budget = model.get("thinking_budget", None) + self.thinking_budget = model.get("thinking_budget", 4096) self.stream = model.get("stream", False) self.pri_in = model.get("pri_in", 0) self.pri_out = model.get("pri_out", 0) self.max_tokens = model.get("max_tokens", global_config.model.model_max_output_length) # print(f"max_tokens: {self.max_tokens}") - custom_params_str = model.get("custom_params", "{}") - try: - self.custom_params = 
json.loads(custom_params_str) - except json.JSONDecodeError: - logger.error(f"Invalid JSON in custom_params for model '{self.model_name}': {custom_params_str}") - self.custom_params = {} # 获取数据库实例 self._init_database() @@ -255,6 +249,28 @@ class LLMRequest: elif payload is None: payload = await self._build_payload(prompt) + if stream_mode: + payload["stream"] = stream_mode + + if self.temp != 0.7: + payload["temperature"] = self.temp + + # 添加enable_thinking参数(如果不是默认值False) + if not self.enable_thinking: + payload["enable_thinking"] = False + + if self.thinking_budget != 4096: + payload["thinking_budget"] = self.thinking_budget + + if self.max_tokens: + payload["max_tokens"] = self.max_tokens + + # if "max_tokens" not in payload and "max_completion_tokens" not in payload: + # payload["max_tokens"] = global_config.model.model_max_output_length + # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查 + if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload: + payload["max_completion_tokens"] = payload.pop("max_tokens") + return { "policy": policy, "payload": payload, @@ -654,16 +670,18 @@ class LLMRequest: if self.temp != 0.7: payload["temperature"] = self.temp - # 仅当配置文件中存在参数时,添加对应参数 - if self.enable_thinking is not None: - payload["enable_thinking"] = self.enable_thinking + # 添加enable_thinking参数(如果不是默认值False) + if not self.enable_thinking: + payload["enable_thinking"] = False - if self.thinking_budget is not None: + if self.thinking_budget != 4096: payload["thinking_budget"] = self.thinking_budget if self.max_tokens: payload["max_tokens"] = self.max_tokens + # if "max_tokens" not in payload and "max_completion_tokens" not in payload: + # payload["max_tokens"] = global_config.model.model_max_output_length # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查 if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload: payload["max_completion_tokens"] = payload.pop("max_tokens") diff --git a/src/person_info/relationship_fetcher.py b/src/person_info/relationship_fetcher.py index f1c62851a..ea220e46a 100644 --- a/src/person_info/relationship_fetcher.py +++ b/src/person_info/relationship_fetcher.py @@ -9,7 +9,7 @@ from typing import List, Dict from json_repair import repair_json from src.chat.message_receive.chat_stream import get_chat_manager import json - +import random logger = get_logger("relationship_fetcher") @@ -100,23 +100,70 @@ class RelationshipFetcher: person_info_manager = get_person_info_manager() person_name = await person_info_manager.get_value(person_id, "person_name") short_impression = await person_info_manager.get_value(person_id, "short_impression") + + nickname_str = await person_info_manager.get_value(person_id, "nickname") + platform = await person_info_manager.get_value(person_id, "platform") + + if person_name == nickname_str and not short_impression: + return "" + + current_points = await person_info_manager.get_value(person_id, "points") or [] + + if isinstance(current_points, str): + try: + current_points = json.loads(current_points) + except json.JSONDecodeError: + logger.error(f"解析points JSON失败: {current_points}") + current_points = [] + elif not isinstance(current_points, list): + current_points = [] + + # 按时间排序forgotten_points + current_points.sort(key=lambda x: x[2]) + # 按权重加权随机抽取3个points,point[1]的值在1-10之间,权重越高被抽到概率越大 + if len(current_points) > 3: + # point[1] 取值范围1-10,直接作为权重 + weights = [max(1, min(10, int(point[1]))) for point in current_points] + points = random.choices(current_points, weights=weights, k=3) 
+ else: + points = current_points + + # 构建points文本 + points_text = "\n".join([f"{point[2]}:{point[0]}" for point in points]) info_type = await self._build_fetch_query(person_id, target_message, chat_history) if info_type: await self._extract_single_info(person_id, info_type, person_name) relation_info = self._organize_known_info() + + nickname_str = "" + if person_name != nickname_str: + nickname_str = f"(ta在{platform}上的昵称是{nickname_str})" + if short_impression and relation_info: - relation_info = f"你对{person_name}的印象是:{short_impression}。具体来说:{relation_info}" + if points_text: + relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}。你还记得ta最近做的事:{points_text}" + else: + relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}" elif short_impression: - relation_info = f"你对{person_name}的印象是:{short_impression}" + if points_text: + relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。你还记得ta最近做的事:{points_text}" + else: + relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}" elif relation_info: - relation_info = f"你对{person_name}的了解:{relation_info}" + if points_text: + relation_info = f"你对{person_name}的了解{nickname_str}:{relation_info}。你还记得ta最近做的事:{points_text}" + else: + relation_info = f"你对{person_name}的了解{nickname_str}:{relation_info}" + elif points_text: + relation_info = f"你记得{person_name}{nickname_str}最近做的事:{points_text}" else: relation_info = "" return relation_info + async def _build_fetch_query(self, person_id, target_message, chat_history): nickname_str = ",".join(global_config.bot.alias_names) name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。" diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py index 6a25f8716..2d37bcda8 100644 --- a/src/person_info/relationship_manager.py +++ b/src/person_info/relationship_manager.py @@ -126,6 +126,7 @@ class RelationshipManager: short_impression = await person_info_manager.get_value(person_id, "short_impression") current_points = await person_info_manager.get_value(person_id, "points") or [] + print(f"current_points: {current_points}") if isinstance(current_points, str): try: current_points = json.loads(current_points) From 1643b2f0e8aa8c223c4fbacc6bb2b272ca11dcef Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 15:18:29 +0000 Subject: [PATCH 60/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/person_info/relationship_fetcher.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/person_info/relationship_fetcher.py b/src/person_info/relationship_fetcher.py index ea220e46a..6c6c0a6ed 100644 --- a/src/person_info/relationship_fetcher.py +++ b/src/person_info/relationship_fetcher.py @@ -100,13 +100,13 @@ class RelationshipFetcher: person_info_manager = get_person_info_manager() person_name = await person_info_manager.get_value(person_id, "person_name") short_impression = await person_info_manager.get_value(person_id, "short_impression") - + nickname_str = await person_info_manager.get_value(person_id, "nickname") platform = await person_info_manager.get_value(person_id, "platform") - + if person_name == nickname_str and not short_impression: return "" - + current_points = await person_info_manager.get_value(person_id, "points") or [] if 
isinstance(current_points, str): @@ -136,24 +136,30 @@ class RelationshipFetcher: await self._extract_single_info(person_id, info_type, person_name) relation_info = self._organize_known_info() - + nickname_str = "" if person_name != nickname_str: nickname_str = f"(ta在{platform}上的昵称是{nickname_str})" - + if short_impression and relation_info: if points_text: relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}。你还记得ta最近做的事:{points_text}" else: - relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}" + relation_info = ( + f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}" + ) elif short_impression: if points_text: - relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。你还记得ta最近做的事:{points_text}" + relation_info = ( + f"你对{person_name}的印象是{nickname_str}:{short_impression}。你还记得ta最近做的事:{points_text}" + ) else: relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}" elif relation_info: if points_text: - relation_info = f"你对{person_name}的了解{nickname_str}:{relation_info}。你还记得ta最近做的事:{points_text}" + relation_info = ( + f"你对{person_name}的了解{nickname_str}:{relation_info}。你还记得ta最近做的事:{points_text}" + ) else: relation_info = f"你对{person_name}的了解{nickname_str}:{relation_info}" elif points_text: @@ -163,7 +169,6 @@ class RelationshipFetcher: return relation_info - async def _build_fetch_query(self, person_id, target_message, chat_history): nickname_str = ",".join(global_config.bot.alias_names) name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。" From 0181c26a54598148b4f299a09fee123ad78b1af0 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 23:34:32 +0800 Subject: [PATCH 61/63] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B=E9=85=8D=E7=BD=AE=E5=BA=94=E7=94=A8=E9=94=99=E8=AF=AF?= =?UTF-8?q?=EF=BC=8C=E4=BF=AE=E5=A4=8Dno=5Faction=E6=89=A7=E8=A1=8C?= =?UTF-8?q?=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/memory_system/memory_activator.py | 10 ++++++---- src/chat/planner_actions/planner.py | 3 +-- src/person_info/relationship_fetcher.py | 6 +++--- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/chat/memory_system/memory_activator.py b/src/chat/memory_system/memory_activator.py index 104d0c88f..b9a6248ff 100644 --- a/src/chat/memory_system/memory_activator.py +++ b/src/chat/memory_system/memory_activator.py @@ -69,11 +69,13 @@ def init_prompt(): class MemoryActivator: def __init__(self): # TODO: API-Adapter修改标记 - self.summary_model = LLMRequest( - model=global_config.model.memory_summary, - temperature=0.7, + + self.key_words_model = LLMRequest( + model=global_config.model.utils_small, + temperature=0.5, request_type="memory_activator", ) + self.running_memory = [] self.cached_keywords = set() # 用于缓存历史关键词 @@ -97,7 +99,7 @@ class MemoryActivator: # logger.debug(f"prompt: {prompt}") - response, (reasoning_content, model_name) = await self.summary_model.generate_response_async(prompt) + response, (reasoning_content, model_name) = await self.key_words_model.generate_response_async(prompt) keywords = list(get_keywords_from_json(response)) diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 02a504a43..8dd4ecdc3 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -154,8 +154,7 @@ class ActionPlanner: action_data[key] = 
value if action == "no_action": - action = "no_reply" - reasoning = "决定不使用额外动作" + reasoning = "normal决定不使用额外动作" elif action not in current_available_actions: logger.warning( f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'" diff --git a/src/person_info/relationship_fetcher.py b/src/person_info/relationship_fetcher.py index ea220e46a..7f23cf031 100644 --- a/src/person_info/relationship_fetcher.py +++ b/src/person_info/relationship_fetcher.py @@ -70,14 +70,14 @@ class RelationshipFetcher: # LLM模型配置 self.llm_model = LLMRequest( - model=global_config.model.relation, - request_type="relation", + model=global_config.model.utils_small, + request_type="relation.fetcher", ) # 小模型用于即时信息提取 self.instant_llm_model = LLMRequest( model=global_config.model.utils_small, - request_type="relation.instant", + request_type="relation.fetch", ) name = get_chat_manager().get_stream_name(self.chat_id) From d0ad70924d3f53c1e3fd612090cf267aeff5f2dc Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sun, 6 Jul 2025 23:49:12 +0800 Subject: [PATCH 62/63] =?UTF-8?q?feat=EF=BC=9A=E5=8F=AF=E9=80=89=E6=89=93?= =?UTF-8?q?=E5=BC=80prompt=E6=98=BE=E7=A4=BA=EF=BC=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/normal_chat/normal_chat.py | 13 +++++++------ src/chat/planner_actions/planner.py | 9 +++++---- src/chat/replyer/default_generator.py | 10 +++++++--- src/config/config.py | 3 ++- src/config/official_configs.py | 13 ++++++++++--- src/mais4u/mais4u_chat/s4u_chat.py | 2 -- template/bot_config_template.toml | 8 ++++++-- 7 files changed, 37 insertions(+), 21 deletions(-) diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index e69e2a562..569584eb5 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -224,7 +224,7 @@ class NormalChat: mark_head = False first_bot_msg = None for msg in response_set: - if global_config.experimental.debug_show_chat_mode: + if global_config.debug.debug_show_chat_mode: msg += "ⁿ" message_segment = Seg(type="text", data=msg) bot_message = MessageSending( @@ -434,11 +434,12 @@ class NormalChat: # current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time)) # 使用 self.stream_id # willing_log = f"[激活值:{await willing_manager.get_willing(self.stream_id):.2f}]" if is_willing else "" - logger.info( - f"[{mes_name}]" - f"{message.message_info.user_info.user_nickname}:" # 使用 self.chat_stream - f"{message.processed_plain_text}[兴趣:{interested_rate:.2f}][回复概率:{reply_probability * 100:.1f}%]" - ) + if reply_probability > 0.1: + logger.info( + f"[{mes_name}]" + f"{message.message_info.user_info.user_nickname}:" # 使用 self.chat_stream + f"{message.processed_plain_text}[兴趣:{interested_rate:.2f}][回复概率:{reply_probability * 100:.1f}%]" + ) do_reply = False response_set = None # 初始化 response_set if random() < reply_probability: diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 8dd4ecdc3..135ea6bac 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -119,10 +119,11 @@ class ActionPlanner: try: llm_content, (reasoning_content, _) = await self.planner_llm.generate_response_async(prompt=prompt) - logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}") - logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}") - if reasoning_content: - logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}") + if 
global_config.debug.show_prompt: + logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}") + logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}") + if reasoning_content: + logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}") except Exception as req_e: logger.error(f"{self.log_prefix}LLM 请求执行失败: {req_e}") diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 51f62ba96..d9a7feda0 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -217,7 +217,9 @@ class DefaultReplyer: request_type=self.request_type, ) - logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n") + if global_config.debug.show_prompt: + logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n") + content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt) logger.info(f"最终回复: {content}") @@ -560,7 +562,9 @@ class DefaultReplyer: for name, result, duration in task_results: results_dict[name] = result timing_logs.append(f"{name}: {duration:.4f}s") - logger.info(f"回复生成前信息获取时间: {'; '.join(timing_logs)}") + if duration > 8: + logger.warning(f"回复生成前信息获取耗时过长: {name} 耗时: {duration:.4f}s,请使用更快的模型") + logger.info(f"回复生成前信息获取耗时: {'; '.join(timing_logs)}") expression_habits_block = results_dict["build_expression_habits"] relation_info = results_dict["build_relation_info"] @@ -850,7 +854,7 @@ class DefaultReplyer: type = msg_text[0] data = msg_text[1] - if global_config.experimental.debug_show_chat_mode and type == "text": + if global_config.debug.debug_show_chat_mode and type == "text": data += "ᶠ" part_message_id = f"{thinking_id}_{i}" diff --git a/src/config/config.py b/src/config/config.py index 641353809..ee6e2dbc6 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -35,6 +35,7 @@ from src.config.official_configs import ( LPMMKnowledgeConfig, RelationshipConfig, ToolConfig, + DebugConfig, ) install(extra_lines=3) @@ -165,7 +166,7 @@ class Config(ConfigBase): maim_message: MaimMessageConfig lpmm_knowledge: LPMMKnowledgeConfig tool: ToolConfig - + debug: DebugConfig def load_config(config_path: str) -> Config: """ diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 335b95c77..c1c4bab48 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -529,14 +529,21 @@ class TelemetryConfig(ConfigBase): enable: bool = True """是否启用遥测""" +@dataclass +class DebugConfig(ConfigBase): + """调试配置类""" + + debug_show_chat_mode: bool = False + """是否在回复后显示当前聊天模式""" + + show_prompt: bool = False + """是否显示prompt""" + @dataclass class ExperimentalConfig(ConfigBase): """实验功能配置类""" - debug_show_chat_mode: bool = False - """是否在回复后显示当前聊天模式""" - enable_friend_chat: bool = False """是否启用好友聊天""" diff --git a/src/mais4u/mais4u_chat/s4u_chat.py b/src/mais4u/mais4u_chat/s4u_chat.py index 28c19ab74..825135f62 100644 --- a/src/mais4u/mais4u_chat/s4u_chat.py +++ b/src/mais4u/mais4u_chat/s4u_chat.py @@ -77,8 +77,6 @@ class MessageSenderContainer: msg_id = f"{current_time}_{random.randint(1000, 9999)}" text_to_send = chunk - if global_config.experimental.debug_show_chat_mode: - text_to_send += "ⁿ" message_segment = Seg(type="text", data=text_to_send) bot_message = MessageSending( diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index b8781cea9..50b28d16c 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "3.5.0" +version = "3.6.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- 
#如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -231,6 +231,11 @@ library_log_levels = { "aiohttp" = "WARNING"} # 设置特定库的日志级别 # enable_thinking = : 用于指定模型是否启用思考 # thinking_budget = : 用于指定模型思考最长长度 +[debug] +show_prompt = false # 是否显示prompt +debug_show_chat_mode = false # 是否在回复后显示当前聊天模式 + + [model] model_max_output_length = 1000 # 模型单次返回的最大token数 @@ -366,7 +371,6 @@ key_file = "" # SSL密钥文件路径,仅在use_wss=true时有效 enable = true [experimental] #实验性功能 -debug_show_chat_mode = false # 是否在回复后显示当前聊天模式 enable_friend_chat = false # 是否启用好友聊天 From 7e1514d20b99a03a0dfe755e554fa3efb95f5017 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sun, 6 Jul 2025 15:49:40 +0000 Subject: [PATCH 63/63] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/config/config.py | 1 + src/config/official_configs.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/config/config.py b/src/config/config.py index ee6e2dbc6..de173a520 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -168,6 +168,7 @@ class Config(ConfigBase): tool: ToolConfig debug: DebugConfig + def load_config(config_path: str) -> Config: """ 加载配置文件 diff --git a/src/config/official_configs.py b/src/config/official_configs.py index c1c4bab48..2a37de09a 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -529,10 +529,11 @@ class TelemetryConfig(ConfigBase): enable: bool = True """是否启用遥测""" + @dataclass class DebugConfig(ConfigBase): """调试配置类""" - + debug_show_chat_mode: bool = False """是否在回复后显示当前聊天模式"""
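Note on the new [debug] section introduced in the last two patches: DebugConfig exposes show_prompt and debug_show_chat_mode, the template adds a matching [debug] table, and call sites read the flags as global_config.debug.show_prompt / global_config.debug.debug_show_chat_mode. The sketch below is only an illustration of how such a table can map onto a dataclass of that shape; it is not the project's actual loader in src/config/config.py, and tomllib plus the load_debug_config helper and TEMPLATE string are assumptions made for the example.

# Hypothetical sketch, not the project's loader: map a [debug] TOML table onto
# a dataclass shaped like the DebugConfig added in the patches above.
import tomllib  # assumption: Python 3.11+ standard library
from dataclasses import dataclass, fields


@dataclass
class DebugConfig:
    debug_show_chat_mode: bool = False  # append a mode marker to replies when True
    show_prompt: bool = False           # log the full prompt sent to the model when True


# Mirrors the [debug] table added to template/bot_config_template.toml.
TEMPLATE = """
[debug]
show_prompt = false
debug_show_chat_mode = false
"""


def load_debug_config(toml_text: str) -> DebugConfig:
    # Unknown keys are dropped so an older template without [debug] still loads cleanly.
    table = tomllib.loads(toml_text).get("debug", {})
    known = {f.name for f in fields(DebugConfig)}
    return DebugConfig(**{k: v for k, v in table.items() if k in known})


if __name__ == "__main__":
    cfg = load_debug_config(TEMPLATE)
    print(cfg.show_prompt, cfg.debug_show_chat_mode)  # False False with template defaults

With the template defaults, both flags stay off, so prompt logging and the chat-mode suffix only appear when a deployer turns them on.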