From a1a81194f12a42675b5a888511c3854d821633ca Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 1 Jul 2025 12:27:14 +0800 Subject: [PATCH] =?UTF-8?q?feat=EF=BC=9A=E5=90=88=E5=B9=B6normal=E5=92=8Cf?= =?UTF-8?q?ocus=E7=9A=84prompt=E6=9E=84=E5=BB=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 8 + src/chat/focus_chat/heartFC_chat.py | 69 +--- .../expression_selector_processor.py | 107 ----- src/chat/focus_chat/memory_activator.py | 23 +- src/chat/normal_chat/normal_chat.py | 2 - src/chat/normal_chat/normal_chat_generator.py | 110 ++---- src/chat/normal_chat/normal_prompt.py | 372 ------------------ src/chat/replyer/default_generator.py | 311 +++++++++++---- src/chat/replyer/replyer_manager.py | 58 +++ src/plugin_system/apis/generator_api.py | 120 ++++-- src/plugins/built_in/core_actions/plugin.py | 1 + template/bot_config_template.toml | 2 +- 12 files changed, 444 insertions(+), 739 deletions(-) delete mode 100644 src/chat/focus_chat/info_processors/expression_selector_processor.py delete mode 100644 src/chat/normal_chat/normal_prompt.py create mode 100644 src/chat/replyer/replyer_manager.py diff --git a/changelogs/changelog.md b/changelogs/changelog.md index 2c81f150e..92d59d18c 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -1,5 +1,13 @@ # Changelog +## [0.8.1] - 2025-6-27 + +- 修复表情包配置无效问题 +- 合并normal和focus的prompt构建 + + + + ## [0.8.0] - 2025-6-27 MaiBot 0.8.0 现已推出! diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index ba1222650..de8eafb85 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -23,7 +23,6 @@ from src.chat.heart_flow.observation.actions_observation import ActionObservatio from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor from src.chat.focus_chat.memory_activator import MemoryActivator from src.chat.focus_chat.info_processors.base_processor import BaseProcessor -from src.chat.focus_chat.info_processors.expression_selector_processor import ExpressionSelectorProcessor from src.chat.focus_chat.planners.planner_factory import PlannerFactory from src.chat.focus_chat.planners.modify_actions import ActionModifier from src.chat.focus_chat.planners.action_manager import ActionManager @@ -31,7 +30,6 @@ from src.config.config import global_config from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger from src.chat.focus_chat.hfc_version_manager import get_hfc_version from src.chat.focus_chat.info.relation_info import RelationInfo -from src.chat.focus_chat.info.expression_selection_info import ExpressionSelectionInfo from src.chat.focus_chat.info.structured_info import StructuredInfo @@ -59,7 +57,6 @@ PROCESSOR_CLASSES = { POST_PLANNING_PROCESSOR_CLASSES = { "ToolProcessor": (ToolProcessor, "tool_use_processor"), "PersonImpressionpProcessor": (PersonImpressionpProcessor, "person_impression_processor"), - "ExpressionSelectorProcessor": (ExpressionSelectorProcessor, "expression_selector_processor"), } logger = get_logger("hfc") # Logger Name Changed @@ -699,30 +696,6 @@ class HeartFChatting: task_start_times[task] = time.time() logger.info(f"{self.log_prefix} 启动后期处理器任务: {processor_name}") - # 添加记忆激活器任务 - async def run_memory_with_timeout_and_timing(): - start_time = time.time() - try: - result = await asyncio.wait_for( - self.memory_activator.activate_memory(observations), - timeout=MEMORY_ACTIVATION_TIMEOUT, - ) - end_time = time.time() - 
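# The wrapper being removed in this hunk follows a generic await-with-timeout-
# and-timing pattern. A minimal standalone sketch of that pattern (the helper
# name and the `costs` dict are illustrative, not the project's API):
import asyncio
import time

async def run_timed(coro, name: str, timeout: float, costs: dict):
    start = time.time()
    try:
        return await asyncio.wait_for(coro, timeout=timeout)
    finally:
        # Elapsed time is recorded on success, timeout and failure alike.
        costs[name] = time.time() - start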
post_processor_time_costs["MemoryActivator"] = end_time - start_time - logger.debug(f"{self.log_prefix} 记忆激活器耗时: {end_time - start_time:.3f}秒") - return result - except Exception as e: - end_time = time.time() - post_processor_time_costs["MemoryActivator"] = end_time - start_time - logger.warning(f"{self.log_prefix} 记忆激活器执行异常,耗时: {end_time - start_time:.3f}秒") - raise e - - memory_task = asyncio.create_task(run_memory_with_timeout_and_timing()) - task_list.append(memory_task) - task_to_name_map[memory_task] = ("memory", "MemoryActivator") - task_start_times[memory_task] = time.time() - logger.info(f"{self.log_prefix} 启动记忆激活器任务") - # 如果没有任何后期任务,直接返回 if not task_list: logger.info(f"{self.log_prefix} 没有启用的后期处理器或记忆激活器") @@ -731,7 +704,6 @@ class HeartFChatting: # 等待所有任务完成 pending_tasks = set(task_list) all_post_plan_info = [] - running_memorys = [] while pending_tasks: done, pending_tasks = await asyncio.wait(pending_tasks, return_when=asyncio.FIRST_COMPLETED) @@ -748,13 +720,6 @@ class HeartFChatting: all_post_plan_info.extend(result) else: logger.warning(f"{self.log_prefix} 后期处理器 {task_name} 返回了 None") - elif task_type == "memory": - logger.info(f"{self.log_prefix} 记忆激活器已完成!") - if result is not None: - running_memorys = result - else: - logger.warning(f"{self.log_prefix} 记忆激活器返回了 None") - running_memorys = [] except asyncio.TimeoutError: # 对于超时任务,记录已用时间 @@ -764,12 +729,6 @@ class HeartFChatting: logger.warning( f"{self.log_prefix} 后期处理器 {task_name} 超时(>{global_config.focus_chat.processor_max_time}s),已跳过,耗时: {elapsed_time:.3f}秒" ) - elif task_type == "memory": - post_processor_time_costs["MemoryActivator"] = elapsed_time - logger.warning( - f"{self.log_prefix} 记忆激活器超时(>{MEMORY_ACTIVATION_TIMEOUT}s),已跳过,耗时: {elapsed_time:.3f}秒" - ) - running_memorys = [] except Exception as e: # 对于异常任务,记录已用时间 elapsed_time = time.time() - task_start_times[task] @@ -779,49 +738,29 @@ class HeartFChatting: f"{self.log_prefix} 后期处理器 {task_name} 执行失败,耗时: {elapsed_time:.3f}秒. 错误: {e}", exc_info=True, ) - elif task_type == "memory": - post_processor_time_costs["MemoryActivator"] = elapsed_time - logger.error( - f"{self.log_prefix} 记忆激活器执行失败,耗时: {elapsed_time:.3f}秒. 
错误: {e}", - exc_info=True, - ) - running_memorys = [] # 将后期处理器的结果整合到 action_data 中 updated_action_data = action_data.copy() relation_info = "" - selected_expressions = [] structured_info = "" for info in all_post_plan_info: if isinstance(info, RelationInfo): relation_info = info.get_processed_info() - elif isinstance(info, ExpressionSelectionInfo): - selected_expressions = info.get_expressions_for_action_data() elif isinstance(info, StructuredInfo): structured_info = info.get_processed_info() if relation_info: - updated_action_data["relation_info_block"] = relation_info + updated_action_data["relation_info"] = relation_info - if selected_expressions: - updated_action_data["selected_expressions"] = selected_expressions if structured_info: updated_action_data["structured_info"] = structured_info - # 特殊处理running_memorys - if running_memorys: - memory_str = "以下是当前在聊天中,你回忆起的记忆:\n" - for running_memory in running_memorys: - memory_str += f"{running_memory['content']}\n" - updated_action_data["memory_block"] = memory_str - logger.info(f"{self.log_prefix} 添加了 {len(running_memorys)} 个激活的记忆到action_data") - - if all_post_plan_info or running_memorys: + if all_post_plan_info: logger.info( - f"{self.log_prefix} 后期处理完成,产生了 {len(all_post_plan_info)} 个信息项和 {len(running_memorys)} 个记忆" + f"{self.log_prefix} 后期处理完成,产生了 {len(all_post_plan_info)} 个信息项" ) # 输出详细统计信息 @@ -908,7 +847,7 @@ class HeartFChatting: logger.debug(f"{self.log_prefix} 并行阶段完成,准备进入规划器,plan_info数量: {len(all_plan_info)}") with Timer("规划器", cycle_timers): - plan_result = await self.action_planner.plan(all_plan_info, [], loop_start_time) + plan_result = await self.action_planner.plan(all_plan_info, self.observations, loop_start_time) loop_plan_info = { "action_result": plan_result.get("action_result", {}), diff --git a/src/chat/focus_chat/info_processors/expression_selector_processor.py b/src/chat/focus_chat/info_processors/expression_selector_processor.py deleted file mode 100644 index 66b199718..000000000 --- a/src/chat/focus_chat/info_processors/expression_selector_processor.py +++ /dev/null @@ -1,107 +0,0 @@ -import time -import random -from typing import List -from src.chat.heart_flow.observation.chatting_observation import ChattingObservation -from src.chat.heart_flow.observation.observation import Observation -from src.common.logger import get_logger -from src.chat.message_receive.chat_stream import get_chat_manager -from .base_processor import BaseProcessor -from src.chat.focus_chat.info.info_base import InfoBase -from src.chat.focus_chat.info.expression_selection_info import ExpressionSelectionInfo -from src.chat.express.expression_selector import expression_selector - -logger = get_logger("processor") - - -class ExpressionSelectorProcessor(BaseProcessor): - log_prefix = "表达选择器" - - def __init__(self, subheartflow_id: str): - super().__init__() - - self.subheartflow_id = subheartflow_id - self.last_selection_time = 0 - self.selection_interval = 10 # 40秒间隔 - self.cached_expressions = [] # 缓存上一次选择的表达方式 - - name = get_chat_manager().get_stream_name(self.subheartflow_id) - self.log_prefix = f"[{name}] 表达选择器" - - async def process_info( - self, - observations: List[Observation] = None, - action_type: str = None, - action_data: dict = None, - **kwargs, - ) -> List[InfoBase]: - """处理信息对象 - - Args: - observations: 观察对象列表 - - Returns: - List[InfoBase]: 处理后的表达选择信息列表 - """ - current_time = time.time() - - # 检查频率限制 - if current_time - self.last_selection_time < self.selection_interval: - logger.debug(f"{self.log_prefix} 
距离上次选择不足{self.selection_interval}秒,使用缓存的表达方式") - # 使用缓存的表达方式 - if self.cached_expressions: - # 从缓存的15个中随机选5个 - final_expressions = random.sample(self.cached_expressions, min(5, len(self.cached_expressions))) - - # 创建表达选择信息 - expression_info = ExpressionSelectionInfo() - expression_info.set_selected_expressions(final_expressions) - - logger.info(f"{self.log_prefix} 使用缓存选择了{len(final_expressions)}个表达方式") - return [expression_info] - else: - logger.debug(f"{self.log_prefix} 没有缓存的表达方式,跳过选择") - return [] - - # 获取聊天内容 - chat_info = "" - if observations: - for observation in observations: - if isinstance(observation, ChattingObservation): - # chat_info = observation.get_observe_info() - chat_info = observation.talking_message_str_truncate_short - break - - if not chat_info: - logger.debug(f"{self.log_prefix} 没有聊天内容,跳过表达方式选择") - return [] - - try: - if action_type == "reply": - target_message = action_data.get("reply_to", "") - else: - target_message = "" - - # LLM模式:调用LLM选择5-10个,然后随机选5个 - selected_expressions = await expression_selector.select_suitable_expressions_llm( - self.subheartflow_id, chat_info, max_num=12, min_num=2, target_message=target_message - ) - cache_size = len(selected_expressions) if selected_expressions else 0 - mode_desc = f"LLM模式(已缓存{cache_size}个)" - - if selected_expressions: - self.cached_expressions = selected_expressions - self.last_selection_time = current_time - - # 创建表达选择信息 - expression_info = ExpressionSelectionInfo() - expression_info.set_selected_expressions(selected_expressions) - - logger.info(f"{self.log_prefix} 为当前聊天选择了{len(selected_expressions)}个表达方式({mode_desc})") - return [expression_info] - else: - logger.debug(f"{self.log_prefix} 未选择任何表达方式") - return [] - - except Exception as e: - logger.error(f"{self.log_prefix} 处理表达方式选择时出错: {e}") - return [] diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py index fb92c0024..029120497 100644 --- a/src/chat/focus_chat/memory_activator.py +++ b/src/chat/focus_chat/memory_activator.py @@ -10,6 +10,7 @@ from typing import List, Dict import difflib import json from json_repair import repair_json +from src.person_info.person_info import get_person_info_manager logger = get_logger("memory_activator") @@ -75,8 +76,8 @@ class MemoryActivator: ) self.running_memory = [] self.cached_keywords = set() # 用于缓存历史关键词 - - async def activate_memory(self, observations) -> List[Dict]: + + async def activate_memory_with_chat_history(self, chat_id, target_message, chat_history_prompt) -> List[Dict]: """ 激活记忆 @@ -90,14 +91,14 @@ class MemoryActivator: if not global_config.memory.enable_memory: return [] - obs_info_text = "" - for observation in observations: - if isinstance(observation, ChattingObservation): - obs_info_text += observation.talking_message_str_truncate_short - elif isinstance(observation, StructureObservation): - working_info = observation.get_observe_info() - for working_info_item in working_info: - obs_info_text += f"{working_info_item['type']}: {working_info_item['content']}\n" + # obs_info_text = "" + # for observation in observations: + # if isinstance(observation, ChattingObservation): + # obs_info_text += observation.talking_message_str_truncate_short + # elif isinstance(observation, StructureObservation): + # working_info = observation.get_observe_info() + # for working_info_item in working_info: + # obs_info_text += f"{working_info_item['type']}: {working_info_item['content']}\n" # logger.info(f"回忆待检索内容:obs_info_text: {obs_info_text}") @@ -106,7 +107,7 @@ class 
MemoryActivator: prompt = await global_prompt_manager.format_prompt( "memory_activator_prompt", - obs_info_text=obs_info_text, + obs_info_text=chat_history_prompt, cached_keywords=cached_keywords_str, ) diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 2b9777fba..4d5342416 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -685,8 +685,6 @@ class NormalChat: try: return await self.gpt.generate_response( message=message, - thinking_id=thinking_id, - enable_planner=self.enable_planner, available_actions=available_actions, ) except Exception as e: diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py index 6a3c8cc52..62388c6db 100644 --- a/src/chat/normal_chat/normal_chat_generator.py +++ b/src/chat/normal_chat/normal_chat_generator.py @@ -1,13 +1,12 @@ from typing import List, Optional, Union -import random from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.message_receive.message import MessageThinking -from src.chat.normal_chat.normal_prompt import prompt_builder -from src.chat.utils.timer_calculator import Timer from src.common.logger import get_logger from src.person_info.person_info import PersonInfoManager, get_person_info_manager from src.chat.utils.utils import process_llm_response +from src.plugin_system.apis import generator_api +from src.chat.focus_chat.memory_activator import MemoryActivator logger = get_logger("normal_chat_response") @@ -15,90 +14,61 @@ logger = get_logger("normal_chat_response") class NormalChatGenerator: def __init__(self): - # TODO: API-Adapter修改标记 - self.model_reasoning = LLMRequest( - model=global_config.model.replyer_1, - request_type="normal.chat_1", - ) - self.model_normal = LLMRequest( - model=global_config.model.replyer_2, - request_type="normal.chat_2", - ) + model_config_1 = global_config.model.replyer_1.copy() + model_config_2 = global_config.model.replyer_2.copy() + prob_first = global_config.normal_chat.normal_chat_first_probability + + model_config_1['weight'] = prob_first + model_config_2['weight'] = 1.0 - prob_first + + self.model_configs = [model_config_1, model_config_2] + self.model_sum = LLMRequest(model=global_config.model.memory_summary, temperature=0.7, request_type="relation") - self.current_model_type = "r1" # 默认使用 R1 - self.current_model_name = "unknown model" + self.memory_activator = MemoryActivator() async def generate_response( - self, message: MessageThinking, thinking_id: str, enable_planner: bool = False, available_actions=None - ) -> Optional[Union[str, List[str]]]: - """根据当前模型类型选择对应的生成函数""" - # 从global_config中获取模型概率值并选择模型 - if random.random() < global_config.normal_chat.normal_chat_first_probability: - current_model = self.model_reasoning - self.current_model_name = current_model.model_name - else: - current_model = self.model_normal - self.current_model_name = current_model.model_name - - logger.info( - f"{self.current_model_name}思考:{message.processed_plain_text[:30] + '...' 
if len(message.processed_plain_text) > 30 else message.processed_plain_text}" - ) # noqa: E501 - - model_response = await self._generate_response_with_model( - message, current_model, thinking_id, enable_planner, available_actions - ) - - if model_response: - logger.debug(f"{global_config.bot.nickname}的备选回复是:{model_response}") - model_response = process_llm_response(model_response) - - return model_response - else: - logger.info(f"{self.current_model_name}思考,失败") - return None - - async def _generate_response_with_model( self, message: MessageThinking, - model: LLMRequest, - thinking_id: str, - enable_planner: bool = False, available_actions=None, ): + logger.info( + f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}" + ) person_id = PersonInfoManager.get_person_id( message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id ) person_info_manager = get_person_info_manager() person_name = await person_info_manager.get_value(person_id, "person_name") - - if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname: - sender_name = ( - f"[{message.chat_stream.user_info.user_nickname}]" - f"[群昵称:{message.chat_stream.user_info.user_cardname}](你叫ta{person_name})" - ) - elif message.chat_stream.user_info.user_nickname: - sender_name = f"[{message.chat_stream.user_info.user_nickname}](你叫ta{person_name})" - else: - sender_name = f"用户({message.chat_stream.user_info.user_id})" - - # 构建prompt - with Timer() as t_build_prompt: - prompt = await prompt_builder.build_prompt_normal( - message_txt=message.processed_plain_text, - sender_name=sender_name, - chat_stream=message.chat_stream, - enable_planner=enable_planner, - available_actions=available_actions, - ) - logger.debug(f"构建prompt时间: {t_build_prompt.human_readable}") + relation_info = await person_info_manager.get_value(person_id, "short_impression") + reply_to_str = f"{person_name}:{message.processed_plain_text}" + + structured_info = "" try: - content, (reasoning_content, model_name) = await model.generate_response_async(prompt) + success, reply_set, prompt = await generator_api.generate_reply( + chat_stream=message.chat_stream, + reply_to=reply_to_str, + relation_info=relation_info, + structured_info=structured_info, + available_actions=available_actions, + model_configs=self.model_configs, + request_type="normal.replyer", + return_prompt=True + ) - logger.info(f"prompt:{prompt}\n生成回复:{content}") + if not success or not reply_set: + logger.info(f"对 {message.processed_plain_text} 的回复生成失败") + return None - logger.info(f"对 {message.processed_plain_text} 的回复:{content}") + content = " ".join([item[1] for item in reply_set if item[0] == "text"]) + logger.debug(f"对 {message.processed_plain_text} 的回复:{content}") + + if content: + logger.info(f"{global_config.bot.nickname}的备选回复是:{content}") + content = process_llm_response(content) + + return content except Exception: logger.exception("生成回复时出错") diff --git a/src/chat/normal_chat/normal_prompt.py b/src/chat/normal_chat/normal_prompt.py deleted file mode 100644 index 75a237882..000000000 --- a/src/chat/normal_chat/normal_prompt.py +++ /dev/null @@ -1,372 +0,0 @@ -from src.config.config import global_config -from src.common.logger import get_logger -from src.chat.utils.prompt_builder import Prompt, global_prompt_manager -from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat -import time -from src.chat.utils.utils 
import get_recent_group_speaker -from src.manager.mood_manager import mood_manager -from src.chat.memory_system.Hippocampus import hippocampus_manager -from src.chat.knowledge.knowledge_lib import qa_manager -import random -from src.person_info.person_info import get_person_info_manager -from src.chat.express.expression_selector import expression_selector -import re -import ast - -from src.person_info.relationship_manager import get_relationship_manager - -logger = get_logger("prompt") - - -def init_prompt(): - Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1") - Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1") - Prompt("在群里聊天", "chat_target_group2") - Prompt("和{sender_name}私聊", "chat_target_private2") - - Prompt( - """ -你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中: -{style_habbits} -请你根据情景使用以下,不要盲目使用,不要生硬使用,而是结合到表达中: -{grammar_habbits} - -{memory_prompt} -{relation_prompt} -{prompt_info} -{chat_target} -现在时间是:{now_time} -{chat_talking_prompt} -现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言或者回复这条消息。\n -你的网名叫{bot_name},有人也叫你{bot_other_names},{prompt_personality}。 - -{action_descriptions}你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},请你给出回复 -尽量简短一些。请注意把握聊天内容。 -请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景。 -{keywords_reaction_prompt} -请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容。 -{moderation_prompt} -不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容""", - "reasoning_prompt_main", - ) - - Prompt( - "你回忆起:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n", - "memory_prompt", - ) - - Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt") - - Prompt( - """ -你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中: -{style_habbits} -请你根据情景使用以下句法,不要盲目使用,不要生硬使用,而是结合到表达中: -{grammar_habbits} -{memory_prompt} -{prompt_info} -你正在和 {sender_name} 聊天。 -{relation_prompt} -你们之前的聊天记录如下: -{chat_talking_prompt} -现在 {sender_name} 说的: {message_txt} 引起了你的注意,针对这条消息回复他。 -你的网名叫{bot_name},{sender_name}也叫你{bot_other_names},{prompt_personality}。 -{action_descriptions}你正在和 {sender_name} 聊天, 现在请你读读你们之前的聊天记录,给出回复。量简短一些。请注意把握聊天内容。 -{keywords_reaction_prompt} -{moderation_prompt} -请说中文。不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容""", - "reasoning_prompt_private_main", # New template for private CHAT chat - ) - - -class PromptBuilder: - def __init__(self): - self.prompt_built = "" - self.activate_messages = "" - - async def build_prompt_normal( - self, - chat_stream, - message_txt: str, - sender_name: str = "某人", - enable_planner: bool = False, - available_actions=None, - ) -> str: - person_info_manager = get_person_info_manager() - bot_person_id = person_info_manager.get_person_id("system", "bot_id") - - short_impression = await person_info_manager.get_value(bot_person_id, "short_impression") - - # 解析字符串形式的Python列表 - try: - if isinstance(short_impression, str) and short_impression.strip(): - short_impression = ast.literal_eval(short_impression) - elif not short_impression: - logger.warning("short_impression为空,使用默认值") - short_impression = ["友好活泼", "人类"] - except (ValueError, SyntaxError) as e: - logger.error(f"解析short_impression失败: {e}, 原始值: {short_impression}") - short_impression = ["友好活泼", "人类"] - - # 确保short_impression是列表格式且有足够的元素 - if not isinstance(short_impression, list) or len(short_impression) < 2: - logger.warning(f"short_impression格式不正确: {short_impression}, 使用默认值") - short_impression = ["友好活泼", "人类"] - - personality = short_impression[0] - identity = short_impression[1] - prompt_personality = personality + "," + identity - - is_group_chat = 
bool(chat_stream.group_info) - - who_chat_in_group = [] - if is_group_chat: - who_chat_in_group = get_recent_group_speaker( - chat_stream.stream_id, - (chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None, - limit=global_config.normal_chat.max_context_size, - ) - who_chat_in_group.append( - (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname) - ) - - relation_prompt = "" - if global_config.relationship.enable_relationship: - for person in who_chat_in_group: - relationship_manager = get_relationship_manager() - relation_prompt += f"{await relationship_manager.build_relationship_info(person)}\n" - - mood_prompt = mood_manager.get_mood_prompt() - - memory_prompt = "" - if global_config.memory.enable_memory: - related_memory = await hippocampus_manager.get_memory_from_text( - text=message_txt, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False - ) - - related_memory_info = "" - if related_memory: - for memory in related_memory: - related_memory_info += memory[1] - memory_prompt = await global_prompt_manager.format_prompt( - "memory_prompt", related_memory_info=related_memory_info - ) - - message_list_before_now = get_raw_msg_before_timestamp_with_chat( - chat_id=chat_stream.stream_id, - timestamp=time.time(), - limit=global_config.focus_chat.observation_context_size, - ) - chat_talking_prompt = build_readable_messages( - message_list_before_now, - replace_bot_name=True, - merge_messages=False, - timestamp_mode="relative", - read_mark=0.0, - show_actions=True, - ) - - message_list_before_now_half = get_raw_msg_before_timestamp_with_chat( - chat_id=chat_stream.stream_id, - timestamp=time.time(), - limit=int(global_config.focus_chat.observation_context_size * 0.5), - ) - chat_talking_prompt_half = build_readable_messages( - message_list_before_now_half, - replace_bot_name=True, - merge_messages=False, - timestamp_mode="relative", - read_mark=0.0, - show_actions=True, - ) - - expressions = await expression_selector.select_suitable_expressions_llm( - chat_stream.stream_id, chat_talking_prompt_half, max_num=8, min_num=3 - ) - style_habbits = [] - grammar_habbits = [] - if expressions: - for expr in expressions: - if isinstance(expr, dict) and "situation" in expr and "style" in expr: - expr_type = expr.get("type", "style") - if expr_type == "grammar": - grammar_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}") - else: - style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}") - else: - logger.debug("没有从处理器获得表达方式,将使用空的表达方式") - - style_habbits_str = "\n".join(style_habbits) - grammar_habbits_str = "\n".join(grammar_habbits) - - # 关键词检测与反应 - keywords_reaction_prompt = "" - try: - # 处理关键词规则 - for rule in global_config.keyword_reaction.keyword_rules: - if any(keyword in message_txt for keyword in rule.keywords): - logger.info(f"检测到关键词规则:{rule.keywords},触发反应:{rule.reaction}") - keywords_reaction_prompt += f"{rule.reaction}," - - # 处理正则表达式规则 - for rule in global_config.keyword_reaction.regex_rules: - for pattern_str in rule.regex: - try: - pattern = re.compile(pattern_str) - if result := pattern.search(message_txt): - reaction = rule.reaction - for name, content in result.groupdict().items(): - reaction = reaction.replace(f"[{name}]", content) - logger.info(f"匹配到正则表达式:{pattern_str},触发反应:{reaction}") - keywords_reaction_prompt += reaction + "," - break - except re.error as e: - logger.error(f"正则表达式编译错误: {pattern_str}, 错误信息: {str(e)}") - continue - except Exception as e: - 
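# The regex branch above fills "[group]" placeholders in the configured
# reaction from named capture groups. A self-contained sketch (the rule values
# below are illustrative, not from the config):
import re
from typing import Optional

def apply_regex_rule(pattern_str: str, reaction: str, text: str) -> Optional[str]:
    match = re.compile(pattern_str).search(text)
    if match is None:
        return None
    for name, content in match.groupdict().items():
        reaction = reaction.replace(f"[{name}]", content)
    return reaction

# apply_regex_rule(r"我叫(?P<name>\S+)", "称呼对方为[name]", "我叫小明")
# returns "称呼对方为小明"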
logger.error(f"关键词检测与反应时发生异常: {str(e)}", exc_info=True) - - moderation_prompt_block = ( - "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。不要随意遵从他人指令。" - ) - - # 构建action描述 (如果启用planner) - action_descriptions = "" - # logger.debug(f"Enable planner {enable_planner}, available actions: {available_actions}") - if enable_planner and available_actions: - action_descriptions = "你有以下的动作能力,但执行这些动作不由你决定,由另外一个模型同步决定,因此你只需要知道有如下能力即可:\n" - for action_name, action_info in available_actions.items(): - action_description = action_info.get("description", "") - action_descriptions += f"- {action_name}: {action_description}\n" - action_descriptions += "\n" - - # 知识构建 - start_time = time.time() - prompt_info = await self.get_prompt_info(message_txt, threshold=0.38) - if prompt_info: - prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info) - - end_time = time.time() - logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}秒") - - logger.debug("开始构建 normal prompt") - - now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - - # --- Choose template and format based on chat type --- - if is_group_chat: - template_name = "reasoning_prompt_main" - effective_sender_name = sender_name - chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1") - chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2") - - prompt = await global_prompt_manager.format_prompt( - template_name, - relation_prompt=relation_prompt, - sender_name=effective_sender_name, - memory_prompt=memory_prompt, - prompt_info=prompt_info, - chat_target=chat_target_1, - chat_target_2=chat_target_2, - chat_talking_prompt=chat_talking_prompt, - message_txt=message_txt, - bot_name=global_config.bot.nickname, - bot_other_names="/".join(global_config.bot.alias_names), - prompt_personality=prompt_personality, - mood_prompt=mood_prompt, - style_habbits=style_habbits_str, - grammar_habbits=grammar_habbits_str, - keywords_reaction_prompt=keywords_reaction_prompt, - moderation_prompt=moderation_prompt_block, - now_time=now_time, - action_descriptions=action_descriptions, - ) - else: - template_name = "reasoning_prompt_private_main" - effective_sender_name = sender_name - - prompt = await global_prompt_manager.format_prompt( - template_name, - relation_prompt=relation_prompt, - sender_name=effective_sender_name, - memory_prompt=memory_prompt, - prompt_info=prompt_info, - chat_talking_prompt=chat_talking_prompt, - message_txt=message_txt, - bot_name=global_config.bot.nickname, - bot_other_names="/".join(global_config.bot.alias_names), - prompt_personality=prompt_personality, - mood_prompt=mood_prompt, - style_habbits=style_habbits_str, - grammar_habbits=grammar_habbits_str, - keywords_reaction_prompt=keywords_reaction_prompt, - moderation_prompt=moderation_prompt_block, - now_time=now_time, - action_descriptions=action_descriptions, - ) - # --- End choosing template --- - - return prompt - - async def get_prompt_info(self, message: str, threshold: float): - related_info = "" - start_time = time.time() - - logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}") - # 从LPMM知识库获取知识 - try: - found_knowledge_from_lpmm = qa_manager.get_knowledge(message) - - end_time = time.time() - if found_knowledge_from_lpmm is not None: - logger.debug( - f"从LPMM知识库获取知识,相关信息:{found_knowledge_from_lpmm[:100]}...,信息长度: {len(found_knowledge_from_lpmm)}" - ) - related_info += found_knowledge_from_lpmm - logger.debug(f"获取知识库内容耗时: {(end_time - start_time):.3f}秒") - 
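# Stripped to its logic, the deleted get_prompt_info is a guarded lookup with
# a sentinel fallback. A compact sketch, assuming qa_manager exposes
# get_knowledge(str) -> Optional[str] as used above (note the replacement in
# default_generator.py returns "" instead of the sentinel string):
def lookup_knowledge(qa_manager, message: str, default: str = "未检索到知识") -> str:
    try:
        found = qa_manager.get_knowledge(message)
        return found if found is not None else default
    except Exception:
        return default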
logger.debug(f"获取知识库内容,相关信息:{related_info[:100]}...,信息长度: {len(related_info)}") - return related_info - else: - logger.debug("从LPMM知识库获取知识失败,可能是从未导入过知识,返回空知识...") - return "未检索到知识" - except Exception as e: - logger.error(f"获取知识库内容时发生异常: {str(e)}") - return "未检索到知识" - - -def weighted_sample_no_replacement(items, weights, k) -> list: - """ - 加权且不放回地随机抽取k个元素。 - - 参数: - items: 待抽取的元素列表 - weights: 每个元素对应的权重(与items等长,且为正数) - k: 需要抽取的元素个数 - 返回: - selected: 按权重加权且不重复抽取的k个元素组成的列表 - - 如果 items 中的元素不足 k 个,就只会返回所有可用的元素 - - 实现思路: - 每次从当前池中按权重加权随机选出一个元素,选中后将其从池中移除,重复k次。 - 这样保证了: - 1. count越大被选中概率越高 - 2. 不会重复选中同一个元素 - """ - selected = [] - pool = list(zip(items, weights)) - for _ in range(min(k, len(pool))): - total = sum(w for _, w in pool) - r = random.uniform(0, total) - upto = 0 - for idx, (item, weight) in enumerate(pool): - upto += weight - if upto >= r: - selected.append(item) - pool.pop(idx) - break - return selected - - -init_prompt() -prompt_builder = PromptBuilder() diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index c301ce31c..f923d9965 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -10,7 +10,6 @@ from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.utils.timer_calculator import Timer # <--- Import Timer from src.chat.focus_chat.heartFC_sender import HeartFCSender -from src.chat.utils.utils import process_llm_response from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info from src.chat.message_receive.chat_stream import ChatStream from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp @@ -18,16 +17,29 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat from src.chat.express.exprssion_learner import get_expression_learner import time +from src.chat.express.expression_selector import expression_selector +from src.manager.mood_manager import mood_manager import random import ast from src.person_info.person_info import get_person_info_manager from datetime import datetime import re +from src.chat.knowledge.knowledge_lib import qa_manager +from src.chat.focus_chat.memory_activator import MemoryActivator logger = get_logger("replyer") def init_prompt(): + + Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1") + Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1") + Prompt("在群里聊天", "chat_target_group2") + Prompt("和{sender_name}私聊", "chat_target_private2") + Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt") + + + Prompt( """ {expression_habits_block} @@ -35,19 +47,21 @@ def init_prompt(): {memory_block} {relation_info_block} {extra_info_block} -{time_block} + {chat_target} +{time_block} {chat_info} {reply_target_block} {identity} -你需要使用合适的语言习惯和句法,参考聊天内容,组织一条日常且口语化的回复。注意不要复读你说过的话。 -{config_expression_style}。回复不要浮夸,不要用夸张修辞,平淡一些。 +{action_descriptions} +你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},请你给出回复 +{config_expression_style}。 +请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,注意不要复读你说过的话。 {keywords_reaction_prompt} -请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。 -不要浮夸,不要夸张修辞,请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出一条回复就好。 -现在,你说: -""", +请注意不要输出多余内容(包括前后缀,冒号和引号,at或 @等 )。只输出回复内容。 +{moderation_prompt} +不要浮夸,不要夸张修辞,不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容""", "default_generator_prompt", ) @@ -120,18 +134,41 @@ def init_prompt(): class 
DefaultReplyer: - def __init__(self, chat_stream: ChatStream): + def __init__(self, chat_stream: ChatStream, model_configs: Optional[List[Dict[str, Any]]] = None, request_type: str = "focus.replyer"): self.log_prefix = "replyer" - # TODO: API-Adapter修改标记 - self.express_model = LLMRequest( - model=global_config.model.replyer_1, - request_type="focus.replyer", - ) + self.request_type = request_type + + if model_configs: + self.express_model_configs = model_configs + else: + # 当未提供配置时,使用默认配置并赋予默认权重 + default_config = global_config.model.replyer_1.copy() + default_config.setdefault('weight', 1.0) + self.express_model_configs = [default_config] + + if not self.express_model_configs: + logger.warning("未找到有效的模型配置,回复生成可能会失败。") + # 提供一个最终的回退,以防止在空列表上调用 random.choice + fallback_config = global_config.model.replyer_1.copy() + fallback_config.setdefault('weight', 1.0) + self.express_model_configs = [fallback_config] + self.heart_fc_sender = HeartFCSender() + self.memory_activator = MemoryActivator() self.chat_stream = chat_stream self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id) + def _select_weighted_model_config(self) -> Dict[str, Any]: + """使用加权随机选择来挑选一个模型配置""" + configs = self.express_model_configs + # 提取权重,如果模型配置中没有'weight'键,则默认为1.0 + weights = [config.get('weight', 1.0) for config in configs] + + # random.choices 返回一个列表,我们取第一个元素 + selected_config = random.choices(population=configs, weights=weights, k=1)[0] + return selected_config + async def _create_thinking_message(self, anchor_message: Optional[MessageRecv], thinking_id: str): """创建思考消息 (尝试锚定到 anchor_message)""" if not anchor_message or not anchor_message.chat_stream: @@ -160,17 +197,36 @@ class DefaultReplyer: return None async def generate_reply_with_context( - self, reply_data: Dict[str, Any], enable_splitter: bool = True, enable_chinese_typo: bool = True - ) -> Tuple[bool, Optional[List[str]]]: + self, + reply_data: Dict[str, Any] = {}, + reply_to: str = "", + relation_info: str = "", + structured_info: str = "", + extra_info: str = "", + available_actions: List[str] = [], + + ) -> Tuple[bool, Optional[str]]: """ 回复器 (Replier): 核心逻辑,负责生成回复文本。 (已整合原 HeartFCGenerator 的功能) """ try: + if not reply_data: + reply_data = { + "reply_to": reply_to, + "relation_info": relation_info, + "structured_info": structured_info, + "extra_info": extra_info, + } + for key, value in reply_data.items(): + if not value: + logger.info(f"{self.log_prefix} 回复数据跳过{key},生成回复时将忽略。") + # 3. 构建 Prompt with Timer("构建Prompt", {}): # 内部计时器,可选保留 prompt = await self.build_prompt_reply_context( reply_data=reply_data, # 传递action_data + available_actions=available_actions ) # 4. 
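# How the weighted pick above behaves: with replyer_1 weighted by
# normal_chat_first_probability and replyer_2 by its complement, the selection
# reduces to a single random.choices call. Standalone sketch (the config dicts
# are illustrative):
import random
from typing import Any, Dict, List

def pick_model(configs: List[Dict[str, Any]]) -> Dict[str, Any]:
    weights = [c.get("weight", 1.0) for c in configs]  # missing weight -> 1.0
    return random.choices(population=configs, weights=weights, k=1)[0]

# pick_model([{"name": "replyer_1", "weight": 0.7},
#             {"name": "replyer_2", "weight": 0.3}])
# returns the first config roughly 70% of the time.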
调用 LLM 生成回复 @@ -180,8 +236,17 @@ class DefaultReplyer: try: with Timer("LLM生成", {}): # 内部计时器,可选保留 + # 加权随机选择一个模型配置 + selected_model_config = self._select_weighted_model_config() + logger.info(f"{self.log_prefix} 使用模型配置: {selected_model_config.get('model_name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})") + + express_model = LLMRequest( + model=selected_model_config, + request_type=self.request_type, + ) + logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n") - content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt) + content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt) logger.info(f"最终回复: {content}") @@ -190,22 +255,7 @@ class DefaultReplyer: logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}") return False, None # LLM 调用失败则无法生成回复 - processed_response = process_llm_response(content, enable_splitter, enable_chinese_typo) - - # 5. 处理 LLM 响应 - if not content: - logger.warning(f"{self.log_prefix}LLM 生成了空内容。") - return False, None - if not processed_response: - logger.warning(f"{self.log_prefix}处理后的回复为空。") - return False, None - - reply_set = [] - for str in processed_response: - reply_seg = ("text", str) - reply_set.append(reply_seg) - - return True, reply_set + return True, content, prompt except Exception as e: logger.error(f"{self.log_prefix}回复生成意外失败: {e}") @@ -213,8 +263,8 @@ class DefaultReplyer: return False, None async def rewrite_reply_with_context( - self, reply_data: Dict[str, Any], enable_splitter: bool = True, enable_chinese_typo: bool = True - ) -> Tuple[bool, Optional[List[str]]]: + self, reply_data: Dict[str, Any] + ) -> Tuple[bool, Optional[str]]: """ 表达器 (Expressor): 核心逻辑,负责生成回复文本。 """ @@ -239,8 +289,16 @@ class DefaultReplyer: try: with Timer("LLM生成", {}): # 内部计时器,可选保留 - # TODO: API-Adapter修改标记 - content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt) + # 加权随机选择一个模型配置 + selected_model_config = self._select_weighted_model_config() + logger.info(f"{self.log_prefix} 使用模型配置进行重写: {selected_model_config.get('model_name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})") + + express_model = LLMRequest( + model=selected_model_config, + request_type=self.request_type, + ) + + content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt) logger.info(f"想要表达:{raw_reply}||理由:{reason}") logger.info(f"最终回复: {content}\n") @@ -250,22 +308,7 @@ class DefaultReplyer: logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}") return False, None # LLM 调用失败则无法生成回复 - processed_response = process_llm_response(content, enable_splitter, enable_chinese_typo) - - # 5. 
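# The block removed here packaged each processed segment as a ("text", seg)
# tuple; that step now lives in generator_api.process_human_text. Equivalent
# one-liner over the old local variable:
reply_set = [("text", seg) for seg in processed_response]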
处理 LLM 响应 - if not content: - logger.warning(f"{self.log_prefix}LLM 生成了空内容。") - return False, None - if not processed_response: - logger.warning(f"{self.log_prefix}处理后的回复为空。") - return False, None - - reply_set = [] - for str in processed_response: - reply_seg = ("text", str) - reply_set.append(reply_seg) - - return True, reply_set + return True, content except Exception as e: logger.error(f"{self.log_prefix}回复生成意外失败: {e}") @@ -275,22 +318,38 @@ class DefaultReplyer: async def build_prompt_reply_context( self, reply_data=None, + available_actions: List[str] = [] ) -> str: + """ + 构建回复器上下文 + + Args: + reply_data: 回复数据 + replay_data 包含以下字段: + structured_info: 结构化信息,一般是工具调用获得的信息 + relation_info: 人物关系信息 + reply_to: 回复对象 + memory_info: 记忆信息 + extra_info/extra_info_block: 额外信息 + available_actions: 可用动作 + + Returns: + str: 构建好的上下文 + """ chat_stream = self.chat_stream + chat_id = chat_stream.stream_id person_info_manager = get_person_info_manager() bot_person_id = person_info_manager.get_person_id("system", "bot_id") is_group_chat = bool(chat_stream.group_info) - self_info_block = reply_data.get("self_info_block", "") structured_info = reply_data.get("structured_info", "") - relation_info_block = reply_data.get("relation_info_block", "") + relation_info = reply_data.get("relation_info", "") reply_to = reply_data.get("reply_to", "none") - memory_block = reply_data.get("memory_block", "") # 优先使用 extra_info_block,没有则用 extra_info - extra_info_block = reply_data.get("extra_info_block", "") or reply_data.get("extra_info", "") - + extra_info_block = reply_data.get("extra_info", "") or reply_data.get("extra_info_block", "") + sender = "" target = "" if ":" in reply_to or ":" in reply_to: @@ -299,9 +358,19 @@ class DefaultReplyer: if len(parts) == 2: sender = parts[0].strip() target = parts[1].strip() + + # 构建action描述 (如果启用planner) + action_descriptions = "" + # logger.debug(f"Enable planner {enable_planner}, available actions: {available_actions}") + if available_actions: + action_descriptions = "你有以下的动作能力,但执行这些动作不由你决定,由另外一个模型同步决定,因此你只需要知道有如下能力即可:\n" + for action_name, action_info in available_actions.items(): + action_description = action_info.get("description", "") + action_descriptions += f"- {action_name}: {action_description}\n" + action_descriptions += "\n" message_list_before_now = get_raw_msg_before_timestamp_with_chat( - chat_id=chat_stream.stream_id, + chat_id=chat_id, timestamp=time.time(), limit=global_config.focus_chat.observation_context_size, ) @@ -316,12 +385,36 @@ class DefaultReplyer: show_actions=True, ) # print(f"chat_talking_prompt: {chat_talking_prompt}") + + message_list_before_now_half = get_raw_msg_before_timestamp_with_chat( + chat_id=chat_id, + timestamp=time.time(), + limit=int(global_config.focus_chat.observation_context_size * 0.5), + ) + chat_talking_prompt_half = build_readable_messages( + message_list_before_now_half, + replace_bot_name=True, + merge_messages=False, + timestamp_mode="relative", + read_mark=0.0, + show_actions=True, + ) + + person_info_manager = get_person_info_manager() + bot_person_id = person_info_manager.get_person_id("system", "bot_id") + + + is_group_chat = bool(chat_stream.group_info) style_habbits = [] grammar_habbits = [] # 使用从处理器传来的选中表达方式 - selected_expressions = reply_data.get("selected_expressions", []) if reply_data else [] + # LLM模式:调用LLM选择5-10个,然后随机选5个 + selected_expressions = await expression_selector.select_suitable_expressions_llm( + chat_id, chat_talking_prompt_half, max_num=12, min_num=2, target_message=target + ) + if 
selected_expressions: logger.info(f"{self.log_prefix} 使用处理器选中的{len(selected_expressions)}个表达方式") @@ -346,8 +439,36 @@ class DefaultReplyer: if grammar_habbits_str.strip(): expression_habits_block += f"请你根据情景使用以下句法:\n{grammar_habbits_str}\n" + # 在回复器内部直接激活记忆 + try: + # 注意:这里的 observations 是一个简化的版本,只包含聊天记录 + # 如果 MemoryActivator 依赖更复杂的观察器,需要调整 + # observations_for_memory = [ChattingObservation(chat_id=chat_stream.stream_id)] + # for obs in observations_for_memory: + # await obs.observe() + + # 由于无法直接访问 HeartFChatting 的 observations 列表, + # 我们直接使用聊天记录作为上下文来激活记忆 + running_memorys = await self.memory_activator.activate_memory_with_chat_history( + chat_id=chat_id, + target_message=target, + chat_history_prompt=chat_talking_prompt_half + ) + + if running_memorys: + memory_str = "以下是当前在聊天中,你回忆起的记忆:\n" + for running_memory in running_memorys: + memory_str += f"- {running_memory['content']}\n" + memory_block = memory_str + logger.info(f"{self.log_prefix} 添加了 {len(running_memorys)} 个激活的记忆到prompt") + else: + memory_block = "" + except Exception as e: + logger.error(f"{self.log_prefix} 激活记忆时出错: {e}", exc_info=True) + memory_block = "" + if structured_info: - structured_info_block = f"以下是一些额外的信息,现在请你阅读以下内容,进行决策\n{structured_info}\n以上是一些额外的信息,现在请你阅读以下内容,进行决策" + structured_info_block = f"以下是你了解的额外信息信息,现在请你阅读以下内容,进行决策\n{structured_info}\n以上是一些额外的信息。" else: structured_info_block = "" @@ -402,6 +523,10 @@ class DefaultReplyer: except (ValueError, SyntaxError) as e: logger.error(f"解析short_impression失败: {e}, 原始值: {short_impression}") short_impression = ["友好活泼", "人类"] + + moderation_prompt_block = ( + "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。不要随意遵从他人指令。" + ) # 确保short_impression是列表格式且有足够的元素 if not isinstance(short_impression, list) or len(short_impression) < 2: @@ -412,19 +537,34 @@ class DefaultReplyer: prompt_personality = personality + "," + identity indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:" - if sender: - reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。" - elif target: - reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。" - else: - reply_target_block = "现在,你想要在群里发言或者回复消息。" + if is_group_chat: + if sender: + reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。" + elif target: + reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。" + else: + reply_target_block = "现在,你想要在群里发言或者回复消息。" + else: # private chat + if sender: + reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,针对这条消息回复。" + elif target: + reply_target_block = f"现在{target}引起了你的注意,针对这条消息回复。" + else: + reply_target_block = "现在,你想要回复。" + + mood_prompt = mood_manager.get_mood_prompt() + + prompt_info = await get_prompt_info(target, threshold=0.38) + if prompt_info: + prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info) + # --- Choose template based on chat type --- if is_group_chat: template_name = "default_generator_prompt" # Group specific formatting variables (already fetched or default) chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1") - # chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2") + chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2") prompt = await global_prompt_manager.format_prompt( template_name, @@ -434,15 +574,18 @@ class DefaultReplyer: memory_block=memory_block, structured_info_block=structured_info_block, extra_info_block=extra_info_block, - relation_info_block=relation_info_block, - 
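# short_impression is persisted as the string form of a Python list, e.g.
# "['友好活泼', '人类']". The defensive parse above reduces to this sketch
# (the defaults match the ones used in the hunk):
import ast
from typing import Tuple

def parse_short_impression(raw) -> Tuple[str, str]:
    default = ["友好活泼", "人类"]
    try:
        value = ast.literal_eval(raw) if isinstance(raw, str) and raw.strip() else default
    except (ValueError, SyntaxError):
        value = default
    if not isinstance(value, list) or len(value) < 2:
        value = default
    personality, identity = value[0], value[1]
    return personality, identity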
self_info_block=self_info_block, + relation_info_block=relation_info, time_block=time_block, reply_target_block=reply_target_block, + moderation_prompt=moderation_prompt_block, keywords_reaction_prompt=keywords_reaction_prompt, identity=indentify_block, target_message=target, sender_name=sender, config_expression_style=global_config.expression.expression_style, + action_descriptions=action_descriptions, + chat_target_2=chat_target_2, + mood_prompt=mood_prompt, ) else: # Private chat template_name = "default_generator_private_prompt" @@ -460,7 +603,7 @@ class DefaultReplyer: chat_info=chat_talking_prompt, memory_block=memory_block, structured_info_block=structured_info_block, - relation_info_block=relation_info_block, + relation_info_block=relation_info, extra_info_block=extra_info_block, time_block=time_block, keywords_reaction_prompt=keywords_reaction_prompt, @@ -762,4 +905,30 @@ def weighted_sample_no_replacement(items, weights, k) -> list: return selected +async def get_prompt_info(message: str, threshold: float): + related_info = "" + start_time = time.time() + + logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}") + # 从LPMM知识库获取知识 + try: + found_knowledge_from_lpmm = qa_manager.get_knowledge(message) + + end_time = time.time() + if found_knowledge_from_lpmm is not None: + logger.debug( + f"从LPMM知识库获取知识,相关信息:{found_knowledge_from_lpmm[:100]}...,信息长度: {len(found_knowledge_from_lpmm)}" + ) + related_info += found_knowledge_from_lpmm + logger.debug(f"获取知识库内容耗时: {(end_time - start_time):.3f}秒") + logger.debug(f"获取知识库内容,相关信息:{related_info[:100]}...,信息长度: {len(related_info)}") + return related_info + else: + logger.debug("从LPMM知识库获取知识失败,可能是从未导入过知识,返回空知识...") + return "" + except Exception as e: + logger.error(f"获取知识库内容时发生异常: {str(e)}") + return "" + + init_prompt() diff --git a/src/chat/replyer/replyer_manager.py b/src/chat/replyer/replyer_manager.py new file mode 100644 index 000000000..0a970d26e --- /dev/null +++ b/src/chat/replyer/replyer_manager.py @@ -0,0 +1,58 @@ +from typing import Dict, Any, Optional, List +from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager +from src.chat.replyer.default_generator import DefaultReplyer +from src.common.logger import get_logger + +logger = get_logger("ReplyerManager") + +class ReplyerManager: + def __init__(self): + self._replyers: Dict[str, DefaultReplyer] = {} + + def get_replyer( + self, + chat_stream: Optional[ChatStream] = None, + chat_id: Optional[str] = None, + model_configs: Optional[List[Dict[str, Any]]] = None, + request_type: str = "replyer" + ) -> Optional[DefaultReplyer]: + """ + 获取或创建回复器实例。 + + model_configs 仅在首次为某个 chat_id/stream_id 创建实例时有效。 + 后续调用将返回已缓存的实例,忽略 model_configs 参数。 + """ + stream_id = chat_stream.stream_id if chat_stream else chat_id + if not stream_id: + logger.warning("[ReplyerManager] 缺少 stream_id,无法获取回复器。") + return None + + # 如果已有缓存实例,直接返回 + if stream_id in self._replyers: + logger.debug(f"[ReplyerManager] 为 stream_id '{stream_id}' 返回已存在的回复器实例。") + return self._replyers[stream_id] + + # 如果没有缓存,则创建新实例(首次初始化) + logger.debug(f"[ReplyerManager] 为 stream_id '{stream_id}' 创建新的回复器实例并缓存。") + + target_stream = chat_stream + if not target_stream: + chat_manager = get_chat_manager() + if chat_manager: + target_stream = chat_manager.get_stream(stream_id) + + if not target_stream: + logger.warning(f"[ReplyerManager] 未找到 stream_id='{stream_id}' 的聊天流,无法创建回复器。") + return None + + # model_configs 只在此时(初始化时)生效 + replyer = DefaultReplyer( + chat_stream=target_stream, + 
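# Cache semantics of the manager defined here: model_configs only takes effect
# on the first get_replyer call for a stream; later calls return the cached
# instance and silently ignore new configs. Usage sketch, assuming "stream_a"
# resolves to a known chat stream (the ids and weights are illustrative):
from src.chat.replyer.replyer_manager import replyer_manager

first = replyer_manager.get_replyer(chat_id="stream_a",
                                    model_configs=[{"weight": 1.0}])
again = replyer_manager.get_replyer(chat_id="stream_a",
                                    model_configs=[{"weight": 9.9}])  # ignored
assert first is again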
model_configs=model_configs, # 可以是None,此时使用默认模型 + request_type=request_type + ) + self._replyers[stream_id] = replyer + return replyer + +# 创建一个全局实例 +replyer_manager = ReplyerManager() \ No newline at end of file diff --git a/src/plugin_system/apis/generator_api.py b/src/plugin_system/apis/generator_api.py index c537d9d95..c5a416466 100644 --- a/src/plugin_system/apis/generator_api.py +++ b/src/plugin_system/apis/generator_api.py @@ -8,10 +8,12 @@ success, reply_set = await generator_api.generate_reply(chat_stream, action_data, reasoning) """ -from typing import Tuple, Any, Dict, List +from typing import Tuple, Any, Dict, List, Optional from src.common.logger import get_logger from src.chat.replyer.default_generator import DefaultReplyer -from src.chat.message_receive.chat_stream import get_chat_manager +from src.chat.message_receive.chat_stream import ChatStream +from src.chat.utils.utils import process_llm_response +from src.chat.replyer.replyer_manager import replyer_manager logger = get_logger("generator_api") @@ -21,46 +23,36 @@ logger = get_logger("generator_api") # ============================================================================= -def get_replyer(chat_stream=None, chat_id: str = None) -> DefaultReplyer: +def get_replyer( + chat_stream: Optional[ChatStream] = None, + chat_id: Optional[str] = None, + model_configs: Optional[List[Dict[str, Any]]] = None, + request_type: str = "replyer" +) -> Optional[DefaultReplyer]: """获取回复器对象 - 优先使用chat_stream,如果没有则使用chat_id直接查找 + 优先使用chat_stream,如果没有则使用chat_id直接查找。 + 使用 ReplyerManager 来管理实例,避免重复创建。 Args: chat_stream: 聊天流对象(优先) chat_id: 聊天ID(实际上就是stream_id) + model_configs: 模型配置列表 + request_type: 请求类型 Returns: - Optional[Any]: 回复器对象,如果获取失败则返回None + Optional[DefaultReplyer]: 回复器对象,如果获取失败则返回None """ try: - # 优先使用聊天流 - if chat_stream: - logger.debug("[GeneratorAPI] 使用聊天流获取回复器") - return DefaultReplyer(chat_stream=chat_stream) - - # 使用chat_id直接查找(chat_id即为stream_id) - if chat_id: - logger.debug("[GeneratorAPI] 使用chat_id获取回复器") - chat_manager = get_chat_manager() - if not chat_manager: - logger.warning("[GeneratorAPI] 无法获取聊天管理器") - return None - - # 直接使用chat_id作为stream_id查找 - target_stream = chat_manager.get_stream(chat_id) - - if target_stream is None: - logger.warning(f"[GeneratorAPI] 未找到匹配的聊天流 chat_id={chat_id}") - return None - - return DefaultReplyer(chat_stream=target_stream) - - logger.warning("[GeneratorAPI] 缺少必要参数,无法获取回复器") - return None - + logger.debug(f"[GeneratorAPI] 正在获取回复器,chat_id: {chat_id}, chat_stream: {'有' if chat_stream else '无'}") + return replyer_manager.get_replyer( + chat_stream=chat_stream, + chat_id=chat_id, + model_configs=model_configs, + request_type=request_type + ) except Exception as e: - logger.error(f"[GeneratorAPI] 获取回复器失败: {e}") + logger.error(f"[GeneratorAPI] 获取回复器时发生意外错误: {e}", exc_info=True) return None @@ -71,10 +63,18 @@ def get_replyer(chat_stream=None, chat_id: str = None) -> DefaultReplyer: async def generate_reply( chat_stream=None, - action_data: Dict[str, Any] = None, chat_id: str = None, + action_data: Dict[str, Any] = None, + reply_to: str = "", + relation_info: str = "", + structured_info: str = "", + extra_info: str = "", + available_actions: List[str] = None, enable_splitter: bool = True, enable_chinese_typo: bool = True, + return_prompt: bool = False, + model_configs: Optional[List[Dict[str, Any]]] = None, + request_type: str = "", ) -> Tuple[bool, List[Tuple[str, Any]]]: """生成回复 @@ -84,13 +84,13 @@ async def generate_reply( chat_id: 聊天ID(备用) enable_splitter: 是否启用消息分割器 enable_chinese_typo: 
是否启用错字生成器 - + return_prompt: 是否返回提示词 Returns: Tuple[bool, List[Tuple[str, Any]]]: (是否成功, 回复集合) """ try: # 获取回复器 - replyer = get_replyer(chat_stream, chat_id) + replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs, request_type=request_type) if not replyer: logger.error("[GeneratorAPI] 无法获取回复器") return False, [] @@ -98,16 +98,26 @@ async def generate_reply( logger.info("[GeneratorAPI] 开始生成回复") # 调用回复器生成回复 - success, reply_set = await replyer.generate_reply_with_context( - reply_data=action_data or {}, enable_splitter=enable_splitter, enable_chinese_typo=enable_chinese_typo + success, content, prompt = await replyer.generate_reply_with_context( + reply_data=action_data or {}, + reply_to=reply_to, + relation_info=relation_info, + structured_info=structured_info, + extra_info=extra_info, + available_actions=available_actions, ) + + reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo) if success: logger.info(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项") else: logger.warning("[GeneratorAPI] 回复生成失败") - return success, reply_set or [] + if return_prompt: + return success, reply_set or [], prompt + else: + return success, reply_set or [] except Exception as e: logger.error(f"[GeneratorAPI] 生成回复时出错: {e}") @@ -120,6 +130,7 @@ async def rewrite_reply( chat_id: str = None, enable_splitter: bool = True, enable_chinese_typo: bool = True, + model_configs: Optional[List[Dict[str, Any]]] = None, ) -> Tuple[bool, List[Tuple[str, Any]]]: """重写回复 @@ -135,7 +146,7 @@ async def rewrite_reply( """ try: # 获取回复器 - replyer = get_replyer(chat_stream, chat_id) + replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs) if not replyer: logger.error("[GeneratorAPI] 无法获取回复器") return False, [] @@ -143,9 +154,11 @@ async def rewrite_reply( logger.info("[GeneratorAPI] 开始重写回复") # 调用回复器重写回复 - success, reply_set = await replyer.rewrite_reply_with_context( - reply_data=reply_data or {}, enable_splitter=enable_splitter, enable_chinese_typo=enable_chinese_typo + success, content = await replyer.rewrite_reply_with_context( + reply_data=reply_data or {} ) + + reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo) if success: logger.info(f"[GeneratorAPI] 重写回复成功,生成了 {len(reply_set)} 个回复项") @@ -157,3 +170,30 @@ async def rewrite_reply( except Exception as e: logger.error(f"[GeneratorAPI] 重写回复时出错: {e}") return False, [] + + +async def process_human_text( + content:str, + enable_splitter:bool, + enable_chinese_typo:bool +) -> List[Tuple[str, Any]]: + """将文本处理为更拟人化的文本 + + Args: + content: 文本内容 + enable_splitter: 是否启用消息分割器 + enable_chinese_typo: 是否启用错字生成器 + """ + try: + processed_response = process_llm_response(content, enable_splitter, enable_chinese_typo) + + reply_set = [] + for str in processed_response: + reply_seg = ("text", str) + reply_set.append(reply_seg) + + return reply_set + + except Exception as e: + logger.error(f"[GeneratorAPI] 处理人形文本时出错: {e}") + return [] \ No newline at end of file diff --git a/src/plugins/built_in/core_actions/plugin.py b/src/plugins/built_in/core_actions/plugin.py index 98c668d5c..145a0bb54 100644 --- a/src/plugins/built_in/core_actions/plugin.py +++ b/src/plugins/built_in/core_actions/plugin.py @@ -62,6 +62,7 @@ class ReplyAction(BaseAction): success, reply_set = await generator_api.generate_reply( action_data=self.action_data, chat_id=self.chat_id, + request_type="focus.replyer", ) # 检查从start_time以来的新消息数量 diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 
c7ac59492..5605dea53 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -44,7 +44,7 @@ compress_indentity = true # 是否压缩身份,压缩后会精简身份信息 [expression] # 表达方式 -expression_style = "描述麦麦说话的表达风格,表达习惯,例如:(回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。不要有额外的符号,尽量简单简短)" +expression_style = "描述麦麦说话的表达风格,表达习惯,例如:(请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景。)" enable_expression_learning = false # 是否启用表达学习,麦麦会学习不同群里人类说话风格(群之间不互通) learning_interval = 600 # 学习间隔 单位秒
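# End-to-end sketch of the merged reply path this patch introduces, mirroring
# the call now made in NormalChatGenerator (argument values are illustrative):
from src.plugin_system.apis import generator_api

async def reply_once(chat_stream, model_configs):
    success, reply_set, prompt = await generator_api.generate_reply(
        chat_stream=chat_stream,
        reply_to="小明:今天吃什么",
        relation_info="",
        structured_info="",
        available_actions={},
        model_configs=model_configs,  # e.g. replyer_1/replyer_2 with weights
        request_type="normal.replyer",
        return_prompt=True,           # with False, a 2-tuple is returned
    )
    if success:
        # reply_set is a list of ("text", segment) tuples after humanizing.
        return " ".join(seg for kind, seg in reply_set if kind == "text")
    return None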