diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py
index 5495912d3..1673f2d9d 100644
--- a/src/chat/planner_actions/action_modifier.py
+++ b/src/chat/planner_actions/action_modifier.py
@@ -188,7 +188,7 @@ class ActionModifier:
             elif activation_type == ActionActivationType.LLM_JUDGE:
                 llm_judge_actions[action_name] = action_info
-            elif activation_type == "never":
+            elif activation_type == ActionActivationType.NEVER:
                 reason = "激活类型为never"
                 deactivated_actions.append((action_name, reason))
                 logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: 激活类型为never")
diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py
index 082fafc62..ddc54bc62 100644
--- a/src/chat/replyer/default_generator.py
+++ b/src/chat/replyer/default_generator.py
@@ -87,6 +87,41 @@ def init_prompt():
         "default_expressor_prompt",
     )
 
+    # s4u 风格的 prompt 模板
+    Prompt(
+        """
+{expression_habits_block}
+{tool_info_block}
+{knowledge_prompt}
+{memory_block}
+{relation_info_block}
+{extra_info_block}
+
+{identity}
+
+{action_descriptions}
+你现在的主要任务是和 {sender_name} 聊天。同时,也有其他用户会参与你们的聊天,你可以参考他们的回复内容,但是你主要还是关注你和{sender_name}的聊天内容。你现在的心情是:{mood_state}
+
+{background_dialogue_prompt}
+--------------------------------
+{time_block}
+这是你和{sender_name}的对话,你们正在交流中:
+{core_dialogue_prompt}
+
+{reply_target_block}
+对方最新发送的内容:{message_txt}
+回复可以简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。
+{config_expression_style}。注意不要复读你说过的话
+{keywords_reaction_prompt}
+请注意不要输出多余内容(包括前后缀,冒号和引号,at或 @等 )。只输出回复内容。
+{moderation_prompt}
+不要浮夸,不要夸张修辞,不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容,现在{sender_name}正在等待你的回复。
+你的回复风格不要浮夸,有逻辑和条理,请你继续回复{sender_name}。
+你的发言:
+""",
+        "s4u_style_prompt",
+    )
+
 
 class DefaultReplyer:
     def __init__(
         self,
@@ -441,6 +476,65 @@ class DefaultReplyer:
             duration = end_time - start_time
             return name, result, duration
 
+    def build_s4u_chat_history_prompts(self, message_list_before_now: list, target_user_id: str) -> tuple[str, str]:
+        """
+        构建 s4u 风格的分离对话 prompt
+
+        Args:
+            message_list_before_now: 历史消息列表
+            target_user_id: 目标用户ID(当前对话对象)
+
+        Returns:
+            tuple: (核心对话prompt, 背景对话prompt)
+        """
+        core_dialogue_list = []
+        background_dialogue_list = []
+        bot_id = str(global_config.bot.qq_account)
+
+        # 过滤消息:分离bot和目标用户的对话 vs 其他用户的对话
+        for msg_dict in message_list_before_now:
+            try:
+                msg_user_id = str(msg_dict.get("user_id"))
+                if msg_user_id == bot_id or msg_user_id == target_user_id:
+                    # bot 和目标用户的对话
+                    core_dialogue_list.append(msg_dict)
+                else:
+                    # 其他用户的对话
+                    background_dialogue_list.append(msg_dict)
+            except Exception as e:
+                logger.error(f"无法处理历史消息记录: {msg_dict}, 错误: {e}")
+
+        # 构建背景对话 prompt
+        background_dialogue_prompt = ""
+        if background_dialogue_list:
+            latest_25_msgs = background_dialogue_list[-int(global_config.chat.max_context_size*0.6):]
+            background_dialogue_prompt_str = build_readable_messages(
+                latest_25_msgs,
+                replace_bot_name=True,
+                merge_messages=True,
+                timestamp_mode="normal_no_YMD",
+                show_pic=False,
+            )
+            background_dialogue_prompt = f"这是其他用户的发言:\n{background_dialogue_prompt_str}"
+
+        # 构建核心对话 prompt
+        core_dialogue_prompt = ""
+        if core_dialogue_list:
+            core_dialogue_list = core_dialogue_list[-int(global_config.chat.max_context_size*2):]  # 限制消息数量
+
+            core_dialogue_prompt_str = build_readable_messages(
+                core_dialogue_list,
+                replace_bot_name=True,
+                merge_messages=False,
+                timestamp_mode="normal_no_YMD",
+                read_mark=0.0,
+                truncate=True,
+                show_actions=True,
+            )
+            core_dialogue_prompt = core_dialogue_prompt_str
+
+        return core_dialogue_prompt, background_dialogue_prompt
+
     async def build_prompt_reply_context(
         self,
         reply_data: Dict[str, Any],
@@ -485,6 +579,14 @@ class DefaultReplyer:
                 action_description = action_info.description
                 action_descriptions += f"- {action_name}: {action_description}\n"
             action_descriptions += "\n"
+
+        message_list_before_now_long = get_raw_msg_before_timestamp_with_chat(
+            chat_id=chat_id,
+            timestamp=time.time(),
+            limit=global_config.chat.max_context_size * 2,
+        )
+
+
         message_list_before_now = get_raw_msg_before_timestamp_with_chat(
             chat_id=chat_id,
             timestamp=time.time(),
@@ -630,28 +732,78 @@ class DefaultReplyer:
                 "chat_target_private2", sender_name=chat_target_name
             )
 
-        return await global_prompt_manager.format_prompt(
-            template_name,
-            expression_habits_block=expression_habits_block,
-            chat_target=chat_target_1,
-            chat_info=chat_talking_prompt,
-            memory_block=memory_block,
-            tool_info_block=tool_info_block,
-            knowledge_prompt=prompt_info,
-            extra_info_block=extra_info_block,
-            relation_info_block=relation_info,
-            time_block=time_block,
-            reply_target_block=reply_target_block,
-            moderation_prompt=moderation_prompt_block,
-            keywords_reaction_prompt=keywords_reaction_prompt,
-            identity=identity_block,
-            target_message=target,
-            sender_name=sender,
-            config_expression_style=global_config.expression.expression_style,
-            action_descriptions=action_descriptions,
-            chat_target_2=chat_target_2,
-            mood_state=mood_prompt,
-        )
+        # 根据配置选择使用哪种 prompt 构建模式
+        if global_config.chat.use_s4u_prompt_mode:
+            # 使用 s4u 对话构建模式:分离当前对话对象和其他对话
+
+            # 获取目标用户ID用于消息过滤
+            target_user_id = ""
+            if sender:
+                # 根据sender通过person_info_manager反向查找person_id,再获取user_id
+                person_id = person_info_manager.get_person_id_by_person_name(sender)
+                if person_id:
+                    # 通过person_info_manager获取person_id对应的user_id字段
+                    try:
+                        user_id_value = await person_info_manager.get_value(person_id, "user_id")
+                        if user_id_value:
+                            target_user_id = str(user_id_value)
+                    except Exception as e:
+                        logger.warning(f"无法从person_id {person_id} 获取user_id: {e}")
+                        target_user_id = ""
+
+            # 构建分离的对话 prompt
+            core_dialogue_prompt, background_dialogue_prompt = self.build_s4u_chat_history_prompts(
+                message_list_before_now_long, target_user_id
+            )
+
+            # 使用 s4u 风格的模板
+            template_name = "s4u_style_prompt"
+
+            return await global_prompt_manager.format_prompt(
+                template_name,
+                expression_habits_block=expression_habits_block,
+                tool_info_block=tool_info_block,
+                knowledge_prompt=prompt_info,
+                memory_block=memory_block,
+                relation_info_block=relation_info,
+                extra_info_block=extra_info_block,
+                identity=identity_block,
+                action_descriptions=action_descriptions,
+                sender_name=sender,
+                mood_state=mood_prompt,
+                background_dialogue_prompt=background_dialogue_prompt,
+                time_block=time_block,
+                core_dialogue_prompt=core_dialogue_prompt,
+                reply_target_block=reply_target_block,
+                message_txt=target,
+                config_expression_style=global_config.expression.expression_style,
+                keywords_reaction_prompt=keywords_reaction_prompt,
+                moderation_prompt=moderation_prompt_block,
+            )
+        else:
+            # 使用原有的模式
+            return await global_prompt_manager.format_prompt(
+                template_name,
+                expression_habits_block=expression_habits_block,
+                chat_target=chat_target_1,
+                chat_info=chat_talking_prompt,
+                memory_block=memory_block,
+                tool_info_block=tool_info_block,
+                knowledge_prompt=prompt_info,
+                extra_info_block=extra_info_block,
+                relation_info_block=relation_info,
+                time_block=time_block,
+                reply_target_block=reply_target_block,
+                moderation_prompt=moderation_prompt_block,
+                keywords_reaction_prompt=keywords_reaction_prompt,
+                identity=identity_block,
+                target_message=target,
+                sender_name=sender,
+                config_expression_style=global_config.expression.expression_style,
+                action_descriptions=action_descriptions,
+                chat_target_2=chat_target_2,
+                mood_state=mood_prompt,
+            )
 
     async def build_prompt_rewrite_context(
         self,
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 3ff3f7b62..b2cec9511 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -83,6 +83,9 @@ class ChatConfig(ConfigBase):
     talk_frequency: float = 1
     """回复频率阈值"""
 
+    use_s4u_prompt_mode: bool = False
+    """是否使用 s4u 对话构建模式,该模式会分开处理当前对话对象和其他所有对话的内容进行 prompt 构建"""
+
     # 修改:基于时段的回复频率配置,改为数组格式
     time_based_talk_frequency: list[str] = field(default_factory=lambda: [])
     """
diff --git a/src/mood/mood_manager.py b/src/mood/mood_manager.py
index a577f2dd9..a6f3baa59 100644
--- a/src/mood/mood_manager.py
+++ b/src/mood/mood_manager.py
@@ -9,6 +9,7 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
 from src.llm_models.utils_model import LLMRequest
 from src.manager.async_task_manager import AsyncTask, async_task_manager
+from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
 
 logger = get_logger("mood")
 
@@ -45,6 +46,12 @@ def init_prompt():
 class ChatMood:
     def __init__(self, chat_id: str):
         self.chat_id: str = chat_id
+
+        chat_manager = get_chat_manager()
+        self.chat_stream = chat_manager.get_stream(self.chat_id)
+
+        self.log_prefix = f"[{self.chat_stream.group_info.group_name if self.chat_stream.group_info else self.chat_stream.user_info.user_nickname}]"
+
         self.mood_state: str = "感觉很平静"
 
         self.regression_count: int = 0
@@ -78,14 +85,14 @@ class ChatMood:
         if random.random() > update_probability:
             return
 
-        logger.info(f"更新情绪状态,感兴趣度: {interested_rate}, 更新概率: {update_probability}")
+        logger.info(f"{self.log_prefix} 更新情绪状态,感兴趣度: {interested_rate}, 更新概率: {update_probability}")
 
         message_time: float = message.message_info.time  # type: ignore
         message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
             chat_id=self.chat_id,
             timestamp_start=self.last_change_time,
             timestamp_end=message_time,
-            limit=15,
+            limit=int(global_config.chat.max_context_size/3),
             limit_mode="last",
         )
         chat_talking_prompt = build_readable_messages(
@@ -114,10 +121,15 @@
             mood_state=self.mood_state,
         )
 
-        logger.info(f"prompt: {prompt}")
+
+
         response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
-        logger.info(f"response: {response}")
-        logger.info(f"reasoning_content: {reasoning_content}")
+        if global_config.debug.show_prompt:
+            logger.info(f"{self.log_prefix} prompt: {prompt}")
+            logger.info(f"{self.log_prefix} response: {response}")
+            logger.info(f"{self.log_prefix} reasoning_content: {reasoning_content}")
+
+        logger.info(f"{self.log_prefix} 情绪状态更新为: {response}")
 
         self.mood_state = response
 
@@ -158,10 +170,15 @@
             mood_state=self.mood_state,
         )
 
-        logger.info(f"prompt: {prompt}")
+
         response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
-        logger.info(f"response: {response}")
-        logger.info(f"reasoning_content: {reasoning_content}")
+
+        if global_config.debug.show_prompt:
+            logger.info(f"{self.log_prefix} prompt: {prompt}")
+            logger.info(f"{self.log_prefix} response: {response}")
+            logger.info(f"{self.log_prefix} reasoning_content: {reasoning_content}")
+
+        logger.info(f"{self.log_prefix} 情绪状态回归为: {response}")
 
         self.mood_state = response
 
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 7ab5195d1..923ad49f8 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
 [inner]
-version = "4.0.1"
+version = "4.0.2"
 
 #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
 #如果你想要修改配置文件,请在修改后将version的值进行变更
@@ -73,6 +73,7 @@ thinking_timeout = 20 # 麦麦一次回复最长思考规划时间,超过这
 replyer_random_probability = 0.5 # 首要replyer模型被选择的概率
 talk_frequency = 1 # 麦麦回复频率,越高,麦麦回复越频繁
+use_s4u_prompt_mode = false # 是否使用 s4u 对话构建模式,该模式会更好的把握当前对话对象的对话内容,但是对群聊整理理解能力较差
 
 time_based_talk_frequency = ["8:00,1", "12:00,1.5", "18:00,2", "01:00,0.5"] # 基于时段的回复频率配置(可选)
 
@@ -156,8 +157,8 @@ consolidation_check_percentage = 0.05 # 检查节点比例
 #不希望记忆的词,已经记忆的不会受到影响,需要手动清理
 memory_ban_words = [ "表情包", "图片", "回复", "聊天记录" ]
 
-[mood] # 暂时不再有效,请不要使用
-enable_mood = false # 是否启用情绪系统
+[mood]
+enable_mood = true # 是否启用情绪系统
 mood_update_interval = 1.0 # 情绪更新间隔 单位秒
 mood_decay_rate = 0.95 # 情绪衰减率
 mood_intensity_factor = 1.0 # 情绪强度因子