From 84e0dda891b473e37108ce452487adc7a93680ee Mon Sep 17 00:00:00 2001 From: 114514 <2514624910@qq.com> Date: Tue, 29 Apr 2025 17:21:32 +0800 Subject: [PATCH 01/16] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E7=BB=93=E6=9D=9F?= =?UTF-8?q?=E8=AF=AD=EF=BC=8C=E4=BC=BC=E4=B9=8E=E5=BE=88=E4=BC=98=E9=9B=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/PFC/action_planner.py | 121 +++++++++++++++++++++++------ src/plugins/PFC/conversation.py | 42 +++++++++- src/plugins/PFC/reply_generator.py | 23 +++++- 3 files changed, 159 insertions(+), 27 deletions(-) diff --git a/src/plugins/PFC/action_planner.py b/src/plugins/PFC/action_planner.py index 8d2c7ea90..4b48f127f 100644 --- a/src/plugins/PFC/action_planner.py +++ b/src/plugins/PFC/action_planner.py @@ -81,6 +81,24 @@ block_and_ignore: 更加极端的结束对话方式,直接结束对话并在 注意:请严格按照JSON格式输出,不要包含任何其他内容。""" +# 新增:Prompt(3): 决定是否在结束对话前发送告别语 +PROMPT_END_DECISION = """{persona_text}。刚刚你决定结束一场 QQ 私聊。 + +【你们之前的聊天记录】 +{chat_history_text} + +你觉得你们的对话已经完整结束了吗?有时候,在对话自然结束后再说点什么可能会有点奇怪,但有时也可能需要一条简短的消息来圆满结束。 +如果觉得确实有必要再发一条简短、自然、符合你人设的告别消息(比如 "好,下次再聊~" 或 "嗯,先这样吧"),就输出 "yes"。 +如果觉得当前状态下直接结束对话更好,没有必要再发消息,就输出 "no"。 + +请以 JSON 格式输出你的选择: +{{ + "say_bye": "yes/no", + "reason": "选择 yes 或 no 的原因和内心想法 (简要说明)" +}} + +注意:请严格按照 JSON 格式输出,不要包含任何其他内容。""" + # ActionPlanner 类定义,顶格 class ActionPlanner: @@ -336,9 +354,10 @@ class ActionPlanner: logger.debug(f"[私聊][{self.private_name}]发送到LLM的最终提示词:\n------\n{prompt}\n------") try: content, _ = await self.llm.generate_response_async(prompt) - logger.debug(f"[私聊][{self.private_name}]LLM原始返回内容: {content}") + logger.debug(f"[私聊][{self.private_name}]LLM (行动规划) 原始返回内容: {content}") - success, result = get_items_from_json( + # --- 初始行动规划解析 --- + success, initial_result = get_items_from_json( content, self.private_name, "action", @@ -346,30 +365,84 @@ class ActionPlanner: default_values={"action": "wait", "reason": "LLM返回格式错误或未提供原因,默认等待"}, ) - action = result.get("action", "wait") - reason = result.get("reason", "LLM未提供原因,默认等待") + initial_action = initial_result.get("action", "wait") + initial_reason = initial_result.get("reason", "LLM未提供原因,默认等待") - # 验证action类型 - # 更新 valid_actions 列表以包含 send_new_message - valid_actions = [ - "direct_reply", - "send_new_message", # 添加新动作 - "fetch_knowledge", - "wait", - "listening", - "rethink_goal", - "end_conversation", - "block_and_ignore", - ] - if action not in valid_actions: - logger.warning(f"[私聊][{self.private_name}]LLM返回了未知的行动类型: '{action}',强制改为 wait") - reason = f"(原始行动'{action}'无效,已强制改为wait) {reason}" - action = "wait" + # 检查是否需要进行结束对话决策 --- + if initial_action == "end_conversation": + logger.info(f"[私聊][{self.private_name}]初步规划结束对话,进入告别决策...") - logger.info(f"[私聊][{self.private_name}]规划的行动: {action}") - logger.info(f"[私聊][{self.private_name}]行动原因: {reason}") - return action, reason + # 使用新的 PROMPT_END_DECISION + end_decision_prompt = PROMPT_END_DECISION.format( + persona_text=persona_text, # 复用之前的 persona_text + chat_history_text=chat_history_text # 复用之前的 chat_history_text + ) + + logger.debug(f"[私聊][{self.private_name}]发送到LLM的结束决策提示词:\n------\n{end_decision_prompt}\n------") + try: + end_content, _ = await self.llm.generate_response_async(end_decision_prompt) # 再次调用LLM + logger.debug(f"[私聊][{self.private_name}]LLM (结束决策) 原始返回内容: {end_content}") + + # 解析结束决策的JSON + end_success, end_result = get_items_from_json( + end_content, + self.private_name, + "say_bye", + "reason", + default_values={"say_bye": "no", "reason": "结束决策LLM返回格式错误,默认不告别"}, + 
required_types={"say_bye": str, "reason": str} # 明确类型 + ) + + say_bye_decision = end_result.get("say_bye", "no").lower() # 转小写方便比较 + end_decision_reason = end_result.get("reason", "未提供原因") + + if end_success and say_bye_decision == "yes": + # 决定要告别,返回新的 'say_goodbye' 动作 + logger.info(f"[私聊][{self.private_name}]结束决策: yes, 准备生成告别语. 原因: {end_decision_reason}") + # 注意:这里的 reason 可以考虑拼接初始原因和结束决策原因,或者只用结束决策原因 + final_action = "say_goodbye" + final_reason = f"决定发送告别语。决策原因: {end_decision_reason} (原结束理由: {initial_reason})" + return final_action, final_reason + else: + # 决定不告别 (包括解析失败或明确说no) + logger.info(f"[私聊][{self.private_name}]结束决策: no, 直接结束对话. 原因: {end_decision_reason}") + # 返回原始的 'end_conversation' 动作 + final_action = "end_conversation" + final_reason = initial_reason # 保持原始的结束理由 + return final_action, final_reason + + except Exception as end_e: + logger.error(f"[私聊][{self.private_name}]调用结束决策LLM或处理结果时出错: {str(end_e)}") + # 出错时,默认执行原始的结束对话 + logger.warning(f"[私聊][{self.private_name}]结束决策出错,将按原计划执行 end_conversation") + return "end_conversation", initial_reason # 返回原始动作和原因 + + else: + action = initial_action + reason = initial_reason + + # 验证action类型 (保持不变) + valid_actions = [ + "direct_reply", + "send_new_message", + "fetch_knowledge", + "wait", + "listening", + "rethink_goal", + "end_conversation", # 仍然需要验证,因为可能从上面决策后返回 + "block_and_ignore", + "say_goodbye" # 也要验证这个新动作 + ] + if action not in valid_actions: + logger.warning(f"[私聊][{self.private_name}]LLM返回了未知的行动类型: '{action}',强制改为 wait") + reason = f"(原始行动'{action}'无效,已强制改为wait) {reason}" + action = "wait" + + logger.info(f"[私聊][{self.private_name}]规划的行动: {action}") + logger.info(f"[私聊][{self.private_name}]行动原因: {reason}") + return action, reason except Exception as e: + # 外层异常处理保持不变 logger.error(f"[私聊][{self.private_name}]规划行动时调用 LLM 或处理结果出错: {str(e)}") - return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}" + return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}" \ No newline at end of file diff --git a/src/plugins/PFC/conversation.py b/src/plugins/PFC/conversation.py index 6a8636e18..638427e6d 100644 --- a/src/plugins/PFC/conversation.py +++ b/src/plugins/PFC/conversation.py @@ -564,10 +564,48 @@ class Conversation: ) self.conversation_info.last_successful_reply_action = None # 重置状态 + elif action == "say_goodbye": + self.state = ConversationState.GENERATING # 也可以定义一个新的状态,如 ENDING + logger.info(f"[私聊][{self.private_name}]执行行动: 生成并发送告别语...") + try: + # 1. 生成告别语 (使用 'say_goodbye' action_type) + self.generated_reply = await self.reply_generator.generate( + observation_info, conversation_info, action_type="say_goodbye" + ) + logger.info(f"[私聊][{self.private_name}]生成的告别语: {self.generated_reply}") + + # 2. 直接发送告别语 (不经过检查) + if self.generated_reply: # 确保生成了内容 + await self._send_reply() # 调用发送方法 + # 发送成功后,标记动作成功 + action_successful = True + logger.info(f"[私聊][{self.private_name}]告别语已发送。") + else: + logger.warning(f"[私聊][{self.private_name}]未能生成告别语内容,无法发送。") + action_successful = False # 标记动作失败 + conversation_info.done_action[action_index].update( + {"status": "recall", "final_reason": "未能生成告别语内容"} + ) + + # 3. 
无论是否发送成功,都准备结束对话 + self.should_continue = False + logger.info(f"[私聊][{self.private_name}]发送告别语流程结束,即将停止对话实例。") + + except Exception as goodbye_err: + logger.error(f"[私聊][{self.private_name}]生成或发送告别语时出错: {goodbye_err}") + logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}") + # 即使出错,也结束对话 + self.should_continue = False + action_successful = False # 标记动作失败 + conversation_info.done_action[action_index].update( + {"status": "recall", "final_reason": f"生成或发送告别语时出错: {goodbye_err}"} + ) + elif action == "end_conversation": + # 这个分支现在只会在 action_planner 最终决定不告别时被调用 self.should_continue = False - logger.info(f"[私聊][{self.private_name}]决定结束对话...") - action_successful = True # 标记动作成功 + logger.info(f"[私聊][{self.private_name}]收到最终结束指令,停止对话...") + action_successful = True # 标记这个指令本身是成功的 elif action == "block_and_ignore": logger.info(f"[私聊][{self.private_name}]不想再理你了...") diff --git a/src/plugins/PFC/reply_generator.py b/src/plugins/PFC/reply_generator.py index 0c257a938..6be09902d 100644 --- a/src/plugins/PFC/reply_generator.py +++ b/src/plugins/PFC/reply_generator.py @@ -57,6 +57,23 @@ PROMPT_SEND_NEW_MESSAGE = """{persona_text}。现在你在参与一场QQ私聊 请直接输出回复内容,不需要任何额外格式。""" +# Prompt for say_goodbye (告别语生成) +PROMPT_FAREWELL = """{persona_text}。你在参与一场 QQ 私聊,现在对话似乎已经结束,你决定再发一条最后的消息来圆满结束。 + +最近的聊天记录: +{chat_history_text} + +请根据上述信息,结合聊天记录,构思一条**简短、自然、符合你人设**的最后的消息。 +这条消息应该: +1. 从你自己的角度发言。 +2. 符合你的性格特征和身份细节。 +3. 通俗易懂,自然流畅,通常很简短。 +4. 自然地为这场对话画上句号,避免开启新话题或显得冗长、刻意。 + +请像真人一样随意自然,**简洁是关键**。 +不要输出多余内容(包括前后缀、冒号、引号、括号、表情包、at或@等)。 + +请直接输出最终的告别消息内容,不需要任何额外格式。""" class ReplyGenerator: """回复生成器""" @@ -135,10 +152,14 @@ class ReplyGenerator: if action_type == "send_new_message": prompt_template = PROMPT_SEND_NEW_MESSAGE logger.info(f"[私聊][{self.private_name}]使用 PROMPT_SEND_NEW_MESSAGE (追问生成)") - else: # 默认使用 direct_reply 的 prompt + elif action_type == "say_goodbye": # 处理告别动作 + prompt_template = PROMPT_FAREWELL + logger.info(f"[私聊][{self.private_name}]使用 PROMPT_FAREWELL (告别语生成)") + else: # 默认使用 direct_reply 的 prompt (包括 'direct_reply' 或其他未明确处理的类型) prompt_template = PROMPT_DIRECT_REPLY logger.info(f"[私聊][{self.private_name}]使用 PROMPT_DIRECT_REPLY (首次/非连续回复生成)") + # --- 格式化最终的 Prompt --- prompt = prompt_template.format( persona_text=persona_text, goals_str=goals_str, chat_history_text=chat_history_text From 853ca47a7eabbc708cc52884e3c4586da9771177 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 29 Apr 2025 10:07:04 +0000 Subject: [PATCH 02/16] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/PFC/action_planner.py | 32 ++++++++++++++++++------------ src/plugins/PFC/conversation.py | 10 +++++----- src/plugins/PFC/reply_generator.py | 4 ++-- 3 files changed, 26 insertions(+), 20 deletions(-) diff --git a/src/plugins/PFC/action_planner.py b/src/plugins/PFC/action_planner.py index 4b48f127f..a80e96b15 100644 --- a/src/plugins/PFC/action_planner.py +++ b/src/plugins/PFC/action_planner.py @@ -374,13 +374,15 @@ class ActionPlanner: # 使用新的 PROMPT_END_DECISION end_decision_prompt = PROMPT_END_DECISION.format( - persona_text=persona_text, # 复用之前的 persona_text - chat_history_text=chat_history_text # 复用之前的 chat_history_text + persona_text=persona_text, # 复用之前的 persona_text + chat_history_text=chat_history_text, # 复用之前的 chat_history_text ) - logger.debug(f"[私聊][{self.private_name}]发送到LLM的结束决策提示词:\n------\n{end_decision_prompt}\n------") + 
logger.debug( + f"[私聊][{self.private_name}]发送到LLM的结束决策提示词:\n------\n{end_decision_prompt}\n------" + ) try: - end_content, _ = await self.llm.generate_response_async(end_decision_prompt) # 再次调用LLM + end_content, _ = await self.llm.generate_response_async(end_decision_prompt) # 再次调用LLM logger.debug(f"[私聊][{self.private_name}]LLM (结束决策) 原始返回内容: {end_content}") # 解析结束决策的JSON @@ -390,32 +392,36 @@ class ActionPlanner: "say_bye", "reason", default_values={"say_bye": "no", "reason": "结束决策LLM返回格式错误,默认不告别"}, - required_types={"say_bye": str, "reason": str} # 明确类型 + required_types={"say_bye": str, "reason": str}, # 明确类型 ) - say_bye_decision = end_result.get("say_bye", "no").lower() # 转小写方便比较 + say_bye_decision = end_result.get("say_bye", "no").lower() # 转小写方便比较 end_decision_reason = end_result.get("reason", "未提供原因") if end_success and say_bye_decision == "yes": # 决定要告别,返回新的 'say_goodbye' 动作 - logger.info(f"[私聊][{self.private_name}]结束决策: yes, 准备生成告别语. 原因: {end_decision_reason}") + logger.info( + f"[私聊][{self.private_name}]结束决策: yes, 准备生成告别语. 原因: {end_decision_reason}" + ) # 注意:这里的 reason 可以考虑拼接初始原因和结束决策原因,或者只用结束决策原因 final_action = "say_goodbye" final_reason = f"决定发送告别语。决策原因: {end_decision_reason} (原结束理由: {initial_reason})" return final_action, final_reason else: # 决定不告别 (包括解析失败或明确说no) - logger.info(f"[私聊][{self.private_name}]结束决策: no, 直接结束对话. 原因: {end_decision_reason}") + logger.info( + f"[私聊][{self.private_name}]结束决策: no, 直接结束对话. 原因: {end_decision_reason}" + ) # 返回原始的 'end_conversation' 动作 final_action = "end_conversation" - final_reason = initial_reason # 保持原始的结束理由 + final_reason = initial_reason # 保持原始的结束理由 return final_action, final_reason except Exception as end_e: logger.error(f"[私聊][{self.private_name}]调用结束决策LLM或处理结果时出错: {str(end_e)}") # 出错时,默认执行原始的结束对话 logger.warning(f"[私聊][{self.private_name}]结束决策出错,将按原计划执行 end_conversation") - return "end_conversation", initial_reason # 返回原始动作和原因 + return "end_conversation", initial_reason # 返回原始动作和原因 else: action = initial_action @@ -429,9 +435,9 @@ class ActionPlanner: "wait", "listening", "rethink_goal", - "end_conversation", # 仍然需要验证,因为可能从上面决策后返回 + "end_conversation", # 仍然需要验证,因为可能从上面决策后返回 "block_and_ignore", - "say_goodbye" # 也要验证这个新动作 + "say_goodbye", # 也要验证这个新动作 ] if action not in valid_actions: logger.warning(f"[私聊][{self.private_name}]LLM返回了未知的行动类型: '{action}',强制改为 wait") @@ -445,4 +451,4 @@ class ActionPlanner: except Exception as e: # 外层异常处理保持不变 logger.error(f"[私聊][{self.private_name}]规划行动时调用 LLM 或处理结果出错: {str(e)}") - return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}" \ No newline at end of file + return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}" diff --git a/src/plugins/PFC/conversation.py b/src/plugins/PFC/conversation.py index 638427e6d..c1b314266 100644 --- a/src/plugins/PFC/conversation.py +++ b/src/plugins/PFC/conversation.py @@ -565,7 +565,7 @@ class Conversation: self.conversation_info.last_successful_reply_action = None # 重置状态 elif action == "say_goodbye": - self.state = ConversationState.GENERATING # 也可以定义一个新的状态,如 ENDING + self.state = ConversationState.GENERATING # 也可以定义一个新的状态,如 ENDING logger.info(f"[私聊][{self.private_name}]执行行动: 生成并发送告别语...") try: # 1. 生成告别语 (使用 'say_goodbye' action_type) @@ -575,14 +575,14 @@ class Conversation: logger.info(f"[私聊][{self.private_name}]生成的告别语: {self.generated_reply}") # 2. 
直接发送告别语 (不经过检查) - if self.generated_reply: # 确保生成了内容 - await self._send_reply() # 调用发送方法 + if self.generated_reply: # 确保生成了内容 + await self._send_reply() # 调用发送方法 # 发送成功后,标记动作成功 action_successful = True logger.info(f"[私聊][{self.private_name}]告别语已发送。") else: logger.warning(f"[私聊][{self.private_name}]未能生成告别语内容,无法发送。") - action_successful = False # 标记动作失败 + action_successful = False # 标记动作失败 conversation_info.done_action[action_index].update( {"status": "recall", "final_reason": "未能生成告别语内容"} ) @@ -596,7 +596,7 @@ class Conversation: logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}") # 即使出错,也结束对话 self.should_continue = False - action_successful = False # 标记动作失败 + action_successful = False # 标记动作失败 conversation_info.done_action[action_index].update( {"status": "recall", "final_reason": f"生成或发送告别语时出错: {goodbye_err}"} ) diff --git a/src/plugins/PFC/reply_generator.py b/src/plugins/PFC/reply_generator.py index 6be09902d..9b497ef28 100644 --- a/src/plugins/PFC/reply_generator.py +++ b/src/plugins/PFC/reply_generator.py @@ -75,6 +75,7 @@ PROMPT_FAREWELL = """{persona_text}。你在参与一场 QQ 私聊,现在对 请直接输出最终的告别消息内容,不需要任何额外格式。""" + class ReplyGenerator: """回复生成器""" @@ -152,14 +153,13 @@ class ReplyGenerator: if action_type == "send_new_message": prompt_template = PROMPT_SEND_NEW_MESSAGE logger.info(f"[私聊][{self.private_name}]使用 PROMPT_SEND_NEW_MESSAGE (追问生成)") - elif action_type == "say_goodbye": # 处理告别动作 + elif action_type == "say_goodbye": # 处理告别动作 prompt_template = PROMPT_FAREWELL logger.info(f"[私聊][{self.private_name}]使用 PROMPT_FAREWELL (告别语生成)") else: # 默认使用 direct_reply 的 prompt (包括 'direct_reply' 或其他未明确处理的类型) prompt_template = PROMPT_DIRECT_REPLY logger.info(f"[私聊][{self.private_name}]使用 PROMPT_DIRECT_REPLY (首次/非连续回复生成)") - # --- 格式化最终的 Prompt --- prompt = prompt_template.format( persona_text=persona_text, goals_str=goals_str, chat_history_text=chat_history_text From c5811381bc9597e4a31963a258fddc9cc0299098 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 29 Apr 2025 19:47:47 +0800 Subject: [PATCH 03/16] =?UTF-8?q?fix=EF=BC=9A=E5=86=8D=E5=BA=A6=E4=BC=98?= =?UTF-8?q?=E5=8C=96=E5=85=B3=E7=B3=BBPrompt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../heartFC_chat/heartflow_prompt_builder.py | 3 +- .../person_info/relationship_manager.py | 40 ++++++++++++++----- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/plugins/heartFC_chat/heartflow_prompt_builder.py index 009474224..4e2f44530 100644 --- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py +++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py @@ -108,7 +108,7 @@ def init_prompt(): Prompt( """ {memory_prompt} -{relation_prompt_all} +{relation_prompt} {prompt_info} {schedule_prompt} {chat_target} @@ -373,7 +373,6 @@ class PromptBuilder: prompt = await global_prompt_manager.format_prompt( "reasoning_prompt_main", - relation_prompt_all=await global_prompt_manager.get_prompt_async("relationship_prompt"), relation_prompt=relation_prompt, sender_name=sender_name, memory_prompt=memory_prompt, diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py index 640c12b8b..b66d74d5c 100644 --- a/src/plugins/person_info/relationship_manager.py +++ b/src/plugins/person_info/relationship_manager.py @@ -4,6 +4,7 @@ import math from bson.decimal128 import Decimal128 from .person_info import person_info_manager import time +import random # import re # 
import traceback @@ -282,17 +283,34 @@ class RelationshipManager: person_name = await person_info_manager.get_value(person_id, "person_name") relationship_value = await person_info_manager.get_value(person_id, "relationship_value") level_num = self.calculate_level_num(relationship_value) - relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] - relation_prompt2_list = [ - "忽视的回应", - "冷淡回复", - "保持理性", - "愿意回复", - "积极回复", - "友善和包容的回复", - ] - - return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n" + + if level_num == 0 or level_num == 5: + relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] + relation_prompt2_list = [ + "忽视的回应", + "冷淡回复", + "保持理性", + "愿意回复", + "积极回复", + "友善和包容的回复", + ] + return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n" + elif level_num == 2: + return "" + else: + if random.random() < 0.5: + relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] + relation_prompt2_list = [ + "忽视的回应", + "冷淡回复", + "保持理性", + "愿意回复", + "积极回复", + "友善和包容的回复", + ] + return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n" + else: + return "" @staticmethod def calculate_level_num(relationship_value) -> int: From 663ed1fc113ca3ddc3d05ba3cfe3d66d584ab66e Mon Sep 17 00:00:00 2001 From: Bakadax Date: Tue, 29 Apr 2025 20:09:33 +0800 Subject: [PATCH 04/16] =?UTF-8?q?ai=E5=93=A5=E8=AF=B4=E5=BE=97=E5=AF=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/plugins/emoji_system/emoji_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plugins/emoji_system/emoji_manager.py b/src/plugins/emoji_system/emoji_manager.py index 211a4a835..bcabf104e 100644 --- a/src/plugins/emoji_system/emoji_manager.py +++ b/src/plugins/emoji_system/emoji_manager.py @@ -22,6 +22,7 @@ logger = get_logger("emoji") BASE_DIR = os.path.join("data") EMOJI_DIR = os.path.join(BASE_DIR, "emoji") # 表情包存储目录 EMOJI_REGISTED_DIR = os.path.join(BASE_DIR, "emoji_registed") # 已注册的表情包注册目录 +MAX_EMOJI_FOR_PROMPT = 20 # 最大允许的表情包描述数量于图片替换的 prompt 中 """ @@ -634,7 +635,7 @@ class EmojiManager: # 使用概率分布选择最多20个表情包 selected_emojis = random.choices( - emoji_objects, weights=normalized_probabilities, k=min(20, len(emoji_objects)) + emoji_objects, weights=normalized_probabilities, k=min(MAX_EMOJI_FOR_PROMPT, len(emoji_objects)) ) # 将表情包信息转换为可读的字符串 From dec2e4f44254dee57299fad8c2620657c713c5ea Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 29 Apr 2025 20:38:12 +0800 Subject: [PATCH 05/16] =?UTF-8?q?feat=EF=BC=9A=E5=85=81=E8=AE=B8=E5=85=B3?= =?UTF-8?q?=E9=97=ADFOCUS=E6=A8=A1=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- scripts/remove_chong.py | 95 +++++++++++++++++++++++ src/config/config.py | 9 +-- src/heart_flow/subheartflow_manager.py | 7 ++ src/plugins/utils/chat_message_builder.py | 21 +++-- template/bot_config_template.toml | 16 ++-- 5 files changed, 126 insertions(+), 22 deletions(-) create mode 100644 scripts/remove_chong.py diff --git a/scripts/remove_chong.py b/scripts/remove_chong.py new file mode 100644 index 000000000..42b0a4cb1 --- /dev/null +++ b/scripts/remove_chong.py @@ -0,0 +1,95 @@ +import difflib +import random + +def ji_suan_xiang_si_du(wen_ben_yi: str, wen_ben_er: str) -> float: + """ + 计算两个文本字符串的相似度。 + + 参数: + wen_ben_yi (str): 第一个文本字符串。 + wen_ben_er (str): 第二个文本字符串。 + + 返回: + float: 两个文本的相似度比率 (0 到 1 之间)。 + """ + 
xu_lie_pi_pei_qi = difflib.SequenceMatcher(None, wen_ben_yi, wen_ben_er) + # 获取相似度比率 + xiang_si_bi_lv = xu_lie_pi_pei_qi.ratio() + return xiang_si_bi_lv + +def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float: + """ + 根据相似度计算替换的概率。 + 规则: + - 相似度 <= 0.4: 概率 = 0 + - 相似度 >= 0.9: 概率 = 1 + - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.5) + - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.5) 到 (0.9, 1.0) + """ + if xiang_si_du <= 0.4: + return 0.0 + elif xiang_si_du >= 0.9: + return 1.0 + elif 0.4 < xiang_si_du <= 0.6: + # p = 2.5 * s - 1.0 (线性方程 y - 0 = (0.5-0)/(0.6-0.4) * (x - 0.4)) + gai_lv = 2.5 * xiang_si_du - 1.0 + return max(0.0, gai_lv) # 确保概率不小于0 + elif 0.6 < xiang_si_du < 0.9: + # p = (5/3) * s - 0.5 (线性方程 y - 0.5 = (1-0.5)/(0.9-0.6) * (x - 0.6)) + gai_lv = (5 / 3) * xiang_si_du - 0.5 + return min(1.0, max(0.0, gai_lv)) # 确保概率在 0 和 1 之间 + +# 获取用户输入 +shu_ru_yi = "豆豆刚刚回复了我的问候 现在可以等待对方的回应 不需要再主动发言 目前情绪满足 不需要使用工具" + +shu_ru_er = "豆豆刚刚回复了我的问候 现在可以等待对方的回应 不需要再主动发言 目前情绪满足 不需要使用工具 群主突然提到复活的事情 感觉有点莫名其妙 但情绪上还是满足的 暂时不需要回复" + +# 计算相似度 +xiang_si_du = ji_suan_xiang_si_du(shu_ru_yi, shu_ru_er) + +# 计算替换概率 +ti_huan_gai_lv = ji_suan_ti_huan_gai_lv(xiang_si_du) +print(f"文本相似度: {xiang_si_du:.2f}, 执行替换操作的概率: {ti_huan_gai_lv:.2f}") + +# 根据概率决定是否执行替换 +if random.random() < ti_huan_gai_lv: + print(f"执行替换操作 (基于概率 {ti_huan_gai_lv:.2f})...") + pi_pei_qi = difflib.SequenceMatcher(None, shu_ru_yi, shu_ru_er) + qu_chong_hou_de_er = [] + last_match_end_in_b = 0 + # 获取匹配块 (i, j, n) 其中 a[i:i+n] == b[j:j+n] + # 注意:get_matching_blocks 最后会有一个 (len(a), len(b), 0) 的虚拟块 + for i, j, n in pi_pei_qi.get_matching_blocks(): + # 添加上一个匹配块结束到当前匹配块开始之间的非匹配部分 (来自文本二) + if last_match_end_in_b < j: + qu_chong_hou_de_er.append(shu_ru_er[last_match_end_in_b:j]) + # 更新下一个非匹配部分的起始位置 + last_match_end_in_b = j + n + + jie_guo = "".join(qu_chong_hou_de_er).strip() # 去除首尾空白 + + if jie_guo: + # 定义词语列表 + yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"] + zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"] + cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"] + zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao + + # 根据概率决定是否添加词语 + qian_zhui_str = "" + if random.random() < 0.3: # 30% 概率添加语气词 + qian_zhui_str += random.choice(yu_qi_ci_liebiao) + if random.random() < 0.7: # 70% 概率添加转折/承接词 + qian_zhui_str += random.choice(zhuan_jie_ci_liebiao) + + # 组合最终结果 + if qian_zhui_str: + zui_zhong_jie_guo = f"{qian_zhui_str},{jie_guo}" + print(f"移除重复部分并添加引导词后的文本二: {zui_zhong_jie_guo}") + else: + # 如果没有添加任何前缀词,直接输出去重结果 + print(f"移除重复部分后的文本二: {jie_guo}") + else: + print("移除重复部分后文本二为空。") +else: + print(f"未执行替换操作 (基于概率 {ti_huan_gai_lv:.2f})。原始相似度为: {xiang_si_du:.2f}") diff --git a/src/config/config.py b/src/config/config.py index 28de20538..d9d5ce162 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -182,10 +182,10 @@ class BotConfig: # [heartflow] # 启用启用heart_flowC(心流聊天)模式时生效, 需要填写token消耗量巨大的相关模型 # 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间), 进行长时间高质量的聊天 - enable_heart_flowC: bool = True # 是否启用heart_flowC(心流聊天, HFC)模式 reply_trigger_threshold: float = 3.0 # 心流聊天触发阈值,越低越容易触发 probability_decay_factor_per_second: float = 0.2 # 概率衰减因子,越大衰减越快 default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢 + allow_focus_mode: bool = True # 是否允许子心流进入 FOCUSED 状态 # sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒 # sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒 @@ -417,11 +417,6 @@ class BotConfig: config.model_normal_probability = response_config.get( "model_normal_probability", config.model_normal_probability ) - - # 添加 enable_heart_flowC 的加载逻辑 
(假设它在 [response] 部分) - if config.INNER_VERSION in SpecifierSet(">=1.4.0"): - config.enable_heart_flowC = response_config.get("enable_heart_flowC", config.enable_heart_flowC) - def heartflow(parent: dict): heartflow_config = parent["heartflow"] config.sub_heart_flow_stop_time = heartflow_config.get( @@ -445,6 +440,8 @@ class BotConfig: config.default_decay_rate_per_second = heartflow_config.get( "default_decay_rate_per_second", config.default_decay_rate_per_second ) + if config.INNER_VERSION in SpecifierSet(">=1.5.1"): + config.allow_focus_mode = heartflow_config.get("allow_focus_mode", config.allow_focus_mode) def willing(parent: dict): willing_config = parent["willing"] diff --git a/src/heart_flow/subheartflow_manager.py b/src/heart_flow/subheartflow_manager.py index db520f6df..c355867c5 100644 --- a/src/heart_flow/subheartflow_manager.py +++ b/src/heart_flow/subheartflow_manager.py @@ -264,6 +264,13 @@ class SubHeartflowManager: current_state = self.mai_state_info.get_current_state() focused_limit = current_state.get_focused_chat_max_num() + # --- 新增:检查是否允许进入 FOCUS 模式 --- # + if not global_config.allow_focus_mode: + if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏 + logger.debug(f"{log_prefix} 配置不允许进入 FOCUSED 状态 (allow_focus_mode=False)") + return # 如果不允许,直接返回 + # --- 结束新增 --- + logger.debug(f"{log_prefix} 当前状态 ({current_state.value}) 开始尝试提升到FOCUSED状态") if int(time.time()) % 20 == 0: # 每20秒输出一次 diff --git a/src/plugins/utils/chat_message_builder.py b/src/plugins/utils/chat_message_builder.py index 87ffb2d1f..4e807ffaf 100644 --- a/src/plugins/utils/chat_message_builder.py +++ b/src/plugins/utils/chat_message_builder.py @@ -213,17 +213,22 @@ async def _build_readable_messages_internal( original_len = len(content) limit = -1 # 默认不截断 - if percentile < 0.6: # 60% 之前的消息 (即最旧的 60%) - limit = 170 - elif percentile < 0.8: # 60% 到 80% 之前的消息 (即中间的 20%) - limit = 250 + if percentile < 0.2: # 60% 之前的消息 (即最旧的 60%) + limit = 50 + replace_content = "......(记不清了)" + elif percentile < 0.5: # 60% 之前的消息 (即最旧的 60%) + limit = 100 + replace_content = "......(有点记不清了)" + elif percentile < 0.7: # 60% 到 80% 之前的消息 (即中间的 20%) + limit = 200 + replace_content = "......(内容太长了)" elif percentile < 1.0: # 80% 到 100% 之前的消息 (即较新的 20%) - limit = 500 - # 最新的 20% (理论上 percentile 会趋近 1,但这里不需要显式处理,因为 limit 默认为 -1) + limit = 300 + replace_content = "......(太长了)" truncated_content = content if limit > 0 and original_len > limit: - truncated_content = f"{content[:limit]}......(内容太长)" + truncated_content = f"{content[:limit]}{replace_content}" message_details.append((timestamp, name, truncated_content)) else: @@ -343,7 +348,7 @@ async def build_readable_messages( messages_before_mark, replace_bot_name, merge_messages, timestamp_mode, truncate ) formatted_after, _ = await _build_readable_messages_internal( - messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, truncate + messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, ) readable_read_mark = translate_timestamp_to_human_readable(read_mark, mode=timestamp_mode) diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 486c150ff..8ca6aaa31 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -17,7 +17,7 @@ version = "1.5.1" # 主版本号:当你做了不兼容的 API 修改, # 次版本号:当你做了向下兼容的功能性新增, # 修订号:当你做了向下兼容的问题修正。 -# 先行版本号及版本编译信息可以加到“主版本号.次版本号.修订号”的后面,作为延伸。 +# 先行版本号及版本编译信息可以加到"主版本号.次版本号.修订号"的后面,作为延伸。 #----以上是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- [bot] @@ -66,20 +66,20 @@ time_zone = "Asia/Shanghai" # 
给你的机器人设置时区,可以解决运 nonebot-qq="http://127.0.0.1:18002/api/message" [response] #群聊的回复策略 -enable_heart_flowC = true -# 该功能还在完善中 -# 是否启用heart_flowC(心流聊天,HFC)模式 -# 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间),进行主动的观察和回复,并给出回复,比较消耗token - #一般回复参数 model_reasoning_probability = 0.7 # 麦麦回答时选择推理模型 模型的概率 model_normal_probability = 0.3 # 麦麦回答时选择一般模型 模型的概率 -[heartflow] #启用启用heart_flowC(心流聊天)模式时生效,需要填写以下参数 +[heartflow] +allow_focus_mode = true # 是否允许进入FOCUSED状态 +# 是否启用heart_flowC(心流聊天,HFC)模式 +# 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间),进行主动的观察和回复,并给出回复,比较消耗token reply_trigger_threshold = 3.0 # 心流聊天触发阈值,越低越容易进入心流聊天 probability_decay_factor_per_second = 0.2 # 概率衰减因子,越大衰减越快,越高越容易退出心流聊天 default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入心流聊天 + + sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒 observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩 @@ -161,7 +161,7 @@ reaction = "有人说你是人机或者机器人,否定这一事实,攻击 [[keywords_reaction.rules]] # 就像这样复制 enable = false # 仅作示例,不会触发 keywords = ["测试关键词回复","test",""] -reaction = "回答“测试成功”" +reaction = "回答“测试成功”" # 修复错误的引号 [[keywords_reaction.rules]] # 使用正则表达式匹配句式 enable = false # 仅作示例,不会触发 From 4b7ce01c0d6801811d6168bd5d202b089a17b184 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 29 Apr 2025 20:38:49 +0800 Subject: [PATCH 06/16] Update remove_chong.py --- scripts/remove_chong.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/remove_chong.py b/scripts/remove_chong.py index 42b0a4cb1..40ba7428b 100644 --- a/scripts/remove_chong.py +++ b/scripts/remove_chong.py @@ -59,7 +59,7 @@ if random.random() < ti_huan_gai_lv: last_match_end_in_b = 0 # 获取匹配块 (i, j, n) 其中 a[i:i+n] == b[j:j+n] # 注意:get_matching_blocks 最后会有一个 (len(a), len(b), 0) 的虚拟块 - for i, j, n in pi_pei_qi.get_matching_blocks(): + for _i, j, n in pi_pei_qi.get_matching_blocks(): # 添加上一个匹配块结束到当前匹配块开始之间的非匹配部分 (来自文本二) if last_match_end_in_b < j: qu_chong_hou_de_er.append(shu_ru_er[last_match_end_in_b:j]) From 0e31276bdec55f1b1fa4c302d63c0249fc0ca8a4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 29 Apr 2025 12:39:04 +0000 Subject: [PATCH 07/16] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- scripts/remove_chong.py | 13 ++++++++----- src/config/config.py | 3 ++- src/heart_flow/subheartflow_manager.py | 4 ++-- src/plugins/person_info/relationship_manager.py | 2 +- src/plugins/utils/chat_message_builder.py | 5 ++++- 5 files changed, 17 insertions(+), 10 deletions(-) diff --git a/scripts/remove_chong.py b/scripts/remove_chong.py index 40ba7428b..95c01389c 100644 --- a/scripts/remove_chong.py +++ b/scripts/remove_chong.py @@ -1,6 +1,7 @@ import difflib import random + def ji_suan_xiang_si_du(wen_ben_yi: str, wen_ben_er: str) -> float: """ 计算两个文本字符串的相似度。 @@ -17,6 +18,7 @@ def ji_suan_xiang_si_du(wen_ben_yi: str, wen_ben_er: str) -> float: xiang_si_bi_lv = xu_lie_pi_pei_qi.ratio() return xiang_si_bi_lv + def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float: """ 根据相似度计算替换的概率。 @@ -33,11 +35,12 @@ def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float: elif 0.4 < xiang_si_du <= 0.6: # p = 2.5 * s - 1.0 (线性方程 y - 0 = (0.5-0)/(0.6-0.4) * (x - 0.4)) gai_lv = 2.5 * xiang_si_du - 1.0 - return max(0.0, gai_lv) # 确保概率不小于0 + return max(0.0, gai_lv) # 确保概率不小于0 elif 0.6 < xiang_si_du < 0.9: # p = (5/3) * s - 0.5 (线性方程 y - 0.5 = (1-0.5)/(0.9-0.6) * (x - 0.6)) gai_lv = (5 / 3) * xiang_si_du - 0.5 - 
return min(1.0, max(0.0, gai_lv)) # 确保概率在 0 和 1 之间 + return min(1.0, max(0.0, gai_lv)) # 确保概率在 0 和 1 之间 + # 获取用户输入 shu_ru_yi = "豆豆刚刚回复了我的问候 现在可以等待对方的回应 不需要再主动发言 目前情绪满足 不需要使用工具" @@ -66,7 +69,7 @@ if random.random() < ti_huan_gai_lv: # 更新下一个非匹配部分的起始位置 last_match_end_in_b = j + n - jie_guo = "".join(qu_chong_hou_de_er).strip() # 去除首尾空白 + jie_guo = "".join(qu_chong_hou_de_er).strip() # 去除首尾空白 if jie_guo: # 定义词语列表 @@ -77,9 +80,9 @@ if random.random() < ti_huan_gai_lv: # 根据概率决定是否添加词语 qian_zhui_str = "" - if random.random() < 0.3: # 30% 概率添加语气词 + if random.random() < 0.3: # 30% 概率添加语气词 qian_zhui_str += random.choice(yu_qi_ci_liebiao) - if random.random() < 0.7: # 70% 概率添加转折/承接词 + if random.random() < 0.7: # 70% 概率添加转折/承接词 qian_zhui_str += random.choice(zhuan_jie_ci_liebiao) # 组合最终结果 diff --git a/src/config/config.py b/src/config/config.py index d9d5ce162..f09da9a7e 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -185,7 +185,7 @@ class BotConfig: reply_trigger_threshold: float = 3.0 # 心流聊天触发阈值,越低越容易触发 probability_decay_factor_per_second: float = 0.2 # 概率衰减因子,越大衰减越快 default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢 - allow_focus_mode: bool = True # 是否允许子心流进入 FOCUSED 状态 + allow_focus_mode: bool = True # 是否允许子心流进入 FOCUSED 状态 # sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒 # sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒 @@ -417,6 +417,7 @@ class BotConfig: config.model_normal_probability = response_config.get( "model_normal_probability", config.model_normal_probability ) + def heartflow(parent: dict): heartflow_config = parent["heartflow"] config.sub_heart_flow_stop_time = heartflow_config.get( diff --git a/src/heart_flow/subheartflow_manager.py b/src/heart_flow/subheartflow_manager.py index c355867c5..afa0328e6 100644 --- a/src/heart_flow/subheartflow_manager.py +++ b/src/heart_flow/subheartflow_manager.py @@ -266,9 +266,9 @@ class SubHeartflowManager: # --- 新增:检查是否允许进入 FOCUS 模式 --- # if not global_config.allow_focus_mode: - if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏 + if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏 logger.debug(f"{log_prefix} 配置不允许进入 FOCUSED 状态 (allow_focus_mode=False)") - return # 如果不允许,直接返回 + return # 如果不允许,直接返回 # --- 结束新增 --- logger.debug(f"{log_prefix} 当前状态 ({current_state.value}) 开始尝试提升到FOCUSED状态") diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py index b66d74d5c..47caaea01 100644 --- a/src/plugins/person_info/relationship_manager.py +++ b/src/plugins/person_info/relationship_manager.py @@ -283,7 +283,7 @@ class RelationshipManager: person_name = await person_info_manager.get_value(person_id, "person_name") relationship_value = await person_info_manager.get_value(person_id, "relationship_value") level_num = self.calculate_level_num(relationship_value) - + if level_num == 0 or level_num == 5: relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] relation_prompt2_list = [ diff --git a/src/plugins/utils/chat_message_builder.py b/src/plugins/utils/chat_message_builder.py index 4e807ffaf..a49f4ffa3 100644 --- a/src/plugins/utils/chat_message_builder.py +++ b/src/plugins/utils/chat_message_builder.py @@ -348,7 +348,10 @@ async def build_readable_messages( messages_before_mark, replace_bot_name, merge_messages, timestamp_mode, truncate ) formatted_after, _ = await _build_readable_messages_internal( - messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, + messages_after_mark, + replace_bot_name, + merge_messages, + timestamp_mode, ) 
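+    # Note: only the pre-mark history above is passed the explicit `truncate` flag;
+    # messages after the read mark fall back to the internal function's default
+    # (presumably untruncated, so the newest messages are kept intact).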
readable_read_mark = translate_timestamp_to_human_readable(read_mark, mode=timestamp_mode) From 894fe8463c154a8efd6432c490df67ccc6be3a27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A2=A8=E6=A2=93=E6=9F=92?= <1787882683@qq.com> Date: Tue, 29 Apr 2025 23:35:41 +0800 Subject: [PATCH 08/16] logger add init style --- src/common/logger.py | 18 +++++++++++++++++- src/common/logger_manager.py | 2 ++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/common/logger.py b/src/common/logger.py index 5b909744b..be272eb38 100644 --- a/src/common/logger.py +++ b/src/common/logger.py @@ -793,6 +793,22 @@ LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG = { }, } +INIT_STYLE_CONFIG = { + "advanced": { + "console_format": ( + "{time:YYYY-MM-DD HH:mm:ss} | " + "{level: <8} | " + "初始化 | " + "{message}" + ), + "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 初始化 | {message}", + }, + "simple": { + "console_format": "{time:MM-DD HH:mm} | 初始化 | {message}", + "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 初始化 | {message}", + }, +} + # 根据SIMPLE_OUTPUT选择配置 MAIN_STYLE_CONFIG = MAIN_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MAIN_STYLE_CONFIG["advanced"] @@ -862,7 +878,7 @@ CHAT_MESSAGE_STYLE_CONFIG = ( CHAT_MESSAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_MESSAGE_STYLE_CONFIG["advanced"] ) CHAT_IMAGE_STYLE_CONFIG = CHAT_IMAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_IMAGE_STYLE_CONFIG["advanced"] - +INIT_STYLE_CONFIG = INIT_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else INIT_STYLE_CONFIG["advanced"] def is_registered_module(record: dict) -> bool: """检查是否为已注册的模块""" diff --git a/src/common/logger_manager.py b/src/common/logger_manager.py index b93f56d7e..ab1861e2b 100644 --- a/src/common/logger_manager.py +++ b/src/common/logger_manager.py @@ -40,6 +40,7 @@ from src.common.logger import ( MESSAGE_BUFFER_STYLE_CONFIG, CHAT_MESSAGE_STYLE_CONFIG, CHAT_IMAGE_STYLE_CONFIG, + INIT_STYLE_CONFIG, ) # 可根据实际需要补充更多模块配置 @@ -84,6 +85,7 @@ MODULE_LOGGER_CONFIGS = { "message_buffer": MESSAGE_BUFFER_STYLE_CONFIG, # 消息缓冲 "chat_message": CHAT_MESSAGE_STYLE_CONFIG, # 聊天消息 "chat_image": CHAT_IMAGE_STYLE_CONFIG, # 聊天图片 + "init": INIT_STYLE_CONFIG, # 初始化 # ...如有更多模块,继续添加... 
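+    # Registration pattern (as the "init" entry above illustrates): define an
+    # XXX_STYLE_CONFIG with "advanced"/"simple" variants in logger.py, collapse it
+    # via SIMPLE_OUTPUT, import it in this module, then add it here keyed by module name.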
} From 46efe44d58ca535236886ef51e63d084db6da030 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 29 Apr 2025 15:35:57 +0000 Subject: [PATCH 09/16] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/common/logger.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/common/logger.py b/src/common/logger.py index be272eb38..43972e1f2 100644 --- a/src/common/logger.py +++ b/src/common/logger.py @@ -880,6 +880,7 @@ CHAT_MESSAGE_STYLE_CONFIG = ( CHAT_IMAGE_STYLE_CONFIG = CHAT_IMAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_IMAGE_STYLE_CONFIG["advanced"] INIT_STYLE_CONFIG = INIT_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else INIT_STYLE_CONFIG["advanced"] + def is_registered_module(record: dict) -> bool: """检查是否为已注册的模块""" return record["extra"].get("module") in _handler_registry From be8401797285a9f9d06739409f1a4173476e6990 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 29 Apr 2025 23:47:22 +0800 Subject: [PATCH 10/16] =?UTF-8?q?feat=EF=BC=9A=E5=BF=83=E6=B5=81=E6=9F=A5?= =?UTF-8?q?=E9=87=8D=E5=92=8C=E5=BF=83=E6=B5=81=E5=85=B3=E7=B3=BB=E5=90=AF?= =?UTF-8?q?=E7=94=A8=EF=BC=8C=E5=85=B3=E7=B3=BBprompt=E4=BC=98=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- changelogs/changelog.md | 6 +- scripts/remove_chong.py | 13 +- src/do_tool/tool_can_use/get_memory.py | 8 +- src/heart_flow/observation.py | 9 ++ src/heart_flow/sub_mind.py | 135 ++++++++++++++++-- .../heartFC_chat/heartflow_prompt_builder.py | 3 + src/plugins/person_info/person_info.py | 63 +++++--- .../person_info/relationship_manager.py | 13 +- src/plugins/utils/chat_message_builder.py | 30 ++++ 9 files changed, 237 insertions(+), 43 deletions(-) diff --git a/changelogs/changelog.md b/changelogs/changelog.md index 4fed6fb17..0d6608b02 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -20,6 +20,7 @@ - **流程优化**: 拆分了子心流的思考模块,使整体对话流程更加清晰。 - **状态判断改进**: 将 CHAT 状态判断交给 LLM 处理,使对话更自然。 - **回复机制**: 实现更为灵活的概率回复机制,使机器人能够自然地融入群聊环境。 +- **重复性检查**: 加入心流回复重复性检查机制,防止麦麦陷入固定回复模式。 #### 全新知识库系统 (New Knowledge Base System - LPMM) - **引入 LPMM**: 新增了 **LPMM (Large Psychology Model Maker)** 知识库系统,具有强大的信息检索能力,能显著提升麦麦获取和利用知识的效率。 @@ -32,8 +33,11 @@ #### 记忆与上下文增强 (Memory and Context Enhancement) - **聊天记录压缩**: 大幅优化聊天记录压缩系统,使机器人能够处理5倍于之前的上下文记忆量。 +- **长消息截断**: 新增了长消息自动截断与模糊化功能,随着时间推移降低超长消息的权重,避免被特定冗余信息干扰。 - **记忆提取**: 优化记忆提取功能,提高对历史对话的理解和引用能力。 +- **记忆整合**: 为记忆系统加入了合并与整合机制,优化长期记忆的结构与效率。 - **中期记忆调用**: 完善中期记忆调用机制,使机器人能够更自然地回忆和引用较早前的对话。 +- **Prompt 优化**: 进一步优化了关系系统和记忆系统相关的提示词(prompt)。 #### 私聊 PFC 功能增强 (Private Chat PFC Enhancement) - **功能修复与优化**: 修复了私聊 PFC 载入聊天记录缺失的 bug,优化了 prompt 构建,增加了审核机制,调整了重试次数,并将机器人发言存入数据库。 @@ -41,9 +45,9 @@ #### 情感与互动增强 (Emotion and Interaction Enhancement) - **全新表情包系统**: 新的表情包系统上线,表情含义更丰富,发送更快速。 +- **表情包使用优化**: 优化了表情包的选择逻辑,减少重复使用特定表情包的情况,使表达更生动。 - **提示词优化**: 优化提示词(prompt)构建,增强对话质量和情感表达。 - **积极性配置**: 优化"让麦麦更愿意说话"的相关配置,使机器人更积极参与对话。 -- **命名统一**: 实现统一命名功能,自动替换 prompt 内唯一标识符,优化 prompt 效果。 - **颜文字保护**: 保护颜文字处理机制,确保表情正确显示。 #### 工具与集成 (Tools and Integration) diff --git a/scripts/remove_chong.py b/scripts/remove_chong.py index 40ba7428b..ac8ceca79 100644 --- a/scripts/remove_chong.py +++ b/scripts/remove_chong.py @@ -23,20 +23,21 @@ def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float: 规则: - 相似度 <= 0.4: 概率 = 0 - 相似度 >= 0.9: 概率 = 1 - - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 
0.5) - - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.5) 到 (0.9, 1.0) + - 相似度 == 0.6: 概率 = 0.7 + - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7) + - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0) """ if xiang_si_du <= 0.4: return 0.0 elif xiang_si_du >= 0.9: return 1.0 elif 0.4 < xiang_si_du <= 0.6: - # p = 2.5 * s - 1.0 (线性方程 y - 0 = (0.5-0)/(0.6-0.4) * (x - 0.4)) - gai_lv = 2.5 * xiang_si_du - 1.0 + # p = 3.5 * s - 1.4 (线性方程 y - 0 = (0.7-0)/(0.6-0.4) * (x - 0.4)) + gai_lv = 3.5 * xiang_si_du - 1.4 return max(0.0, gai_lv) # 确保概率不小于0 elif 0.6 < xiang_si_du < 0.9: - # p = (5/3) * s - 0.5 (线性方程 y - 0.5 = (1-0.5)/(0.9-0.6) * (x - 0.6)) - gai_lv = (5 / 3) * xiang_si_du - 0.5 + # p = s + 0.1 (线性方程 y - 0.7 = (1-0.7)/(0.9-0.6) * (x - 0.6)) + gai_lv = xiang_si_du + 0.1 return min(1.0, max(0.0, gai_lv)) # 确保概率在 0 和 1 之间 # 获取用户输入 diff --git a/src/do_tool/tool_can_use/get_memory.py b/src/do_tool/tool_can_use/get_memory.py index 98a4e85e2..2ac550f49 100644 --- a/src/do_tool/tool_can_use/get_memory.py +++ b/src/do_tool/tool_can_use/get_memory.py @@ -46,11 +46,13 @@ class GetMemoryTool(BaseTool): if related_memory: for memory in related_memory: memory_info += memory[1] + "\n" - + if memory_info: - content = f"你记得这些事情: {memory_info}" + content = f"你记得这些事情: {memory_info}\n" + content += "以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n" + else: - content = f"你不太记得有关{topic}的记忆,你对此不太了解" + content = f"{topic}的记忆,你记不太清" return {"name": "get_memory", "content": content} except Exception as e: diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py index 6cd6ed24e..5eeb64d39 100644 --- a/src/heart_flow/observation.py +++ b/src/heart_flow/observation.py @@ -10,6 +10,7 @@ from src.plugins.utils.chat_message_builder import ( build_readable_messages, get_raw_msg_by_timestamp_with_chat, num_new_messages_since, + get_person_id_list, ) logger = get_logger("observation") @@ -45,6 +46,8 @@ class ChattingObservation(Observation): self.mid_memorys = [] self.max_mid_memory_len = global_config.compress_length_limit self.mid_memory_info = "" + + self.person_list = [] self.llm_summary = LLMRequest( model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" @@ -153,6 +156,12 @@ class ChattingObservation(Observation): truncate=True, ) + self.person_list = await get_person_id_list(self.talking_message) + + # print(f"self.11111person_list: {self.person_list}") + + + logger.trace( f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}" ) diff --git a/src/heart_flow/sub_mind.py b/src/heart_flow/sub_mind.py index 861bf598b..d167f42ea 100644 --- a/src/heart_flow/sub_mind.py +++ b/src/heart_flow/sub_mind.py @@ -12,6 +12,9 @@ from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls from src.heart_flow.chat_state_info import ChatStateInfo from src.plugins.chat.chat_stream import chat_manager from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo +import difflib +from src.plugins.person_info.relationship_manager import relationship_manager + logger = get_logger("sub_heartflow") @@ -20,6 +23,7 @@ logger = get_logger("sub_heartflow") def init_prompt(): prompt = "" prompt += "{extra_info}\n" + prompt += "{relation_prompt}\n" prompt += "你的名字是{bot_name},{prompt_personality}\n" prompt += "{last_loop_prompt}\n" prompt += "{cycle_info_block}\n" @@ -47,6 +51,39 @@ def init_prompt(): Prompt(prompt, "last_loop") +def calculate_similarity(text_a: str, text_b: str) -> float: + """ + 计算两个文本字符串的相似度。 + """ + if not text_a or not text_b: + return 0.0 
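+    # SequenceMatcher.ratio() returns 2*M/T in [0.0, 1.0], where M is the number of
+    # matched characters and T = len(text_a) + len(text_b);
+    # e.g. SequenceMatcher(None, "abcd", "bcde").ratio() == 2*3/8 == 0.75.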
+ matcher = difflib.SequenceMatcher(None, text_a, text_b) + return matcher.ratio() + +def calculate_replacement_probability(similarity: float) -> float: + """ + 根据相似度计算替换的概率。 + 规则: + - 相似度 <= 0.4: 概率 = 0 + - 相似度 >= 0.9: 概率 = 1 + - 相似度 == 0.6: 概率 = 0.7 + - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7) + - 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0) + """ + if similarity <= 0.4: + return 0.0 + elif similarity >= 0.9: + return 1.0 + elif 0.4 < similarity <= 0.6: + # p = 3.5 * s - 1.4 + probability = 3.5 * similarity - 1.4 + return max(0.0, probability) + elif 0.6 < similarity < 0.9: + # p = s + 0.1 + probability = similarity + 0.1 + return min(1.0, max(0.0, probability)) + + class SubMind: def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: Observation): self.subheartflow_id = subheartflow_id @@ -80,7 +117,7 @@ class SubMind: # ---------- 1. 准备基础数据 ---------- # 获取现有想法和情绪状态 - current_thinking_info = self.current_mind + previous_mind = self.current_mind if self.current_mind else "" mood_info = self.chat_state.mood # 获取观察对象 @@ -92,6 +129,7 @@ class SubMind: # 获取观察内容 chat_observe_info = observation.get_observe_info() + person_list = observation.person_list # ---------- 2. 准备工具和个性化数据 ---------- # 初始化工具 @@ -100,6 +138,14 @@ class SubMind: # 获取个性化信息 individuality = Individuality.get_instance() + + + relation_prompt = "" + print(f"person_list: {person_list}") + for person in person_list: + relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) + + print(f"relat22222ion_prompt: {relation_prompt}") # 构建个性部分 prompt_personality = individuality.get_prompt(x_person=2, level=2) @@ -136,9 +182,9 @@ class SubMind: last_reasoning = "" is_replan = False if_replan_prompt = "" - if current_thinking_info: + if previous_mind: last_loop_prompt = (await global_prompt_manager.get_prompt_async("last_loop")).format( - current_thinking_info=current_thinking_info, if_replan_prompt=if_replan_prompt + current_thinking_info=previous_mind, if_replan_prompt=if_replan_prompt ) else: last_loop_prompt = "" @@ -196,6 +242,7 @@ class SubMind: prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format( extra_info="", # 可以在这里添加额外信息 prompt_personality=prompt_personality, + relation_prompt=relation_prompt, bot_name=individuality.name, time_now=time_now, chat_observe_info=chat_observe_info, @@ -205,8 +252,6 @@ class SubMind: cycle_info_block=cycle_info_block, ) - # logger.debug(f"[{self.subheartflow_id}] 心流思考提示词构建完成") - # ---------- 5. 执行LLM请求并处理响应 ---------- content = "" # 初始化内容变量 _reasoning_content = "" # 初始化推理内容变量 @@ -240,7 +285,7 @@ class SubMind: elif not success: logger.warning(f"{self.log_prefix} 处理工具调用时出错: {error_msg}") else: - logger.info(f"{self.log_prefix} 心流未使用工具") # 修改日志信息,明确是未使用工具而不是未处理 + logger.info(f"{self.log_prefix} 心流未使用工具") except Exception as e: # 处理总体异常 @@ -248,15 +293,87 @@ class SubMind: logger.error(traceback.format_exc()) content = "思考过程中出现错误" - # 记录最终思考结果 - logger.debug(f"{self.log_prefix} \nPrompt:\n{prompt}\n\n心流思考结果:\n{content}\n") + # 记录初步思考结果 + logger.debug(f"{self.log_prefix} 初步心流思考结果: {content}\nprompt: {prompt}\n") # 处理空响应情况 if not content: content = "(不知道该想些什么...)" logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。") - # ---------- 6. 更新思考状态并返回结果 ---------- + # ---------- 6. 
应用概率性去重和修饰 ---------- + new_content = content # 保存 LLM 直接输出的结果 + try: + similarity = calculate_similarity(previous_mind, new_content) + replacement_prob = calculate_replacement_probability(similarity) + logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}") + + # 定义词语列表 (移到判断之前) + yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"] + zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"] + cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"] + zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao + + if random.random() < replacement_prob: + # 相似度非常高时,尝试去重或特殊处理 + if similarity == 1.0: + logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...") + # 随机截取大约一半内容 + if len(new_content) > 1: # 避免内容过短无法截取 + split_point = max(1, len(new_content) // 2 + random.randint(-len(new_content)//4, len(new_content)//4)) + truncated_content = new_content[:split_point] + else: + truncated_content = new_content # 如果只有一个字符或者为空,就不截取了 + + # 添加语气词和转折/承接词 + yu_qi_ci = random.choice(yu_qi_ci_liebiao) + zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao) + content = f"{yu_qi_ci}{zhuan_jie_ci},{truncated_content}" + logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}") + + else: + # 相似度较高但非100%,执行标准去重逻辑 + logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...") + matcher = difflib.SequenceMatcher(None, previous_mind, new_content) + deduplicated_parts = [] + last_match_end_in_b = 0 + for _i, j, n in matcher.get_matching_blocks(): + if last_match_end_in_b < j: + deduplicated_parts.append(new_content[last_match_end_in_b:j]) + last_match_end_in_b = j + n + + deduplicated_content = "".join(deduplicated_parts).strip() + + if deduplicated_content: + # 根据概率决定是否添加词语 + prefix_str = "" + if random.random() < 0.3: # 30% 概率添加语气词 + prefix_str += random.choice(yu_qi_ci_liebiao) + if random.random() < 0.7: # 70% 概率添加转折/承接词 + prefix_str += random.choice(zhuan_jie_ci_liebiao) + + # 组合最终结果 + if prefix_str: + content = f"{prefix_str},{deduplicated_content}" # 更新 content + logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}") + else: + content = deduplicated_content # 更新 content + logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}") + else: + logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}") + content = new_content # 保留原始 content + else: + logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})") + # content 保持 new_content 不变 + + except Exception as e: + logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}") + logger.error(traceback.format_exc()) + # 出错时保留原始 content + content = new_content + + # ---------- 7. 
更新思考状态并返回结果 ---------- + logger.info(f"{self.log_prefix} 最终心流思考结果: {content}") # 更新当前思考内容 self.update_current_mind(content) diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/plugins/heartFC_chat/heartflow_prompt_builder.py index 4e2f44530..db4979e35 100644 --- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py +++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py @@ -260,6 +260,9 @@ class PromptBuilder: relation_prompt = "" for person in who_chat_in_group: relation_prompt += await relationship_manager.build_relationship_info(person) + print(f"relation_prompt: {relation_prompt}") + + print(f"relat11111111ion_prompt: {relation_prompt}") # 心情 mood_manager = MoodManager.get_instance() diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index 2c9fb72bb..cde4ca932 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -137,34 +137,55 @@ class PersonInfoManager: @staticmethod def _extract_json_from_text(text: str) -> dict: """从文本中提取JSON数据的高容错方法""" + parsed_json = None try: # 尝试直接解析 - return json.loads(text) + parsed_json = json.loads(text) + # 如果解析结果是列表,尝试取第一个元素 + if isinstance(parsed_json, list): + if parsed_json: # 检查列表是否为空 + parsed_json = parsed_json[0] + else: # 如果列表为空,重置为 None,走后续逻辑 + parsed_json = None + # 确保解析结果是字典 + if isinstance(parsed_json, dict): + return parsed_json + except json.JSONDecodeError: - try: - # 尝试找到JSON格式的部分 - json_pattern = r"\{[^{}]*\}" - matches = re.findall(json_pattern, text) - if matches: - return json.loads(matches[0]) + # 解析失败,继续尝试其他方法 + pass + except Exception as e: + logger.warning(f"尝试直接解析JSON时发生意外错误: {e}") + pass # 继续尝试其他方法 - # 如果上面都失败了,尝试提取键值对 - nickname_pattern = r'"nickname"[:\s]+"([^"]+)"' - reason_pattern = r'"reason"[:\s]+"([^"]+)"' + # 如果直接解析失败或结果不是字典 + try: + # 尝试找到JSON对象格式的部分 + json_pattern = r"\{[^{}]*\}" + matches = re.findall(json_pattern, text) + if matches: + parsed_obj = json.loads(matches[0]) + if isinstance(parsed_obj, dict): # 确保是字典 + return parsed_obj - nickname_match = re.search(nickname_pattern, text) - reason_match = re.search(reason_pattern, text) + # 如果上面都失败了,尝试提取键值对 + nickname_pattern = r'"nickname"[:\s]+"([^"]+)"' + reason_pattern = r'"reason"[:\s]+"([^"]+)"' - if nickname_match: - return { - "nickname": nickname_match.group(1), - "reason": reason_match.group(1) if reason_match else "未提供理由", - } - except Exception as e: - logger.error(f"JSON提取失败: {str(e)}") + nickname_match = re.search(nickname_pattern, text) + reason_match = re.search(reason_pattern, text) - # 如果所有方法都失败了,返回空结果 - return {"nickname": "", "reason": ""} + if nickname_match: + return { + "nickname": nickname_match.group(1), + "reason": reason_match.group(1) if reason_match else "未提供理由", + } + except Exception as e: + logger.error(f"后备JSON提取失败: {str(e)}") + + # 如果所有方法都失败了,返回默认字典 + logger.warning(f"无法从文本中提取有效的JSON字典: {text}") + return {"nickname": "", "reason": ""} async def qv_person_name(self, person_id: str, user_nickname: str, user_cardname: str, user_avatar: str): """给某个用户取名""" diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py index b66d74d5c..34c284330 100644 --- a/src/plugins/person_info/relationship_manager.py +++ b/src/plugins/person_info/relationship_manager.py @@ -278,12 +278,19 @@ class RelationshipManager: return chat_stream.user_info.user_nickname, value, relationship_level[level_num] - async def build_relationship_info(self, person) -> str: - person_id = 
person_info_manager.get_person_id(person[0], person[1]) + async def build_relationship_info(self, person, is_id: bool = False) -> str: + if is_id: + person_id = person + else: + print(f"person: {person}") + person_id = person_info_manager.get_person_id(person[0], person[1]) person_name = await person_info_manager.get_value(person_id, "person_name") + print(f"person_name: {person_name}") relationship_value = await person_info_manager.get_value(person_id, "relationship_value") level_num = self.calculate_level_num(relationship_value) + print(f"person_name: {person_name}, relationship_value: {relationship_value}, level_num: {level_num}") + if level_num == 0 or level_num == 5: relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] relation_prompt2_list = [ @@ -298,7 +305,7 @@ class RelationshipManager: elif level_num == 2: return "" else: - if random.random() < 0.5: + if random.random() < 0.6: relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] relation_prompt2_list = [ "忽视的回应", diff --git a/src/plugins/utils/chat_message_builder.py b/src/plugins/utils/chat_message_builder.py index 4e807ffaf..eb2994f47 100644 --- a/src/plugins/utils/chat_message_builder.py +++ b/src/plugins/utils/chat_message_builder.py @@ -364,3 +364,33 @@ async def build_readable_messages( else: # 理论上不应该发生,但作为保险 return read_mark_line.strip() # 如果前后都无消息,只返回标记行 + + +async def get_person_id_list(messages: List[Dict[str, Any]]) -> List[str]: + """ + 从消息列表中提取不重复的 person_id 列表 (忽略机器人自身)。 + + Args: + messages: 消息字典列表。 + + Returns: + 一个包含唯一 person_id 的列表。 + """ + person_ids_set = set() # 使用集合来自动去重 + + for msg in messages: + user_info = msg.get("user_info", {}) + platform = user_info.get("platform") + user_id = user_info.get("user_id") + + # 检查必要信息是否存在 且 不是机器人自己 + if not all([platform, user_id]) or user_id == global_config.BOT_QQ: + continue + + person_id = person_info_manager.get_person_id(platform, user_id) + + # 只有当获取到有效 person_id 时才添加 + if person_id: + person_ids_set.add(person_id) + + return list(person_ids_set) # 将集合转换为列表返回 From 8dcfe04642be5d733a3a349c1725162e83874ac9 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 29 Apr 2025 23:47:37 +0800 Subject: [PATCH 11/16] fixruad --- scripts/remove_chong.py | 13 ++++++---- src/config/config.py | 3 ++- src/do_tool/tool_can_use/get_memory.py | 4 +-- src/heart_flow/observation.py | 8 +++--- src/heart_flow/sub_mind.py | 25 ++++++++++--------- src/heart_flow/subheartflow_manager.py | 4 +-- .../heartFC_chat/heartflow_prompt_builder.py | 2 +- src/plugins/person_info/person_info.py | 8 +++--- .../person_info/relationship_manager.py | 4 +-- src/plugins/utils/chat_message_builder.py | 5 +++- 10 files changed, 41 insertions(+), 35 deletions(-) diff --git a/scripts/remove_chong.py b/scripts/remove_chong.py index ac8ceca79..024acb3fd 100644 --- a/scripts/remove_chong.py +++ b/scripts/remove_chong.py @@ -1,6 +1,7 @@ import difflib import random + def ji_suan_xiang_si_du(wen_ben_yi: str, wen_ben_er: str) -> float: """ 计算两个文本字符串的相似度。 @@ -17,6 +18,7 @@ def ji_suan_xiang_si_du(wen_ben_yi: str, wen_ben_er: str) -> float: xiang_si_bi_lv = xu_lie_pi_pei_qi.ratio() return xiang_si_bi_lv + def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float: """ 根据相似度计算替换的概率。 @@ -34,11 +36,12 @@ def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float: elif 0.4 < xiang_si_du <= 0.6: # p = 3.5 * s - 1.4 (线性方程 y - 0 = (0.7-0)/(0.6-0.4) * (x - 0.4)) gai_lv = 3.5 * xiang_si_du - 1.4 - return max(0.0, gai_lv) # 确保概率不小于0 + return max(0.0, gai_lv) # 确保概率不小于0 elif 0.6 < xiang_si_du < 
0.9: # p = s + 0.1 (线性方程 y - 0.7 = (1-0.7)/(0.9-0.6) * (x - 0.6)) gai_lv = xiang_si_du + 0.1 - return min(1.0, max(0.0, gai_lv)) # 确保概率在 0 和 1 之间 + return min(1.0, max(0.0, gai_lv)) # 确保概率在 0 和 1 之间 + # 获取用户输入 shu_ru_yi = "豆豆刚刚回复了我的问候 现在可以等待对方的回应 不需要再主动发言 目前情绪满足 不需要使用工具" @@ -67,7 +70,7 @@ if random.random() < ti_huan_gai_lv: # 更新下一个非匹配部分的起始位置 last_match_end_in_b = j + n - jie_guo = "".join(qu_chong_hou_de_er).strip() # 去除首尾空白 + jie_guo = "".join(qu_chong_hou_de_er).strip() # 去除首尾空白 if jie_guo: # 定义词语列表 @@ -78,9 +81,9 @@ if random.random() < ti_huan_gai_lv: # 根据概率决定是否添加词语 qian_zhui_str = "" - if random.random() < 0.3: # 30% 概率添加语气词 + if random.random() < 0.3: # 30% 概率添加语气词 qian_zhui_str += random.choice(yu_qi_ci_liebiao) - if random.random() < 0.7: # 70% 概率添加转折/承接词 + if random.random() < 0.7: # 70% 概率添加转折/承接词 qian_zhui_str += random.choice(zhuan_jie_ci_liebiao) # 组合最终结果 diff --git a/src/config/config.py b/src/config/config.py index d9d5ce162..f09da9a7e 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -185,7 +185,7 @@ class BotConfig: reply_trigger_threshold: float = 3.0 # 心流聊天触发阈值,越低越容易触发 probability_decay_factor_per_second: float = 0.2 # 概率衰减因子,越大衰减越快 default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢 - allow_focus_mode: bool = True # 是否允许子心流进入 FOCUSED 状态 + allow_focus_mode: bool = True # 是否允许子心流进入 FOCUSED 状态 # sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒 # sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒 @@ -417,6 +417,7 @@ class BotConfig: config.model_normal_probability = response_config.get( "model_normal_probability", config.model_normal_probability ) + def heartflow(parent: dict): heartflow_config = parent["heartflow"] config.sub_heart_flow_stop_time = heartflow_config.get( diff --git a/src/do_tool/tool_can_use/get_memory.py b/src/do_tool/tool_can_use/get_memory.py index 2ac550f49..b38423ed0 100644 --- a/src/do_tool/tool_can_use/get_memory.py +++ b/src/do_tool/tool_can_use/get_memory.py @@ -46,11 +46,11 @@ class GetMemoryTool(BaseTool): if related_memory: for memory in related_memory: memory_info += memory[1] + "\n" - + if memory_info: content = f"你记得这些事情: {memory_info}\n" content += "以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n" - + else: content = f"{topic}的记忆,你记不太清" diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py index 5eeb64d39..5793e772f 100644 --- a/src/heart_flow/observation.py +++ b/src/heart_flow/observation.py @@ -46,7 +46,7 @@ class ChattingObservation(Observation): self.mid_memorys = [] self.max_mid_memory_len = global_config.compress_length_limit self.mid_memory_info = "" - + self.person_list = [] self.llm_summary = LLMRequest( @@ -157,11 +157,9 @@ class ChattingObservation(Observation): ) self.person_list = await get_person_id_list(self.talking_message) - - # print(f"self.11111person_list: {self.person_list}") - - + # print(f"self.11111person_list: {self.person_list}") + logger.trace( f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}" ) diff --git a/src/heart_flow/sub_mind.py b/src/heart_flow/sub_mind.py index d167f42ea..e59ee855c 100644 --- a/src/heart_flow/sub_mind.py +++ b/src/heart_flow/sub_mind.py @@ -16,7 +16,6 @@ import difflib from src.plugins.person_info.relationship_manager import relationship_manager - logger = get_logger("sub_heartflow") @@ -60,6 +59,7 @@ def calculate_similarity(text_a: str, text_b: str) -> float: matcher = difflib.SequenceMatcher(None, text_a, text_b) return matcher.ratio() + def 
calculate_replacement_probability(similarity: float) -> float: """ 根据相似度计算替换的概率。 @@ -138,13 +138,12 @@ class SubMind: # 获取个性化信息 individuality = Individuality.get_instance() - relation_prompt = "" print(f"person_list: {person_list}") for person in person_list: relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True) - + print(f"relat22222ion_prompt: {relation_prompt}") # 构建个性部分 @@ -302,7 +301,7 @@ class SubMind: logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。") # ---------- 6. 应用概率性去重和修饰 ---------- - new_content = content # 保存 LLM 直接输出的结果 + new_content = content # 保存 LLM 直接输出的结果 try: similarity = calculate_similarity(previous_mind, new_content) replacement_prob = calculate_replacement_probability(similarity) @@ -319,11 +318,13 @@ class SubMind: if similarity == 1.0: logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...") # 随机截取大约一半内容 - if len(new_content) > 1: # 避免内容过短无法截取 - split_point = max(1, len(new_content) // 2 + random.randint(-len(new_content)//4, len(new_content)//4)) + if len(new_content) > 1: # 避免内容过短无法截取 + split_point = max( + 1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4) + ) truncated_content = new_content[:split_point] else: - truncated_content = new_content # 如果只有一个字符或者为空,就不截取了 + truncated_content = new_content # 如果只有一个字符或者为空,就不截取了 # 添加语气词和转折/承接词 yu_qi_ci = random.choice(yu_qi_ci_liebiao) @@ -347,21 +348,21 @@ class SubMind: if deduplicated_content: # 根据概率决定是否添加词语 prefix_str = "" - if random.random() < 0.3: # 30% 概率添加语气词 + if random.random() < 0.3: # 30% 概率添加语气词 prefix_str += random.choice(yu_qi_ci_liebiao) - if random.random() < 0.7: # 70% 概率添加转折/承接词 + if random.random() < 0.7: # 70% 概率添加转折/承接词 prefix_str += random.choice(zhuan_jie_ci_liebiao) # 组合最终结果 if prefix_str: - content = f"{prefix_str},{deduplicated_content}" # 更新 content + content = f"{prefix_str},{deduplicated_content}" # 更新 content logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}") else: - content = deduplicated_content # 更新 content + content = deduplicated_content # 更新 content logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}") else: logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}") - content = new_content # 保留原始 content + content = new_content # 保留原始 content else: logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})") # content 保持 new_content 不变 diff --git a/src/heart_flow/subheartflow_manager.py b/src/heart_flow/subheartflow_manager.py index c355867c5..afa0328e6 100644 --- a/src/heart_flow/subheartflow_manager.py +++ b/src/heart_flow/subheartflow_manager.py @@ -266,9 +266,9 @@ class SubHeartflowManager: # --- 新增:检查是否允许进入 FOCUS 模式 --- # if not global_config.allow_focus_mode: - if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏 + if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏 logger.debug(f"{log_prefix} 配置不允许进入 FOCUSED 状态 (allow_focus_mode=False)") - return # 如果不允许,直接返回 + return # 如果不允许,直接返回 # --- 结束新增 --- logger.debug(f"{log_prefix} 当前状态 ({current_state.value}) 开始尝试提升到FOCUSED状态") diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/plugins/heartFC_chat/heartflow_prompt_builder.py index db4979e35..32c78a106 100644 --- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py +++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py @@ -261,7 +261,7 @@ class PromptBuilder: for person in who_chat_in_group: relation_prompt += await relationship_manager.build_relationship_info(person) print(f"relation_prompt: {relation_prompt}") - + print(f"relat11111111ion_prompt: 
{relation_prompt}") # 心情 diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py index cde4ca932..8bafe5ebf 100644 --- a/src/plugins/person_info/person_info.py +++ b/src/plugins/person_info/person_info.py @@ -143,9 +143,9 @@ class PersonInfoManager: parsed_json = json.loads(text) # 如果解析结果是列表,尝试取第一个元素 if isinstance(parsed_json, list): - if parsed_json: # 检查列表是否为空 + if parsed_json: # 检查列表是否为空 parsed_json = parsed_json[0] - else: # 如果列表为空,重置为 None,走后续逻辑 + else: # 如果列表为空,重置为 None,走后续逻辑 parsed_json = None # 确保解析结果是字典 if isinstance(parsed_json, dict): @@ -156,7 +156,7 @@ class PersonInfoManager: pass except Exception as e: logger.warning(f"尝试直接解析JSON时发生意外错误: {e}") - pass # 继续尝试其他方法 + pass # 继续尝试其他方法 # 如果直接解析失败或结果不是字典 try: @@ -165,7 +165,7 @@ class PersonInfoManager: matches = re.findall(json_pattern, text) if matches: parsed_obj = json.loads(matches[0]) - if isinstance(parsed_obj, dict): # 确保是字典 + if isinstance(parsed_obj, dict): # 确保是字典 return parsed_obj # 如果上面都失败了,尝试提取键值对 diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py index 34c284330..3062a7369 100644 --- a/src/plugins/person_info/relationship_manager.py +++ b/src/plugins/person_info/relationship_manager.py @@ -288,9 +288,9 @@ class RelationshipManager: print(f"person_name: {person_name}") relationship_value = await person_info_manager.get_value(person_id, "relationship_value") level_num = self.calculate_level_num(relationship_value) - + print(f"person_name: {person_name}, relationship_value: {relationship_value}, level_num: {level_num}") - + if level_num == 0 or level_num == 5: relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"] relation_prompt2_list = [ diff --git a/src/plugins/utils/chat_message_builder.py b/src/plugins/utils/chat_message_builder.py index eb2994f47..a7eef4431 100644 --- a/src/plugins/utils/chat_message_builder.py +++ b/src/plugins/utils/chat_message_builder.py @@ -348,7 +348,10 @@ async def build_readable_messages( messages_before_mark, replace_bot_name, merge_messages, timestamp_mode, truncate ) formatted_after, _ = await _build_readable_messages_internal( - messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, + messages_after_mark, + replace_bot_name, + merge_messages, + timestamp_mode, ) readable_read_mark = translate_timestamp_to_human_readable(read_mark, mode=timestamp_mode) From 298bcbcee4212ee9c2c5feaeec4b8f2cfc91a7c9 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Tue, 29 Apr 2025 23:49:13 +0800 Subject: [PATCH 12/16] Delete remove_chong.py --- scripts/remove_chong.py | 104 ---------------------------------------- 1 file changed, 104 deletions(-) delete mode 100644 scripts/remove_chong.py diff --git a/scripts/remove_chong.py b/scripts/remove_chong.py deleted file mode 100644 index 1398c865a..000000000 --- a/scripts/remove_chong.py +++ /dev/null @@ -1,104 +0,0 @@ -import difflib -import random - - - -def ji_suan_xiang_si_du(wen_ben_yi: str, wen_ben_er: str) -> float: - """ - 计算两个文本字符串的相似度。 - - 参数: - wen_ben_yi (str): 第一个文本字符串。 - wen_ben_er (str): 第二个文本字符串。 - - 返回: - float: 两个文本的相似度比率 (0 到 1 之间)。 - """ - xu_lie_pi_pei_qi = difflib.SequenceMatcher(None, wen_ben_yi, wen_ben_er) - # 获取相似度比率 - xiang_si_bi_lv = xu_lie_pi_pei_qi.ratio() - return xiang_si_bi_lv - - - -def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float: - """ - 根据相似度计算替换的概率。 - 规则: - - 相似度 <= 0.4: 概率 = 0 - - 相似度 >= 0.9: 概率 = 1 - - 相似度 == 0.6: 概率 = 0.7 - - 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7) - - 0.6 
< 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0) - """ - if xiang_si_du <= 0.4: - return 0.0 - elif xiang_si_du >= 0.9: - return 1.0 - elif 0.4 < xiang_si_du <= 0.6: - # p = 3.5 * s - 1.4 (线性方程 y - 0 = (0.7-0)/(0.6-0.4) * (x - 0.4)) - gai_lv = 3.5 * xiang_si_du - 1.4 - return max(0.0, gai_lv) # 确保概率不小于0 - elif 0.6 < xiang_si_du < 0.9: - # p = s + 0.1 (线性方程 y - 0.7 = (1-0.7)/(0.9-0.6) * (x - 0.6)) - gai_lv = xiang_si_du + 0.1 - return min(1.0, max(0.0, gai_lv)) # 确保概率在 0 和 1 之间 - - -# 获取用户输入 -shu_ru_yi = "豆豆刚刚回复了我的问候 现在可以等待对方的回应 不需要再主动发言 目前情绪满足 不需要使用工具" - -shu_ru_er = "豆豆刚刚回复了我的问候 现在可以等待对方的回应 不需要再主动发言 目前情绪满足 不需要使用工具 群主突然提到复活的事情 感觉有点莫名其妙 但情绪上还是满足的 暂时不需要回复" - -# 计算相似度 -xiang_si_du = ji_suan_xiang_si_du(shu_ru_yi, shu_ru_er) - -# 计算替换概率 -ti_huan_gai_lv = ji_suan_ti_huan_gai_lv(xiang_si_du) -print(f"文本相似度: {xiang_si_du:.2f}, 执行替换操作的概率: {ti_huan_gai_lv:.2f}") - -# 根据概率决定是否执行替换 -if random.random() < ti_huan_gai_lv: - print(f"执行替换操作 (基于概率 {ti_huan_gai_lv:.2f})...") - pi_pei_qi = difflib.SequenceMatcher(None, shu_ru_yi, shu_ru_er) - qu_chong_hou_de_er = [] - last_match_end_in_b = 0 - # 获取匹配块 (i, j, n) 其中 a[i:i+n] == b[j:j+n] - # 注意:get_matching_blocks 最后会有一个 (len(a), len(b), 0) 的虚拟块 - for _i, j, n in pi_pei_qi.get_matching_blocks(): - # 添加上一个匹配块结束到当前匹配块开始之间的非匹配部分 (来自文本二) - if last_match_end_in_b < j: - qu_chong_hou_de_er.append(shu_ru_er[last_match_end_in_b:j]) - # 更新下一个非匹配部分的起始位置 - last_match_end_in_b = j + n - - jie_guo = "".join(qu_chong_hou_de_er).strip() # 去除首尾空白 - jie_guo = "".join(qu_chong_hou_de_er).strip() # 去除首尾空白 - - if jie_guo: - # 定义词语列表 - yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"] - zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"] - cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"] - zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao - - # 根据概率决定是否添加词语 - qian_zhui_str = "" - if random.random() < 0.3: # 30% 概率添加语气词 - if random.random() < 0.3: # 30% 概率添加语气词 - qian_zhui_str += random.choice(yu_qi_ci_liebiao) - if random.random() < 0.7: # 70% 概率添加转折/承接词 - if random.random() < 0.7: # 70% 概率添加转折/承接词 - qian_zhui_str += random.choice(zhuan_jie_ci_liebiao) - - # 组合最终结果 - if qian_zhui_str: - zui_zhong_jie_guo = f"{qian_zhui_str},{jie_guo}" - print(f"移除重复部分并添加引导词后的文本二: {zui_zhong_jie_guo}") - else: - # 如果没有添加任何前缀词,直接输出去重结果 - print(f"移除重复部分后的文本二: {jie_guo}") - else: - print("移除重复部分后文本二为空。") -else: - print(f"未执行替换操作 (基于概率 {ti_huan_gai_lv:.2f})。原始相似度为: {xiang_si_du:.2f}") From 4b1c6784566f6d1f892f5f14b6a6a40910b2ac1e Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Wed, 30 Apr 2025 01:56:48 +0800 Subject: [PATCH 13/16] =?UTF-8?q?feat=EF=BC=9A=E4=B8=BA=E5=A4=8D=E8=AF=BB?= =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E7=A1=AC=E9=99=90=E5=88=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/heart_flow/mai_state_manager.py | 99 ++++++++---- src/plugins/heartFC_chat/heartFC_chat.py | 150 ++++++++++++++++-- .../heartFC_chat/heartflow_prompt_builder.py | 1 + 3 files changed, 200 insertions(+), 50 deletions(-) diff --git a/src/heart_flow/mai_state_manager.py b/src/heart_flow/mai_state_manager.py index 0888ae1fd..4d92d7fd8 100644 --- a/src/heart_flow/mai_state_manager.py +++ b/src/heart_flow/mai_state_manager.py @@ -8,9 +8,22 @@ from src.plugins.moods.moods import MoodManager logger = get_logger("mai_state") -# enable_unlimited_hfc_chat = True -enable_unlimited_hfc_chat = False +# -- 状态相关的可配置参数 (可以从 glocal_config 加载) -- +enable_unlimited_hfc_chat = True # 调试用:无限专注聊天 +# enable_unlimited_hfc_chat = False +prevent_offline_state = True # 
调试用:防止进入离线状态 +# 不同状态下普通聊天的最大消息数 +MAX_NORMAL_CHAT_NUM_PEEKING = 30 +MAX_NORMAL_CHAT_NUM_NORMAL = 40 +MAX_NORMAL_CHAT_NUM_FOCUSED = 30 + +# 不同状态下专注聊天的最大消息数 +MAX_FOCUSED_CHAT_NUM_PEEKING = 20 +MAX_FOCUSED_CHAT_NUM_NORMAL = 30 +MAX_FOCUSED_CHAT_NUM_FOCUSED = 40 + +# -- 状态定义 -- class MaiState(enum.Enum): """ @@ -34,11 +47,11 @@ class MaiState(enum.Enum): if self == MaiState.OFFLINE: return 0 elif self == MaiState.PEEKING: - return 30 + return MAX_NORMAL_CHAT_NUM_PEEKING elif self == MaiState.NORMAL_CHAT: - return 40 + return MAX_NORMAL_CHAT_NUM_NORMAL elif self == MaiState.FOCUSED_CHAT: - return 30 + return MAX_NORMAL_CHAT_NUM_FOCUSED def get_focused_chat_max_num(self): # 调试用 @@ -48,11 +61,11 @@ class MaiState(enum.Enum): if self == MaiState.OFFLINE: return 0 elif self == MaiState.PEEKING: - return 20 + return MAX_FOCUSED_CHAT_NUM_PEEKING elif self == MaiState.NORMAL_CHAT: - return 30 + return MAX_FOCUSED_CHAT_NUM_NORMAL elif self == MaiState.FOCUSED_CHAT: - return 40 + return MAX_FOCUSED_CHAT_NUM_FOCUSED class MaiStateInfo: @@ -110,7 +123,6 @@ class MaiStateManager: """管理 Mai 的整体状态转换逻辑""" def __init__(self): - # MaiStateManager doesn't hold the state itself, it operates on a MaiStateInfo instance. pass def check_and_decide_next_state(self, current_state_info: MaiStateInfo) -> Optional[MaiState]: @@ -129,6 +141,13 @@ class MaiStateManager: time_since_last_min_check = current_time - current_state_info.last_min_check_time next_state: Optional[MaiState] = None + # 辅助函数:根据 prevent_offline_state 标志调整目标状态 + def _resolve_offline(candidate_state: MaiState) -> MaiState: + if prevent_offline_state and candidate_state == MaiState.OFFLINE: + logger.debug(f"阻止进入 OFFLINE,改为 PEEKING") + return MaiState.PEEKING + return candidate_state + if current_status == MaiState.OFFLINE: logger.info("当前[离线],没看手机,思考要不要上线看看......") elif current_status == MaiState.PEEKING: @@ -141,61 +160,71 @@ class MaiStateManager: # 1. 麦麦每分钟都有概率离线 if time_since_last_min_check >= 60: if current_status != MaiState.OFFLINE: - if random.random() < 0.03: # 3% 概率切换到 OFFLINE,20分钟有50%的概率还在线 - logger.debug(f"突然不想聊了,从 {current_status.value} 切换到 离线") - next_state = MaiState.OFFLINE + if random.random() < 0.03: # 3% 概率切换到 OFFLINE + potential_next = MaiState.OFFLINE + resolved_next = _resolve_offline(potential_next) + logger.debug(f"规则1:概率触发下线,resolve 为 {resolved_next.value}") + # 只有当解析后的状态与当前状态不同时才设置 next_state + if resolved_next != current_status: + next_state = resolved_next - # 2. 状态持续时间规则 (如果没有自行下线) + # 2. 
状态持续时间规则 (只有在规则1没有触发状态改变时才检查) if next_state is None: + time_limit_exceeded = False + choices_list = [] + weights = [] + rule_id = "" + if current_status == MaiState.OFFLINE: - # OFFLINE 最多保持一分钟 - # 目前是一个调试值,可以修改 + # 注意:即使 prevent_offline_state=True,也可能从初始的 OFFLINE 状态启动 if time_in_current_status >= 60: + time_limit_exceeded = True + rule_id = "2.1 (From OFFLINE)" weights = [30, 30, 20, 20] choices_list = [MaiState.PEEKING, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT, MaiState.OFFLINE] - next_state_candidate = random.choices(choices_list, weights=weights, k=1)[0] - if next_state_candidate != MaiState.OFFLINE: - next_state = next_state_candidate - logger.debug(f"上线!开始 {next_state.value}") - else: - # 继续离线状态 - next_state = MaiState.OFFLINE - elif current_status == MaiState.PEEKING: if time_in_current_status >= 600: # PEEKING 最多持续 600 秒 + time_limit_exceeded = True + rule_id = "2.2 (From PEEKING)" weights = [70, 20, 10] choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT] - next_state = random.choices(choices_list, weights=weights, k=1)[0] - logger.debug(f"手机看完了,接下来 {next_state.value}") - elif current_status == MaiState.NORMAL_CHAT: if time_in_current_status >= 300: # NORMAL_CHAT 最多持续 300 秒 + time_limit_exceeded = True + rule_id = "2.3 (From NORMAL_CHAT)" weights = [50, 50] choices_list = [MaiState.OFFLINE, MaiState.FOCUSED_CHAT] - next_state = random.choices(choices_list, weights=weights, k=1)[0] - if next_state == MaiState.FOCUSED_CHAT: - logger.debug(f"继续深入聊天, {next_state.value}") - else: - logger.debug(f"聊完了,接下来 {next_state.value}") - elif current_status == MaiState.FOCUSED_CHAT: if time_in_current_status >= 600: # FOCUSED_CHAT 最多持续 600 秒 + time_limit_exceeded = True + rule_id = "2.4 (From FOCUSED_CHAT)" weights = [80, 20] choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT] - next_state = random.choices(choices_list, weights=weights, k=1)[0] - logger.debug(f"深入聊天结束,接下来 {next_state.value}") + if time_limit_exceeded: + next_state_candidate = random.choices(choices_list, weights=weights, k=1)[0] + resolved_candidate = _resolve_offline(next_state_candidate) + logger.debug(f"规则{rule_id}:时间到,随机选择 {next_state_candidate.value},resolve 为 {resolved_candidate.value}") + next_state = resolved_candidate # 直接使用解析后的状态 + + # 注意:enable_unlimited_hfc_chat 优先级高于 prevent_offline_state + # 如果触发了这个,它会覆盖上面规则2设置的 next_state if enable_unlimited_hfc_chat: logger.debug("调试用:开挂了,强制切换到专注聊天") next_state = MaiState.FOCUSED_CHAT + # --- 最终决策 --- # # 如果决定了下一个状态,且这个状态与当前状态不同,则返回下一个状态 if next_state is not None and next_state != current_status: return next_state # 如果决定保持 OFFLINE (next_state == MaiState.OFFLINE) 且当前也是 OFFLINE, - # 并且是由于持续时间规则触发的,返回 OFFLINE 以便调用者可以重置计时器 + # 并且是由于持续时间规则触发的,返回 OFFLINE 以便调用者可以重置计时器。 + # 注意:这个分支只有在 prevent_offline_state = False 时才可能被触发。 elif next_state == MaiState.OFFLINE and current_status == MaiState.OFFLINE and time_in_current_status >= 60: logger.debug("决定保持 OFFLINE (持续时间规则),返回 OFFLINE 以提示重置计时器。") return MaiState.OFFLINE # Return OFFLINE to signal caller that timer reset might be needed else: + # 1. next_state is None (没有触发任何转换规则) + # 2. next_state is not None 但等于 current_status (例如规则1想切OFFLINE但被resolve成PEEKING,而当前已经是PEEKING) + # 3. 
next_state is OFFLINE, current is OFFLINE, 但不是因为时间规则触发 (例如初始状态还没到60秒) return None # 没有状态转换发生或无需重置计时器 diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py index 00cc27cd9..25cf854af 100644 --- a/src/plugins/heartFC_chat/heartFC_chat.py +++ b/src/plugins/heartFC_chat/heartFC_chat.py @@ -1,6 +1,7 @@ import asyncio import time import traceback +import random # <--- 添加导入 from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine from collections import deque from src.plugins.chat.message import MessageRecv, BaseMessageInfo, MessageThinking, MessageSending @@ -31,6 +32,8 @@ from src.individuality.individuality import Individuality INITIAL_DURATION = 60.0 +WAITING_TIME_THRESHOLD = 300 # 等待新消息时间阈值,单位秒 + logger = get_logger("interest") # Logger Name Changed @@ -45,10 +48,11 @@ class ActionManager: def __init__(self): # 初始化为默认动作集 self._available_actions: Dict[str, str] = DEFAULT_ACTIONS.copy() + self._original_actions_backup: Optional[Dict[str, str]] = None # 用于临时移除时的备份 def get_available_actions(self) -> Dict[str, str]: """获取当前可用的动作集""" - return self._available_actions + return self._available_actions.copy() # 返回副本以防外部修改 def add_action(self, action_name: str, description: str) -> bool: """ @@ -81,6 +85,30 @@ class ActionManager: del self._available_actions[action_name] return True + def temporarily_remove_actions(self, actions_to_remove: List[str]): + """ + 临时移除指定的动作,备份原始动作集。 + 如果已经有备份,则不重复备份。 + """ + if self._original_actions_backup is None: + self._original_actions_backup = self._available_actions.copy() + + actions_actually_removed = [] + for action_name in actions_to_remove: + if action_name in self._available_actions: + del self._available_actions[action_name] + actions_actually_removed.append(action_name) + # logger.debug(f"临时移除了动作: {actions_actually_removed}") # 可选日志 + + def restore_actions(self): + """ + 恢复之前备份的原始动作集。 + """ + if self._original_actions_backup is not None: + self._available_actions = self._original_actions_backup.copy() + self._original_actions_backup = None + # logger.debug("恢复了原始动作集") # 可选日志 + def clear_actions(self): """清空所有动作""" self._available_actions.clear() @@ -151,7 +179,7 @@ class HeartFChatting: 其生命周期现在由其关联的 SubHeartflow 的 FOCUSED 状态控制。 """ - CONSECUTIVE_NO_REPLY_THRESHOLD = 5 # 连续不回复的阈值 + CONSECUTIVE_NO_REPLY_THRESHOLD = 3 # 连续不回复的阈值 def __init__( self, @@ -214,6 +242,7 @@ class HeartFChatting: self._current_cycle: Optional[CycleInfo] = None self._lian_xu_bu_hui_fu_ci_shu: int = 0 # <--- 新增:连续不回复计数器 self._shutting_down: bool = False # <--- 新增:关闭标志位 + self._lian_xu_deng_dai_shi_jian: float = 0.0 # <--- 新增:累计等待时间 async def _initialize(self) -> bool: """ @@ -489,6 +518,7 @@ class HeartFChatting: logger.error(f"{self.log_prefix} 处理{action}时出错: {e}") # 出错时也重置计数器 self._lian_xu_bu_hui_fu_ci_shu = 0 + self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 return False, "" async def _handle_text_reply(self, reasoning: str, emoji_query: str, cycle_timers: dict) -> tuple[bool, str]: @@ -511,6 +541,7 @@ class HeartFChatting: """ # 重置连续不回复计数器 self._lian_xu_bu_hui_fu_ci_shu = 0 + self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 # 获取锚点消息 anchor_message = await self._get_anchor_message() @@ -566,6 +597,7 @@ class HeartFChatting: bool: 是否发送成功 """ logger.info(f"{self.log_prefix} 决定回复表情({emoji_query}): {reasoning}") + self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间(即使不计数也保持一致性) try: anchor = await self._get_anchor_message() @@ -601,23 +633,41 @@ class HeartFChatting: observation = self.observations[0] if self.observations else None 
try: + dang_qian_deng_dai = 0.0 # 初始化本次等待时间 with Timer("等待新消息", cycle_timers): # 等待新消息、超时或关闭信号,并获取结果 await self._wait_for_new_message(observation, planner_start_db_time, self.log_prefix) + # 从计时器获取实际等待时间 + dang_qian_deng_dai = cycle_timers.get("等待新消息", 0.0) + if not self._shutting_down: self._lian_xu_bu_hui_fu_ci_shu += 1 + self._lian_xu_deng_dai_shi_jian += dang_qian_deng_dai # 累加等待时间 logger.debug( - f"{self.log_prefix} 连续不回复计数增加: {self._lian_xu_bu_hui_fu_ci_shu}/{self.CONSECUTIVE_NO_REPLY_THRESHOLD}" + f"{self.log_prefix} 连续不回复计数增加: {self._lian_xu_bu_hui_fu_ci_shu}/{self.CONSECUTIVE_NO_REPLY_THRESHOLD}, " + f"本次等待: {dang_qian_deng_dai:.2f}秒, 累计等待: {self._lian_xu_deng_dai_shi_jian:.2f}秒" ) - # 检查是否达到阈值 - if self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD: + # 检查是否同时达到次数和时间阈值 + time_threshold = 0.66 * WAITING_TIME_THRESHOLD * self.CONSECUTIVE_NO_REPLY_THRESHOLD + if (self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD and + self._lian_xu_deng_dai_shi_jian >= time_threshold): logger.info( - f"{self.log_prefix} 连续不回复达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次),调用回调请求状态转换" + f"{self.log_prefix} 连续不回复达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次) " + f"且累计等待时间达到 {self._lian_xu_deng_dai_shi_jian:.2f}秒 (阈值 {time_threshold}秒)," + f"调用回调请求状态转换" ) - # 调用回调。注意:这里不重置计数器,依赖回调函数成功改变状态来隐式重置上下文。 + # 调用回调。注意:这里不重置计数器和时间,依赖回调函数成功改变状态来隐式重置上下文。 await self.on_consecutive_no_reply_callback() + elif self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD: + # 仅次数达到阈值,但时间未达到 + logger.debug( + f"{self.log_prefix} 连续不回复次数达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次) " + f"但累计等待时间 {self._lian_xu_deng_dai_shi_jian:.2f}秒 未达到时间阈值 ({time_threshold}秒),暂不调用回调" + ) + # else: 次数和时间都未达到阈值,不做处理 + return True @@ -658,8 +708,8 @@ class HeartFChatting: return True # 检查超时 (放在检查新消息和关闭之后) - if time.monotonic() - wait_start_time > 120: - logger.warning(f"{log_prefix} 等待新消息超时(20秒)") + if time.monotonic() - wait_start_time > WAITING_TIME_THRESHOLD: + logger.warning(f"{log_prefix} 等待新消息超时({WAITING_TIME_THRESHOLD}秒)") return False try: @@ -737,9 +787,49 @@ class HeartFChatting: 参数: current_mind: 子思维的当前思考结果 + cycle_timers: 计时器字典 + is_re_planned: 是否为重新规划 """ logger.info(f"{self.log_prefix}[Planner] 开始{'重新' if is_re_planned else ''}执行规划器") + # --- 新增:检查历史动作并调整可用动作 --- + lian_xu_wen_ben_hui_fu = 0 # 连续文本回复次数 + actions_to_remove_temporarily = [] + probability_roll = random.random() # 在循环外掷骰子一次,用于概率判断 + + # 反向遍历最近的循环历史 + for cycle in reversed(self._cycle_history): + # 只关心实际执行了动作的循环 + if cycle.action_taken: + if cycle.action_type == "text_reply": + lian_xu_wen_ben_hui_fu += 1 + else: + break # 遇到非文本回复,中断计数 + # 检查最近的3个循环即可,避免检查过多历史 (如果历史很长) + if len(self._cycle_history) > 0 and cycle.cycle_id <= self._cycle_history[0].cycle_id + (len(self._cycle_history) - 4): + break + + logger.debug(f"{self.log_prefix}[Planner] 检测到连续文本回复次数: {lian_xu_wen_ben_hui_fu}") + + # 根据连续次数决定临时移除哪些动作 + if lian_xu_wen_ben_hui_fu >= 3: + logger.info(f"{self.log_prefix}[Planner] 连续回复 >= 3 次,强制移除 text_reply 和 emoji_reply") + actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) + elif lian_xu_wen_ben_hui_fu == 2: + if probability_roll < 0.8: # 80% 概率 + logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (触发)") + actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) + else: + logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (未触发)") + elif lian_xu_wen_ben_hui_fu == 1: + if probability_roll < 0.4: # 40% 概率 + 
logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (触发)") + actions_to_remove_temporarily.append("text_reply") + else: + logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (未触发)") + # 如果 lian_xu_wen_ben_hui_fu == 0,则不移除任何动作 + # --- 结束:检查历史动作 --- + # 获取观察信息 observation = self.observations[0] if is_re_planned: @@ -754,6 +844,11 @@ class HeartFChatting: emoji_query = "" # <--- 在这里初始化 emoji_query try: + # --- 新增:应用临时动作移除 --- + if actions_to_remove_temporarily: + self.action_manager.temporarily_remove_actions(actions_to_remove_temporarily) + logger.debug(f"{self.log_prefix}[Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(self.action_manager.get_available_actions().keys())}") + # --- 构建提示词 --- replan_prompt_str = "" if is_re_planned: @@ -767,6 +862,7 @@ class HeartFChatting: # --- 调用 LLM --- try: planner_tools = self.action_manager.get_planner_tool_definition() + logger.debug(f"{self.log_prefix}[Planner] 本次使用的工具定义: {planner_tools}") # 记录本次使用的工具 _response_text, _reasoning_content, tool_calls = await self.planner_llm.generate_response_tool_async( prompt=prompt, tools=planner_tools, @@ -810,15 +906,23 @@ class HeartFChatting: extracted_action = arguments.get("action", "no_reply") # 验证动作 if extracted_action not in self.action_manager.get_available_actions(): + # 如果LLM返回了一个此时不该用的动作(因为被临时移除了) + # 或者完全无效的动作 logger.warning( - f"{self.log_prefix}[Planner] LLM返回了未授权的动作: {extracted_action},使用默认动作no_reply" + f"{self.log_prefix}[Planner] LLM返回了当前不可用或无效的动作: {extracted_action},将强制使用 'no_reply'" ) action = "no_reply" - reasoning = f"LLM返回了未授权的动作: {extracted_action}" + reasoning = f"LLM返回了当前不可用的动作: {extracted_action}" emoji_query = "" - llm_error = False # 视为非LLM错误,只是逻辑修正 + llm_error = False # 视为逻辑修正而非 LLM 错误 + # --- 检查 'no_reply' 是否也恰好被移除了 (极端情况) --- + if "no_reply" not in self.action_manager.get_available_actions(): + logger.error(f"{self.log_prefix}[Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。") + action = "error" # 回退到错误状态 + reasoning = "无法执行任何有效动作,包括 no_reply" + llm_error = True else: - # 动作有效,使用提取的值 + # 动作有效且可用,使用提取的值 action = extracted_action reasoning = arguments.get("reasoning", "未提供理由") emoji_query = arguments.get("emoji_query", "") @@ -837,8 +941,18 @@ class HeartFChatting: reasoning = f"验证工具调用失败: {error_msg}" logger.warning(f"{self.log_prefix}[Planner] {reasoning}") else: # not valid_tool_calls - reasoning = "LLM未返回有效的工具调用" - logger.warning(f"{self.log_prefix}[Planner] {reasoning}") + # 如果没有有效的工具调用,我们需要检查 'no_reply' 是否是当前唯一可用的动作 + available_actions = list(self.action_manager.get_available_actions().keys()) + if available_actions == ["no_reply"]: + logger.info(f"{self.log_prefix}[Planner] LLM未返回工具调用,但当前唯一可用动作是 'no_reply',将执行 'no_reply'") + action = "no_reply" + reasoning = "LLM未返回工具调用,且当前仅 'no_reply' 可用" + emoji_query = "" + llm_error = False # 视为逻辑选择而非错误 + else: + reasoning = "LLM未返回有效的工具调用" + logger.warning(f"{self.log_prefix}[Planner] {reasoning}") + # llm_error 保持为 True # 如果 llm_error 仍然是 True,说明在处理过程中有错误发生 except Exception as llm_e: @@ -847,6 +961,12 @@ class HeartFChatting: action = "error" reasoning = f"Planner内部处理错误: {llm_e}" llm_error = True + # --- 新增:确保动作恢复 --- + finally: + if actions_to_remove_temporarily: # 只有当确实移除了动作时才需要恢复 + self.action_manager.restore_actions() + logger.debug(f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}") + # --- 结束:确保动作恢复 --- # --- 结束 LLM 决策 --- # return { diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py 
b/src/plugins/heartFC_chat/heartflow_prompt_builder.py index 32c78a106..a0f266d66 100644 --- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py +++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py @@ -85,6 +85,7 @@ def init_prompt(): - 遵守回复原则 - 必须调用工具并包含action和reasoning - 你可以选择文字回复(text_reply),纯表情回复(emoji_reply),不回复(no_reply) +- 并不是所有选择都可用 - 选择text_reply或emoji_reply时必须提供emoji_query - 保持回复自然,符合日常聊天习惯""", "planner_prompt", From fbb17e42742483ae916b7c50196f777e871c5ccb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A2=A8=E6=A2=93=E6=9F=92?= <1787882683@qq.com> Date: Wed, 30 Apr 2025 09:33:49 +0800 Subject: [PATCH 14/16] =?UTF-8?q?feat=EF=BC=9A=E6=96=B0=E5=A2=9E=E5=8E=86?= =?UTF-8?q?=E5=8F=B2=E6=83=B3=E6=B3=95=E9=98=9F=E5=88=97=E5=8F=8A=E5=AE=9E?= =?UTF-8?q?=E6=97=B6=E7=9B=91=E6=8E=A7=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- scripts/interest_monitor_gui.py | 99 ++++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 1 deletion(-) diff --git a/scripts/interest_monitor_gui.py b/scripts/interest_monitor_gui.py index fb9e51cf8..fb61eefde 100644 --- a/scripts/interest_monitor_gui.py +++ b/scripts/interest_monitor_gui.py @@ -19,6 +19,7 @@ REFRESH_INTERVAL_MS = 200 # 刷新间隔 (毫秒) - 可以适当调长,因为 WINDOW_TITLE = "Interest Monitor (Live History)" MAX_HISTORY_POINTS = 1000 # 图表上显示的最大历史点数 (可以增加) MAX_STREAMS_TO_DISPLAY = 15 # 最多显示多少个聊天流的折线图 (可以增加) +MAX_QUEUE_SIZE = 30 # 新增:历史想法队列最大长度 # *** 添加 Matplotlib 中文字体配置 *** # 尝试使用 'SimHei' 或 'Microsoft YaHei',如果找不到,matplotlib 会回退到默认字体 @@ -61,6 +62,10 @@ class InterestMonitorApp: self.single_stream_last_active = tk.StringVar(value="活跃: N/A") self.single_stream_last_interaction = tk.StringVar(value="交互: N/A") + # 新增:历史想法队列 + self.main_mind_history = deque(maxlen=MAX_QUEUE_SIZE) + self.last_main_mind_timestamp = 0 # 记录最后一条main_mind的时间戳 + # --- UI 元素 --- # --- 新增:顶部全局信息框架 --- @@ -143,6 +148,20 @@ class InterestMonitorApp: self.canvas_widget_single = self.canvas_single.get_tk_widget() self.canvas_widget_single.pack(side=tk.TOP, fill=tk.BOTH, expand=1) + # --- 新增第三个选项卡:麦麦历史想法 --- + self.frame_mind_history = ttk.Frame(self.notebook, padding="5 5 5 5") + self.notebook.add(self.frame_mind_history, text="麦麦历史想法") + + # 聊天框样式的文本框(只读)+ 滚动条 + self.mind_text_scroll = tk.Scrollbar(self.frame_mind_history) + self.mind_text_scroll.pack(side=tk.RIGHT, fill=tk.Y) + self.mind_text = tk.Text( + self.frame_mind_history, height=25, state="disabled", wrap="word", font=("微软雅黑", 12), + yscrollcommand=self.mind_text_scroll.set + ) + self.mind_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, padx=5, pady=5) + self.mind_text_scroll.config(command=self.mind_text.yview) + # --- 初始化和启动刷新 --- self.update_display() # 首次加载并开始刷新循环 @@ -154,6 +173,78 @@ class InterestMonitorApp: """生成随机颜色用于区分线条""" return "#{:06x}".format(random.randint(0, 0xFFFFFF)) + def load_main_mind_history(self): + """只读取包含main_mind的日志行,维护历史想法队列""" + if not os.path.exists(LOG_FILE_PATH): + return + + main_mind_entries = [] + try: + with open(LOG_FILE_PATH, "r", encoding="utf-8") as f: + for line in f: + try: + log_entry = json.loads(line.strip()) + if "main_mind" in log_entry: + ts = log_entry.get("timestamp", 0) + main_mind_entries.append((ts, log_entry)) + except Exception: + continue + main_mind_entries.sort(key=lambda x: x[0]) + recent_entries = main_mind_entries[-MAX_QUEUE_SIZE:] + self.main_mind_history.clear() + for ts, entry in recent_entries: + self.main_mind_history.append(entry) + if recent_entries: + self.last_main_mind_timestamp = 
recent_entries[-1][0] + # 首次加载时刷新 + self.refresh_mind_text() + except Exception: + pass + + def update_main_mind_history(self): + """实时监控log文件,发现新main_mind数据则更新队列和展示(仅有新数据时刷新)""" + if not os.path.exists(LOG_FILE_PATH): + return + + new_entries = [] + try: + with open(LOG_FILE_PATH, "r", encoding="utf-8") as f: + for line in reversed(list(f)): + try: + log_entry = json.loads(line.strip()) + if "main_mind" in log_entry: + ts = log_entry.get("timestamp", 0) + if ts > self.last_main_mind_timestamp: + new_entries.append((ts, log_entry)) + else: + break + except Exception: + continue + if new_entries: + for ts, entry in sorted(new_entries): + if len(self.main_mind_history) >= MAX_QUEUE_SIZE: + self.main_mind_history.popleft() + self.main_mind_history.append(entry) + self.last_main_mind_timestamp = ts + self.refresh_mind_text() # 只有有新数据时才刷新 + except Exception: + pass + + def refresh_mind_text(self): + """刷新聊天框样式的历史想法展示""" + self.mind_text.config(state="normal") + self.mind_text.delete(1.0, tk.END) + for entry in self.main_mind_history: + ts = entry.get("timestamp", 0) + dt_str = datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S") if ts else "" + main_mind = entry.get("main_mind", "") + mai_state = entry.get("mai_state", "") + subflow_count = entry.get("subflow_count", "") + msg = f"[{dt_str}] 状态:{mai_state} 子流:{subflow_count}\n{main_mind}\n\n" + self.mind_text.insert(tk.END, msg) + self.mind_text.see(tk.END) + self.mind_text.config(state="disabled") + def load_and_update_history(self): """从 history log 文件加载数据并更新历史记录""" if not os.path.exists(LOG_FILE_PATH): @@ -537,8 +628,14 @@ class InterestMonitorApp: def update_display(self): """主更新循环""" try: - self.load_and_update_history() # 从文件加载数据并更新内部状态 + # --- 新增:首次加载历史想法 --- + if not hasattr(self, "_main_mind_loaded"): + self.load_main_mind_history() + self._main_mind_loaded = True + else: + self.update_main_mind_history() # 只有有新main_mind数据时才刷新界面 # *** 修改:分别调用两个图表的更新方法 *** + self.load_and_update_history() # 从文件加载数据并更新内部状态 self.update_all_streams_plot() # 更新所有流的图表 self.update_single_stream_plot() # 更新单个流的图表 except Exception as e: From 42505960e18a926840b8d8db1f386ba023899c5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A2=A8=E6=A2=93=E6=9F=92?= <1787882683@qq.com> Date: Wed, 30 Apr 2025 09:40:45 +0800 Subject: [PATCH 15/16] fix: Ruff --- scripts/interest_monitor_gui.py | 2 +- src/heart_flow/mai_state_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/interest_monitor_gui.py b/scripts/interest_monitor_gui.py index fb61eefde..4e688a301 100644 --- a/scripts/interest_monitor_gui.py +++ b/scripts/interest_monitor_gui.py @@ -192,7 +192,7 @@ class InterestMonitorApp: main_mind_entries.sort(key=lambda x: x[0]) recent_entries = main_mind_entries[-MAX_QUEUE_SIZE:] self.main_mind_history.clear() - for ts, entry in recent_entries: + for _ts, entry in recent_entries: self.main_mind_history.append(entry) if recent_entries: self.last_main_mind_timestamp = recent_entries[-1][0] diff --git a/src/heart_flow/mai_state_manager.py b/src/heart_flow/mai_state_manager.py index 4d92d7fd8..a3ff34e1d 100644 --- a/src/heart_flow/mai_state_manager.py +++ b/src/heart_flow/mai_state_manager.py @@ -144,7 +144,7 @@ class MaiStateManager: # 辅助函数:根据 prevent_offline_state 标志调整目标状态 def _resolve_offline(candidate_state: MaiState) -> MaiState: if prevent_offline_state and candidate_state == MaiState.OFFLINE: - logger.debug(f"阻止进入 OFFLINE,改为 PEEKING") + logger.debug("阻止进入 OFFLINE,改为 PEEKING") return MaiState.PEEKING return candidate_state From 
ed3d8098414b3d6774bae3af228bf7d16e5d8095 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 30 Apr 2025 01:41:00 +0000 Subject: [PATCH 16/16] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- scripts/interest_monitor_gui.py | 8 ++- src/heart_flow/mai_state_manager.py | 11 ++-- src/plugins/heartFC_chat/heartFC_chat.py | 76 ++++++++++++++---------- 3 files changed, 57 insertions(+), 38 deletions(-) diff --git a/scripts/interest_monitor_gui.py b/scripts/interest_monitor_gui.py index 4e688a301..adb83f729 100644 --- a/scripts/interest_monitor_gui.py +++ b/scripts/interest_monitor_gui.py @@ -156,8 +156,12 @@ class InterestMonitorApp: self.mind_text_scroll = tk.Scrollbar(self.frame_mind_history) self.mind_text_scroll.pack(side=tk.RIGHT, fill=tk.Y) self.mind_text = tk.Text( - self.frame_mind_history, height=25, state="disabled", wrap="word", font=("微软雅黑", 12), - yscrollcommand=self.mind_text_scroll.set + self.frame_mind_history, + height=25, + state="disabled", + wrap="word", + font=("微软雅黑", 12), + yscrollcommand=self.mind_text_scroll.set, ) self.mind_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, padx=5, pady=5) self.mind_text_scroll.config(command=self.mind_text.yview) diff --git a/src/heart_flow/mai_state_manager.py b/src/heart_flow/mai_state_manager.py index a3ff34e1d..48eead1a1 100644 --- a/src/heart_flow/mai_state_manager.py +++ b/src/heart_flow/mai_state_manager.py @@ -9,9 +9,9 @@ logger = get_logger("mai_state") # -- 状态相关的可配置参数 (可以从 glocal_config 加载) -- -enable_unlimited_hfc_chat = True # 调试用:无限专注聊天 +enable_unlimited_hfc_chat = True # 调试用:无限专注聊天 # enable_unlimited_hfc_chat = False -prevent_offline_state = True # 调试用:防止进入离线状态 +prevent_offline_state = True # 调试用:防止进入离线状态 # 不同状态下普通聊天的最大消息数 MAX_NORMAL_CHAT_NUM_PEEKING = 30 @@ -25,6 +25,7 @@ MAX_FOCUSED_CHAT_NUM_FOCUSED = 40 # -- 状态定义 -- + class MaiState(enum.Enum): """ 聊天状态: @@ -204,8 +205,10 @@ class MaiStateManager: if time_limit_exceeded: next_state_candidate = random.choices(choices_list, weights=weights, k=1)[0] resolved_candidate = _resolve_offline(next_state_candidate) - logger.debug(f"规则{rule_id}:时间到,随机选择 {next_state_candidate.value},resolve 为 {resolved_candidate.value}") - next_state = resolved_candidate # 直接使用解析后的状态 + logger.debug( + f"规则{rule_id}:时间到,随机选择 {next_state_candidate.value},resolve 为 {resolved_candidate.value}" + ) + next_state = resolved_candidate # 直接使用解析后的状态 # 注意:enable_unlimited_hfc_chat 优先级高于 prevent_offline_state # 如果触发了这个,它会覆盖上面规则2设置的 next_state diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py index 25cf854af..c15c4f83f 100644 --- a/src/plugins/heartFC_chat/heartFC_chat.py +++ b/src/plugins/heartFC_chat/heartFC_chat.py @@ -1,7 +1,7 @@ import asyncio import time import traceback -import random # <--- 添加导入 +import random # <--- 添加导入 from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine from collections import deque from src.plugins.chat.message import MessageRecv, BaseMessageInfo, MessageThinking, MessageSending @@ -32,7 +32,7 @@ from src.individuality.individuality import Individuality INITIAL_DURATION = 60.0 -WAITING_TIME_THRESHOLD = 300 # 等待新消息时间阈值,单位秒 +WAITING_TIME_THRESHOLD = 300 # 等待新消息时间阈值,单位秒 logger = get_logger("interest") # Logger Name Changed @@ -48,11 +48,11 @@ class ActionManager: def __init__(self): # 初始化为默认动作集 self._available_actions: Dict[str, str] = 
DEFAULT_ACTIONS.copy() - self._original_actions_backup: Optional[Dict[str, str]] = None # 用于临时移除时的备份 + self._original_actions_backup: Optional[Dict[str, str]] = None # 用于临时移除时的备份 def get_available_actions(self) -> Dict[str, str]: """获取当前可用的动作集""" - return self._available_actions.copy() # 返回副本以防外部修改 + return self._available_actions.copy() # 返回副本以防外部修改 def add_action(self, action_name: str, description: str) -> bool: """ @@ -518,7 +518,7 @@ class HeartFChatting: logger.error(f"{self.log_prefix} 处理{action}时出错: {e}") # 出错时也重置计数器 self._lian_xu_bu_hui_fu_ci_shu = 0 - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 + self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 return False, "" async def _handle_text_reply(self, reasoning: str, emoji_query: str, cycle_timers: dict) -> tuple[bool, str]: @@ -541,7 +541,7 @@ class HeartFChatting: """ # 重置连续不回复计数器 self._lian_xu_bu_hui_fu_ci_shu = 0 - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 + self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间 # 获取锚点消息 anchor_message = await self._get_anchor_message() @@ -597,7 +597,7 @@ class HeartFChatting: bool: 是否发送成功 """ logger.info(f"{self.log_prefix} 决定回复表情({emoji_query}): {reasoning}") - self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间(即使不计数也保持一致性) + self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间(即使不计数也保持一致性) try: anchor = await self._get_anchor_message() @@ -633,17 +633,16 @@ class HeartFChatting: observation = self.observations[0] if self.observations else None try: - dang_qian_deng_dai = 0.0 # 初始化本次等待时间 + dang_qian_deng_dai = 0.0 # 初始化本次等待时间 with Timer("等待新消息", cycle_timers): # 等待新消息、超时或关闭信号,并获取结果 await self._wait_for_new_message(observation, planner_start_db_time, self.log_prefix) # 从计时器获取实际等待时间 dang_qian_deng_dai = cycle_timers.get("等待新消息", 0.0) - if not self._shutting_down: self._lian_xu_bu_hui_fu_ci_shu += 1 - self._lian_xu_deng_dai_shi_jian += dang_qian_deng_dai # 累加等待时间 + self._lian_xu_deng_dai_shi_jian += dang_qian_deng_dai # 累加等待时间 logger.debug( f"{self.log_prefix} 连续不回复计数增加: {self._lian_xu_bu_hui_fu_ci_shu}/{self.CONSECUTIVE_NO_REPLY_THRESHOLD}, " f"本次等待: {dang_qian_deng_dai:.2f}秒, 累计等待: {self._lian_xu_deng_dai_shi_jian:.2f}秒" @@ -651,8 +650,10 @@ class HeartFChatting: # 检查是否同时达到次数和时间阈值 time_threshold = 0.66 * WAITING_TIME_THRESHOLD * self.CONSECUTIVE_NO_REPLY_THRESHOLD - if (self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD and - self._lian_xu_deng_dai_shi_jian >= time_threshold): + if ( + self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD + and self._lian_xu_deng_dai_shi_jian >= time_threshold + ): logger.info( f"{self.log_prefix} 连续不回复达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次) " f"且累计等待时间达到 {self._lian_xu_deng_dai_shi_jian:.2f}秒 (阈值 {time_threshold}秒)," @@ -668,7 +669,6 @@ class HeartFChatting: ) # else: 次数和时间都未达到阈值,不做处理 - return True except asyncio.CancelledError: @@ -793,9 +793,9 @@ class HeartFChatting: logger.info(f"{self.log_prefix}[Planner] 开始{'重新' if is_re_planned else ''}执行规划器") # --- 新增:检查历史动作并调整可用动作 --- - lian_xu_wen_ben_hui_fu = 0 # 连续文本回复次数 + lian_xu_wen_ben_hui_fu = 0 # 连续文本回复次数 actions_to_remove_temporarily = [] - probability_roll = random.random() # 在循环外掷骰子一次,用于概率判断 + probability_roll = random.random() # 在循环外掷骰子一次,用于概率判断 # 反向遍历最近的循环历史 for cycle in reversed(self._cycle_history): @@ -804,9 +804,11 @@ class HeartFChatting: if cycle.action_type == "text_reply": lian_xu_wen_ben_hui_fu += 1 else: - break # 遇到非文本回复,中断计数 + break # 遇到非文本回复,中断计数 # 检查最近的3个循环即可,避免检查过多历史 (如果历史很长) - if len(self._cycle_history) > 0 and cycle.cycle_id <= 
self._cycle_history[0].cycle_id + (len(self._cycle_history) - 4): + if len(self._cycle_history) > 0 and cycle.cycle_id <= self._cycle_history[0].cycle_id + ( + len(self._cycle_history) - 4 + ): break logger.debug(f"{self.log_prefix}[Planner] 检测到连续文本回复次数: {lian_xu_wen_ben_hui_fu}") @@ -816,13 +818,15 @@ class HeartFChatting: logger.info(f"{self.log_prefix}[Planner] 连续回复 >= 3 次,强制移除 text_reply 和 emoji_reply") actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) elif lian_xu_wen_ben_hui_fu == 2: - if probability_roll < 0.8: # 80% 概率 + if probability_roll < 0.8: # 80% 概率 logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (触发)") actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"]) else: - logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (未触发)") + logger.info( + f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (未触发)" + ) elif lian_xu_wen_ben_hui_fu == 1: - if probability_roll < 0.4: # 40% 概率 + if probability_roll < 0.4: # 40% 概率 logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (触发)") actions_to_remove_temporarily.append("text_reply") else: @@ -847,7 +851,9 @@ class HeartFChatting: # --- 新增:应用临时动作移除 --- if actions_to_remove_temporarily: self.action_manager.temporarily_remove_actions(actions_to_remove_temporarily) - logger.debug(f"{self.log_prefix}[Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(self.action_manager.get_available_actions().keys())}") + logger.debug( + f"{self.log_prefix}[Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(self.action_manager.get_available_actions().keys())}" + ) # --- 构建提示词 --- replan_prompt_str = "" @@ -862,7 +868,7 @@ class HeartFChatting: # --- 调用 LLM --- try: planner_tools = self.action_manager.get_planner_tool_definition() - logger.debug(f"{self.log_prefix}[Planner] 本次使用的工具定义: {planner_tools}") # 记录本次使用的工具 + logger.debug(f"{self.log_prefix}[Planner] 本次使用的工具定义: {planner_tools}") # 记录本次使用的工具 _response_text, _reasoning_content, tool_calls = await self.planner_llm.generate_response_tool_async( prompt=prompt, tools=planner_tools, @@ -914,13 +920,15 @@ class HeartFChatting: action = "no_reply" reasoning = f"LLM返回了当前不可用的动作: {extracted_action}" emoji_query = "" - llm_error = False # 视为逻辑修正而非 LLM 错误 + llm_error = False # 视为逻辑修正而非 LLM 错误 # --- 检查 'no_reply' 是否也恰好被移除了 (极端情况) --- if "no_reply" not in self.action_manager.get_available_actions(): - logger.error(f"{self.log_prefix}[Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。") - action = "error" # 回退到错误状态 - reasoning = "无法执行任何有效动作,包括 no_reply" - llm_error = True + logger.error( + f"{self.log_prefix}[Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。" + ) + action = "error" # 回退到错误状态 + reasoning = "无法执行任何有效动作,包括 no_reply" + llm_error = True else: # 动作有效且可用,使用提取的值 action = extracted_action @@ -944,11 +952,13 @@ class HeartFChatting: # 如果没有有效的工具调用,我们需要检查 'no_reply' 是否是当前唯一可用的动作 available_actions = list(self.action_manager.get_available_actions().keys()) if available_actions == ["no_reply"]: - logger.info(f"{self.log_prefix}[Planner] LLM未返回工具调用,但当前唯一可用动作是 'no_reply',将执行 'no_reply'") + logger.info( + f"{self.log_prefix}[Planner] LLM未返回工具调用,但当前唯一可用动作是 'no_reply',将执行 'no_reply'" + ) action = "no_reply" reasoning = "LLM未返回工具调用,且当前仅 'no_reply' 可用" emoji_query = "" - llm_error = False # 视为逻辑选择而非错误 + llm_error = False # 视为逻辑选择而非错误 else: reasoning = "LLM未返回有效的工具调用" logger.warning(f"{self.log_prefix}[Planner] {reasoning}") @@ -963,9 +973,11 @@ class HeartFChatting: llm_error = True # 
--- 新增:确保动作恢复 --- finally: - if actions_to_remove_temporarily: # 只有当确实移除了动作时才需要恢复 - self.action_manager.restore_actions() - logger.debug(f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}") + if actions_to_remove_temporarily: # 只有当确实移除了动作时才需要恢复 + self.action_manager.restore_actions() + logger.debug( + f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}" + ) # --- 结束:确保动作恢复 --- # --- 结束 LLM 决策 --- #
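
For reference, below is a minimal standalone sketch of the similarity-to-replacement-probability mapping that the probabilistic dedup in src/heart_flow/sub_mind.py and the (now deleted) scripts/remove_chong.py share. It assumes calculate_replacement_probability in sub_mind.py keeps the same breakpoints as remove_chong.py's ji_suan_ti_huan_gai_lv; the function bodies below are a sketch, not the committed code, and the sample strings are purely illustrative.

    import difflib
    import random

    def calculate_similarity(text_a: str, text_b: str) -> float:
        # Character-level similarity ratio in [0.0, 1.0].
        return difflib.SequenceMatcher(None, text_a, text_b).ratio()

    def calculate_replacement_probability(similarity: float) -> float:
        # Piecewise-linear mapping used by the dedup logic:
        #   s <= 0.4        -> 0.0
        #   0.4 < s <= 0.6  -> 3.5*s - 1.4   (interpolates (0.4, 0.0)..(0.6, 0.7))
        #   0.6 < s <  0.9  -> s + 0.1       (interpolates (0.6, 0.7)..(0.9, 1.0))
        #   s >= 0.9        -> 1.0
        if similarity <= 0.4:
            return 0.0
        if similarity >= 0.9:
            return 1.0
        if similarity <= 0.6:
            return max(0.0, 3.5 * similarity - 1.4)
        return min(1.0, similarity + 0.1)

    if __name__ == "__main__":
        previous = "already greeted them, just waiting for a reply, no need to speak again"
        latest = "already greeted them, just waiting for a reply, no need to speak again or use any tool"
        s = calculate_similarity(previous, latest)
        p = calculate_replacement_probability(s)
        print(f"similarity={s:.2f}, replacement probability={p:.2f}")
        if random.random() < p:
            print("would trim the repeated part of the new thought")

Replacement only fires when random.random() < p, so near-duplicate thoughts (similarity approaching 1.0) are almost always trimmed, while loosely related ones (similarity at or below 0.4) are never touched.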