From 900ce211759ce259ca6992868105757d9ea079d1 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 12 May 2025 21:12:59 +0800
Subject: [PATCH] =?UTF-8?q?feat=EF=BC=9A=E6=B7=BB=E5=8A=A0=E8=AF=AD?=
 =?UTF-8?q?=E8=A8=80=E9=A3=8E=E6=A0=BC=E5=AD=A6=E4=B9=A0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/heart_flow/info/obs_info.py                |   8 +
 .../observation/chatting_observation.py        |  46 ++----
 .../observation/hfcloop_observation.py         |  17 +-
 src/main.py                                    |  11 ++
 .../expressors/default_expressor.py            |   8 +-
 .../expressors/exprssion_learner.py            | 148 ++++++++++++++++++
 src/plugins/heartFC_chat/heartFC_chat.py       |   3 +-
 .../heartFC_chat/heartflow_prompt_builder.py   |  65 +++++---
 .../info_processors/chattinginfo_processor.py  |  49 +++++-
 .../info_processors/mind_processor.py          |  22 ++-
 .../info_processors/tool_processor.py          |  18 +--
 src/plugins/heartFC_chat/memory_activator.py   |  36 +++--
 src/plugins/utils/chat_message_builder.py      |  18 +++
 13 files changed, 340 insertions(+), 109 deletions(-)
 create mode 100644 src/plugins/heartFC_chat/expressors/exprssion_learner.py

diff --git a/src/heart_flow/info/obs_info.py b/src/heart_flow/info/obs_info.py
index 9a3d41f56..05dcf98c8 100644
--- a/src/heart_flow/info/obs_info.py
+++ b/src/heart_flow/info/obs_info.py
@@ -37,6 +37,14 @@ class ObsInfo(InfoBase):
         """
         self.data["talking_message_str_truncate"] = message
 
+    def set_previous_chat_info(self, message: str) -> None:
+        """设置之前聊天信息
+
+        Args:
+            message (str): 之前聊天信息内容
+        """
+        self.data["previous_chat_info"] = message
+
     def set_chat_type(self, chat_type: str) -> None:
         """设置聊天类型
 
diff --git a/src/heart_flow/observation/chatting_observation.py b/src/heart_flow/observation/chatting_observation.py
index 21e32e68f..a8d6a807d 100644
--- a/src/heart_flow/observation/chatting_observation.py
+++ b/src/heart_flow/observation/chatting_observation.py
@@ -61,6 +61,9 @@ class ChattingObservation(Observation):
         self.max_mid_memory_len = global_config.compress_length_limit
         self.mid_memory_info = ""
         self.person_list = []
+        self.oldest_messages = []
+        self.oldest_messages_str = ""
+        self.compressor_prompt = ""
         self.llm_summary = LLMRequest(
             model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
         )
@@ -75,8 +78,8 @@ class ChattingObservation(Observation):
 
     # 进行一次观察 返回观察结果observe_info
     def get_observe_info(self, ids=None):
+        mid_memory_str = ""
         if ids:
-            mid_memory_str = ""
             for id in ids:
                 print(f"id:{id}")
                 try:
@@ -97,7 +100,10 @@ class ChattingObservation(Observation):
 
             return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str
         else:
-            return self.talking_message_str
+            mid_memory_str = "之前的聊天内容:\n"
+            for mid_memory in self.mid_memorys:
+                mid_memory_str += f"{mid_memory['theme']}\n"
+            return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str
 
     def serch_message_by_text(self, text: str) -> Optional[MessageRecv]:
         """
@@ -221,40 +227,10 @@ class ChattingObservation(Observation):
                 logger.error(f"构建总结 Prompt 失败 for chat {self.chat_id}: {e}")
                 # prompt remains None
 
-        summary = "没有主题的闲聊"  # 默认值
-        if prompt:  # Check if prompt was built successfully
-            try:
-                summary_result, _, _ = await self.llm_summary.generate_response(prompt)
-                if summary_result:  # 确保结果不为空
-                    summary = summary_result
-            except Exception as e:
-                logger.error(f"总结主题失败 for chat {self.chat_id}: {e}")
-                # 保留默认总结 "没有主题的闲聊"
-        else:
-            logger.warning(f"因 Prompt 构建失败,跳过 LLM 总结 for chat {self.chat_id}")
-
-        mid_memory = {
-            "id": str(int(datetime.now().timestamp())),
-            "theme": summary,
-            "messages": oldest_messages,  # 存储原始消息对象
-            "readable_messages": oldest_messages_str,
-            # "timestamps": oldest_timestamps,
-            "chat_id": self.chat_id,
-            "created_at": datetime.now().timestamp(),
-        }
-
-        self.mid_memorys.append(mid_memory)
-        if len(self.mid_memorys) > self.max_mid_memory_len:
-            self.mid_memorys.pop(0)  # 移除最旧的
-
-        mid_memory_str = "之前聊天的内容概述是:\n"
-        for mid_memory_item in self.mid_memorys:  # 重命名循环变量以示区分
-            time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
-            mid_memory_str += (
-                f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n"
-            )
-        self.mid_memory_info = mid_memory_str
+        self.compressor_prompt = prompt
+        self.oldest_messages = oldest_messages
+        self.oldest_messages_str = oldest_messages_str
 
         self.talking_message_str = await build_readable_messages(
             messages=self.talking_message,
diff --git a/src/heart_flow/observation/hfcloop_observation.py b/src/heart_flow/observation/hfcloop_observation.py
index f920761ce..e95c6895d 100644
--- a/src/heart_flow/observation/hfcloop_observation.py
+++ b/src/heart_flow/observation/hfcloop_observation.py
@@ -21,8 +21,8 @@ class HFCloopObservation:
         return self.observe_info
 
     def add_loop_info(self, loop_info: CycleDetail):
-        logger.debug(f"添加循环信息111111111111111111111111111111111111: {loop_info}")
-        print(f"添加循环信息111111111111111111111111111111111111: {loop_info}")
+        # logger.debug(f"添加循环信息111111111111111111111111111111111111: {loop_info}")
+        # print(f"添加循环信息111111111111111111111111111111111111: {loop_info}")
         print(f"action_taken: {loop_info.action_taken}")
         print(f"action_type: {loop_info.action_type}")
         print(f"response_info: {loop_info.response_info}")
@@ -63,9 +63,18 @@ class HFCloopObservation:
 
         # 包装提示块,增加可读性,即使没有连续回复也给个标记
         if cycle_info_block:
-            cycle_info_block = f"\n【近期回复历史】\n{cycle_info_block}\n"
+            cycle_info_block = f"\n你最近的回复\n{cycle_info_block}\n"
         else:  # 如果最近的活动循环不是文本回复,或者没有活动循环
-            cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n"
+            cycle_info_block = "\n"
+
+        # 获取history_loop中最新添加的
+        if self.history_loop:
+            last_loop = self.history_loop[-1]
+            start_time = last_loop.start_time
+            end_time = last_loop.end_time
+            time_diff = int((end_time - start_time) / 60)  # 按分钟计,与下面的文案保持一致
+
+            cycle_info_block += f"\n距离你上一次阅读消息已经过去了{time_diff}分钟\n"
 
         self.observe_info = cycle_info_block
 
diff --git a/src/main.py b/src/main.py
index be71524e2..862b9051a 100644
--- a/src/main.py
+++ b/src/main.py
@@ -18,6 +18,7 @@ from .plugins.remote import heartbeat_thread  # noqa: F401
 from .individuality.individuality import Individuality
 from .common.server import global_server
 from rich.traceback import install
+from .plugins.heartFC_chat.expressors.exprssion_learner import expression_learner
 from .api.main import start_api_server
 
 install(extra_lines=3)
@@ -129,6 +130,7 @@ class MainSystem:
                 self.build_memory_task(),
                 self.forget_memory_task(),
                 self.consolidate_memory_task(),
+                self.learn_and_store_expression_task(),
                 self.print_mood_task(),
                 self.remove_recalled_message_task(),
                 emoji_manager.start_periodic_check_register(),
@@ -163,6 +165,15 @@ class MainSystem:
             await HippocampusManager.get_instance().consolidate_memory()
             print("\033[1;32m[记忆整合]\033[0m 记忆整合完成")
 
+    @staticmethod
+    async def learn_and_store_expression_task():
+        """学习并存储表达方式任务"""
+        while True:
+            await asyncio.sleep(60)
+            print("\033[1;32m[表达方式学习]\033[0m 开始学习表达方式...")
+            await expression_learner.learn_and_store_expression()
+            print("\033[1;32m[表达方式学习]\033[0m 表达方式学习完成")
+
     async def print_mood_task(self):
         """打印情绪状态"""
         while True:
diff --git a/src/plugins/heartFC_chat/expressors/default_expressor.py b/src/plugins/heartFC_chat/expressors/default_expressor.py
index fd0448ac4..4e08e2ba8 100644
--- a/src/plugins/heartFC_chat/expressors/default_expressor.py
+++ b/src/plugins/heartFC_chat/expressors/default_expressor.py
@@ -54,10 +54,10 @@ class DefaultExpressor:
             user_nickname=global_config.BOT_NICKNAME,
             platform=messageinfo.platform,
         )
-        logger.debug(f"创建思考消息:{anchor_message}")
-        logger.debug(f"创建思考消息chat:{chat}")
-        logger.debug(f"创建思考消息bot_user_info:{bot_user_info}")
-        logger.debug(f"创建思考消息messageinfo:{messageinfo}")
+        # logger.debug(f"创建思考消息:{anchor_message}")
+        # logger.debug(f"创建思考消息chat:{chat}")
+        # logger.debug(f"创建思考消息bot_user_info:{bot_user_info}")
+        # logger.debug(f"创建思考消息messageinfo:{messageinfo}")
 
         thinking_time_point = round(time.time(), 2)
         thinking_id = "mt" + str(thinking_time_point)
diff --git a/src/plugins/heartFC_chat/expressors/exprssion_learner.py b/src/plugins/heartFC_chat/expressors/exprssion_learner.py
new file mode 100644
index 000000000..af5c87226
--- /dev/null
+++ b/src/plugins/heartFC_chat/expressors/exprssion_learner.py
@@ -0,0 +1,148 @@
+import time
+from typing import List, Dict, Optional, Any, Tuple
+from src.common.logger_manager import get_logger
+from src.plugins.models.utils_model import LLMRequest
+from src.config.config import global_config
+from src.plugins.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_readable_messages
+from src.plugins.heartFC_chat.heartflow_prompt_builder import Prompt, global_prompt_manager
+import os
+import json
+
+logger = get_logger("expressor")
+
+
+def init_prompt() -> None:
+    learn_expression_prompt = """
+{chat_str}
+
+请从上面这段群聊中概括除了人名为"麦麦"之外的人的语言风格,只考虑文字,不要考虑表情包和图片
+不要涉及具体的人名,只考虑语言风格
+思考回复语法,长度和情感
+思考有没有特殊的梗,一并总结成语言风格
+总结成如下格式的规律,总结的内容要详细,但具有概括性:
+当"xxx"时,使用"xxx",xxx不超过10个字
+
+例如:
+当"表示十分惊叹"时,使用"我嘞个xxxx"
+当"表示讽刺的赞同,不想讲道理"时,使用"对对对"
+当"想表达某个观点,但不想明说"时,使用"反讽"
+当"想说明某个观点,但懒得明说"时,使用"懂的都懂"
+
+现在请你概括
+"""
+    Prompt(learn_expression_prompt, "learn_expression_prompt")
+
+
+class ExpressionLearner:
+    def __init__(self) -> None:
+        self.express_learn_model: LLMRequest = LLMRequest(
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=256,
+            request_type="response_heartflow",
+        )
+
+    async def get_expression_by_chat_id(self, chat_id: str) -> List[Dict[str, str]]:
+        """从data/expression/对应chat_id/expressions.json中读取表达方式"""
+        file_path: str = os.path.join("data", "expression", str(chat_id), "expressions.json")
+        if not os.path.exists(file_path):
+            return []
+        with open(file_path, "r", encoding="utf-8") as f:
+            expressions: List[dict] = json.load(f)
+        return expressions
+
+    async def learn_and_store_expression(self) -> List[Tuple[str, str, str]]:
+        """在最近24小时内随机选取最多10条消息,学习这些消息的表达方式并入库"""
+        logger.info("开始学习表达方式...")
+        expressions: Optional[List[Tuple[str, str, str]]] = await self.learn_expression()
+        logger.info(f"学习到{len(expressions) if expressions else 0}条表达方式")
+        # expressions: List[(chat_id, situation, style)]
+        if not expressions:
+            logger.info("没有学习到表达方式")
+            return []
+        # 按chat_id分组
+        chat_dict: Dict[str, List[Dict[str, str]]] = {}
+        for chat_id, situation, style in expressions:
+            if chat_id not in chat_dict:
+                chat_dict[chat_id] = []
+            chat_dict[chat_id].append({"situation": situation, "style": style})
+        # 存储到data/expression/对应chat_id/expressions.json
+        for chat_id, expr_list in chat_dict.items():
+            dir_path = os.path.join("data", "expression", str(chat_id))
+            os.makedirs(dir_path, exist_ok=True)
+            file_path = os.path.join(dir_path, "expressions.json")
+            # 若已存在,先读出合并
+            if os.path.exists(file_path):
+                old_data: List[Dict[str, str]] = []
+                try:
+                    with open(file_path, "r", encoding="utf-8") as f:
+                        old_data = json.load(f)
+                except Exception:
+                    old_data = []
+                expr_list = old_data + expr_list
+            with open(file_path, "w", encoding="utf-8") as f:
+                json.dump(expr_list, f, ensure_ascii=False, indent=2)
+        return expressions
+
+    async def learn_expression(self) -> Optional[List[Tuple[str, str, str]]]:
+        """在最近24小时内随机选中一个聊天,取其最多10条消息,学习这些消息的表达方式"""
+        current_time = time.time()
+        random_msg: Optional[List[Dict[str, Any]]] = get_raw_msg_by_timestamp_random(
+            current_time - 3600 * 24, current_time, limit=10
+        )
+        if not random_msg:
+            return None
+        # 转化成str
+        chat_id: str = random_msg[0]["chat_id"]
+        random_msg_str: str = await build_readable_messages(random_msg, timestamp_mode="normal")
+
+        prompt: str = await global_prompt_manager.format_prompt(
+            "learn_expression_prompt",
+            chat_str=random_msg_str,
+        )
+
+        logger.info(f"学习表达方式的prompt: {prompt}")
+
+        response, _ = await self.express_learn_model.generate_response_async(prompt)
+
+        logger.info(f"学习表达方式的response: {response}")
+
+        expressions: List[Tuple[str, str, str]] = self.parse_expression_response(response, chat_id)
+
+        return expressions
+
+    def parse_expression_response(self, response: str, chat_id: str) -> List[Tuple[str, str, str]]:
+        """
+        解析LLM返回的表达风格总结,每一行提取"当"和"使用"之后引号内的内容,存储为(chat_id, situation, style)元组
+        """
+        expressions: List[Tuple[str, str, str]] = []
+        for line in response.splitlines():
+            line = line.strip()
+            if not line:
+                continue
+            # 查找"当"和下一个引号
+            idx_when = line.find('当"')
+            if idx_when == -1:
+                continue
+            idx_quote1 = idx_when + 1
+            idx_quote2 = line.find('"', idx_quote1 + 1)
+            if idx_quote2 == -1:
+                continue
+            situation = line[idx_quote1 + 1 : idx_quote2]
+            # 查找"使用"
+            idx_use = line.find('使用"', idx_quote2)
+            if idx_use == -1:
+                continue
+            idx_quote3 = idx_use + 2
+            idx_quote4 = line.find('"', idx_quote3 + 1)
+            if idx_quote4 == -1:
+                continue
+            style = line[idx_quote3 + 1 : idx_quote4]
+            expressions.append((chat_id, situation, style))
+        return expressions
+
+
+init_prompt()
+
+expression_learner = ExpressionLearner()
diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py
index 25d6b3ad1..9bd24b242 100644
--- a/src/plugins/heartFC_chat/heartFC_chat.py
+++ b/src/plugins/heartFC_chat/heartFC_chat.py
@@ -408,7 +408,8 @@ class HeartFChatting:
         for observation in observations:
             logger.debug(f"{self.log_prefix} 观察信息: {observation}")
 
-        running_memorys = await self.memory_activator.activate_memory(observations)
+        with Timer("回忆", cycle_timers):
+            running_memorys = await self.memory_activator.activate_memory(observations)
 
         # 记录并行任务开始时间
         parallel_start_time = time.time()
diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/plugins/heartFC_chat/heartflow_prompt_builder.py
index 4d37268d6..dd12826eb 100644
--- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py
+++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py
@@ -14,6 +14,7 @@ from ..moods.moods import MoodManager
 from ..memory_system.Hippocampus import HippocampusManager
 from ..schedule.schedule_generator import bot_schedule
 from ..knowledge.knowledge_lib import qa_manager
+from .expressors.exprssion_learner import expression_learner
 
 import traceback
 
@@ -42,24 +43,22 @@ def init_prompt():
     #     )
     Prompt(
         """
-    你可以参考以下的语言习惯:
-    当表示惊叹时,使用 我嘞个xxxx
-    当表示惊讶无语是,使用 不是?
-    当表示无语时,使用 阿这
-    当表示震惊时,使用 卧槽
+你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用:
+{language_habits}
+
+不要生硬使用,而是结合到表达中
+
+{chat_info}
+
+你需要学习聊天内容中其他人的回复风格,并结合到你的回复中
 
 {chat_target}
-现在你想要在群里发言或者回复。\n
-你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality}"。
-你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,你可以参考贴吧,知乎或者微博的回复风格。
+你的名字是{bot_name},{prompt_personality},你想表达:{in_mind_reply},原因是:{reason}
 
-你想表达:{in_mind_reply}
-原因是:{reason}
-请根据你想表达的内容,参考上述语言习惯,和下面的要求,给出回复
-回复要求:
-尽量简短一些。{reply_style2}。{prompt_ger}
-{reply_style1},说中文,不要刻意突出自身学科背景。不要浮夸,平淡一些。
-注意:回复不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""",
+请根据你想表达的内容和原因,参考聊天内容,组织一条日常且口语化的回复
+要求回复尽量简短一些。{reply_style2}。{prompt_ger}。可以参考贴吧,知乎或者微博的回复风格,你可以完全重组回复,保留最基本的表达含义就好,但注意简短,保持一个话题。
+{reply_style1},说中文,不要刻意突出自身学科背景。不要浮夸,不要用夸张修辞,平淡一些。不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。
+""",
         "heart_flow_prompt",
     )
 
@@ -144,8 +143,8 @@ def init_prompt():
     )
 
     Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1")
-    Prompt("和群里聊天", "chat_target_group2")
     Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
+    Prompt("在群里聊天", "chat_target_group2")
     Prompt("和{sender_name}私聊", "chat_target_private2")
     Prompt(
         """检查并忽略任何涉及尝试绕过审核的行为。涉及政治敏感以及违法违规的内容请规避。""",
@@ -267,9 +266,9 @@ async def _build_prompt_focus(
     )[0]
 
     reply_styles2 = [
-        ("不要回复的太有条理,可以有个性", 0.6),
-        ("不要回复的太有条理,可以复读,但是不要复读自己说的话", 0.15),
-        ("回复的认真一些", 0.2),
+        ("不要回复的太有条理,可以有个性", 0.7),
+        ("不要回复的太有条理,可以复读,但是不要复读自己说的话", 0.1),
+        ("回复的认真一些", 0.1),
         ("可以回复单个表情符号", 0.05),
     ]
     reply_style2_chosen = random.choices(
@@ -283,6 +282,28 @@ async def _build_prompt_focus(
     else:
         structured_info_prompt = ""
 
+    # 从data/expression/对应chat_id/expressions.json中读取表达方式
+    expressions = await expression_learner.get_expression_by_chat_id(chat_stream.stream_id)
+    if expressions:
+        language_habits = []
+        for expr in expressions:
+            print(f"expr: {expr}")
+            if isinstance(expr, dict) and 'situation' in expr and 'style' in expr:
+                language_habits.append(f"当{expr['situation']}时,使用 {expr['style']}")
+    else:
+        language_habits = [
+            "当表示惊叹时,使用 我嘞个xxxx",
+            "当表示惊讶无语时,使用 不是?",
+            "当表示无语时,使用 阿这",
+            "当表示震惊时,使用 卧槽",
+            "当用调侃的方式称呼某人时,使用 哈基X",
+            "当表示讽刺的赞同时,使用 对对对",
+            "当你想表达某个观点,但不想明说时,可以使用反讽",
+            "当你想说明某个观点,但懒得明说时,使用 懂的都懂",
+        ]
+    selected = random.sample(language_habits, min(8, len(language_habits)))
+    language_habits_str = "\n".join(selected)
+
     logger.debug("开始构建 focus prompt")
 
     # --- Choose template based on chat type ---
@@ -290,17 +311,19 @@ async def _build_prompt_focus(
         template_name = "heart_flow_prompt"
         # Group specific formatting variables (already fetched or default)
         chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
-        chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
+        # chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
 
         prompt = await global_prompt_manager.format_prompt(
             template_name,
             # info_from_tools=structured_info_prompt,
+            language_habits=language_habits_str,
             chat_target=chat_target_1,  # Used in group template
             # chat_talking_prompt=chat_talking_prompt,
+            chat_info=chat_talking_prompt,
             bot_name=global_config.BOT_NICKNAME,
             # prompt_personality=prompt_personality,
            prompt_personality="",
-            chat_target_2=chat_target_2,  # Used in group template
+            # chat_target_2=chat_target_2,  # Used in group template
             # current_mind_info=current_mind_info,
            reply_style2=reply_style2_chosen,
            reply_style1=reply_style1_chosen,
diff --git a/src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py b/src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py
index 4b15d4728..42aea1527 100644
--- a/src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py
+++ b/src/plugins/heartFC_chat/info_processors/chattinginfo_processor.py
@@ -7,7 +7,10 @@ from src.common.logger_manager import get_logger
 from src.heart_flow.observation.chatting_observation import ChattingObservation
 from src.heart_flow.observation.hfcloop_observation import HFCloopObservation
 from src.heart_flow.info.cycle_info import CycleInfo
+from datetime import datetime
 from typing import Dict
+from src.plugins.models.utils_model import LLMRequest
+from src.config.config import global_config
 
 logger = get_logger("observation")
 
@@ -20,6 +23,9 @@ class ChattingInfoProcessor(BaseProcessor):
 
     def __init__(self):
         """初始化观察处理器"""
+        self.llm_summary = LLMRequest(
+            model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
+        )
         super().__init__()
 
     async def process_info(
@@ -48,6 +54,8 @@ class ChattingInfoProcessor(BaseProcessor):
             if isinstance(obs, ChattingObservation):
                 obs_info = ObsInfo()
 
+                await self.chat_compress(obs)
+
                 # 设置说话消息
                 if hasattr(obs, "talking_message_str"):
                     obs_info.set_talking_message(obs.talking_message_str)
@@ -56,6 +64,9 @@ class ChattingInfoProcessor(BaseProcessor):
                 if hasattr(obs, "talking_message_str_truncate"):
                     obs_info.set_talking_message_str_truncate(obs.talking_message_str_truncate)
 
+                if hasattr(obs, "mid_memory_info"):
+                    obs_info.set_previous_chat_info(obs.mid_memory_info)
+
                 # 设置聊天类型
                 is_group_chat = obs.is_group_chat
                 if is_group_chat:
@@ -65,7 +76,7 @@ class ChattingInfoProcessor(BaseProcessor):
                     obs_info.set_chat_target(obs.chat_target_info.get("person_name", "某人"))
 
                 obs_info.set_chat_type(chat_type)
-                logger.debug(f"聊天信息处理器处理后的信息: {obs_info}")
+                # logger.debug(f"聊天信息处理器处理后的信息: {obs_info}")
                 processed_infos.append(obs_info)
 
             if isinstance(obs, HFCloopObservation):
@@ -74,3 +85,39 @@ class ChattingInfoProcessor(BaseProcessor):
                 processed_infos.append(obs_info)
 
         return processed_infos
+
+    async def chat_compress(self, obs: ChattingObservation):
+        if obs.compressor_prompt:
+            summary = "没有主题的闲聊"  # 默认值,放在try之前,避免生成失败时summary未定义
+            try:
+                summary_result, _, _ = await self.llm_summary.generate_response(obs.compressor_prompt)
+                if summary_result:  # 确保结果不为空
+                    summary = summary_result
+            except Exception as e:
+                logger.error(f"总结主题失败 for chat {obs.chat_id}: {e}")
+
+            mid_memory = {
+                "id": str(int(datetime.now().timestamp())),
+                "theme": summary,
+                "messages": obs.oldest_messages,  # 存储原始消息对象
+                "readable_messages": obs.oldest_messages_str,
+                # "timestamps": oldest_timestamps,
+                "chat_id": obs.chat_id,
+                "created_at": datetime.now().timestamp(),
+            }
+
+            obs.mid_memorys.append(mid_memory)
+            if len(obs.mid_memorys) > obs.max_mid_memory_len:
+                obs.mid_memorys.pop(0)  # 移除最旧的
+
+            mid_memory_str = "之前聊天的内容概述是:\n"
+            for mid_memory_item in obs.mid_memorys:  # 重命名循环变量以示区分
+                time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
+                mid_memory_str += (
+                    f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n"
+                )
+            obs.mid_memory_info = mid_memory_str
+
+            obs.compressor_prompt = ""
+            obs.oldest_messages = []
+            obs.oldest_messages_str = ""
diff --git a/src/plugins/heartFC_chat/info_processors/mind_processor.py b/src/plugins/heartFC_chat/info_processors/mind_processor.py
index 85bd42068..26a2cdab4 100644
--- a/src/plugins/heartFC_chat/info_processors/mind_processor.py
+++ b/src/plugins/heartFC_chat/info_processors/mind_processor.py
@@ -32,20 +32,20 @@ def init_prompt():
 {memory_str}
 {extra_info}
 {relation_prompt}
-你的名字是{bot_name},{prompt_personality},你现在{mood_info}
+你的名字是{bot_name}
+{mood_info}
 {cycle_info_block}
 现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容:
 {chat_observe_info}
 
-以下是你之前对这个群聊的陈述:
+以下是你之前对聊天的观察和规划,你的名字是{bot_name}:
 {last_mind}
 
-现在请你继续输出思考内容,输出要求:
-1. 根据聊天内容生成你的想法,{hf_do_next}
-2. 参考之前的思考,基于之前的内容对这个群聊继续陈述,可以删除不重要的内容,添加新的内容
-3. 思考群内进行的话题,话题由谁发起,进展状况如何,你如何参与?思考你在群聊天中的角色,你是一个什么样的人,你在这个群聊中扮演什么角色?
-4. 注意群聊的时间线索,思考聊天的时间线。
-5. 请结合你做出的行为,对前面的陈述进行补充
+现在请你继续输出观察和规划,输出要求:
+1. 先关注未读新消息的内容和近期回复历史
+2. 根据新信息,修改和删除之前的观察和规划
+3. 根据聊天内容继续输出观察和规划,{hf_do_next}
+4. 注意群聊的时间线索,话题由谁发起,进展状况如何,思考聊天的时间线。
-6. 语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出思考内容就好"""
+5. 语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出思考内容就好"""
     Prompt(group_prompt, "sub_heartflow_prompt_before")
@@ -58,10 +58,8 @@ def init_prompt():
 {cycle_info_block}
 现在是{time_now},你正在上网,和 {chat_target_name} 私聊,以下是你们的聊天内容:
 {chat_observe_info}
-
-以下是你之前在这个群聊中的思考:
+以下是你之前对聊天的观察和规划:
 {last_mind}
-
 请仔细阅读聊天内容,想想你和 {chat_target_name} 的关系,回顾你们刚刚的交流,你刚刚发言和对方的反应,思考聊天的主题。
 请思考你要不要回复以及如何回复对方。
 思考并输出你的内心想法
@@ -222,7 +220,7 @@ class MindProcessor(BaseProcessor):
         prompt = (await global_prompt_manager.get_prompt_async(template_name)).format(
             memory_str=memory_str,
             extra_info=self.structured_info_str,
-            prompt_personality=prompt_personality,
+            # prompt_personality=prompt_personality,
             relation_prompt=relation_prompt,
             bot_name=individuality.name,
             time_now=time_now,
diff --git a/src/plugins/heartFC_chat/info_processors/tool_processor.py b/src/plugins/heartFC_chat/info_processors/tool_processor.py
index 78e375285..a51e04801 100644
--- a/src/plugins/heartFC_chat/info_processors/tool_processor.py
+++ b/src/plugins/heartFC_chat/info_processors/tool_processor.py
@@ -28,14 +28,8 @@ def init_prompt():
 {prompt_personality}
 
 你当前的额外信息:
-{extra_info}
-
 {memory_str}
 
-你的心情是:{mood_info}
-
-{relation_prompt}
-
 群里正在进行的聊天内容:
 {chat_observe_info}
@@ -46,7 +40,6 @@
 4. 考虑用户与你的关系以及当前的对话氛围
 
 如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"。
-尽量只在确实必要时才使用工具。
 """
     Prompt(tool_executor_prompt, "tool_executor_prompt")
@@ -57,7 +50,7 @@ class ToolProcessor(BaseProcessor):
         self.subheartflow_id = subheartflow_id
         self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
         self.llm_model = LLMRequest(
-            model=global_config.llm_normal,
+            model=global_config.llm_tool_use,
             max_tokens=500,
             request_type="tool_execution",
         )
@@ -141,9 +134,6 @@ class ToolProcessor(BaseProcessor):
         individuality = Individuality.get_instance()
         prompt_personality = individuality.get_prompt(x_person=2, level=2)
 
-        # 获取心情信息
-        mood_info = observation.chat_state.mood if hasattr(observation, "chat_state") else ""
-
         # 获取时间信息
         time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
 
@@ -151,13 +141,13 @@
         prompt = await global_prompt_manager.format_prompt(
             "tool_executor_prompt",
             memory_str=memory_str,
-            extra_info="extra_structured_info",
+            # extra_info="extra_structured_info",
             chat_observe_info=chat_observe_info,
             # chat_target_name=chat_target_name,
             is_group_chat=is_group_chat,
-            relation_prompt=relation_prompt,
+            # relation_prompt=relation_prompt,
             prompt_personality=prompt_personality,
-            mood_info=mood_info,
+            # mood_info=mood_info,
             bot_name=individuality.name,
             time_now=time_now,
         )
diff --git a/src/plugins/heartFC_chat/memory_activator.py b/src/plugins/heartFC_chat/memory_activator.py
index 67fac8ab6..dea532145 100644
--- a/src/plugins/heartFC_chat/memory_activator.py
+++ b/src/plugins/heartFC_chat/memory_activator.py
@@ -4,8 +4,7 @@ from src.heart_flow.observation.hfcloop_observation import HFCloopObservation
 from src.plugins.models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.common.logger_manager import get_logger
-from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
-from src.plugins.heartFC_chat.hfc_utils import get_keywords_from_json
+from src.plugins.utils.prompt_builder import Prompt
 from datetime import datetime
 from src.plugins.memory_system.Hippocampus import HippocampusManager
 from typing import List, Dict
@@ -35,7 +34,7 @@ def init_prompt():
 class MemoryActivator:
     def __init__(self):
         self.summary_model = LLMRequest(
-            model=global_config.llm_summary, temperature=0.7, max_tokens=300, request_type="chat_observation"
+            model=global_config.llm_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
         )
         self.running_memory = []
 
@@ -60,24 +59,27 @@ class MemoryActivator:
         elif isinstance(observation, HFCloopObservation):
             obs_info_text += observation.get_observe_info()
 
-        prompt = await global_prompt_manager.format_prompt(
-            "memory_activator_prompt",
-            obs_info_text=obs_info_text,
-        )
+        # prompt = await global_prompt_manager.format_prompt(
+        #     "memory_activator_prompt",
+        #     obs_info_text=obs_info_text,
+        # )
 
-        logger.debug(f"prompt: {prompt}")
+        # logger.debug(f"prompt: {prompt}")
 
-        response = await self.summary_model.generate_response(prompt)
+        # response = await self.summary_model.generate_response(prompt)
 
-        logger.debug(f"response: {response}")
+        # logger.debug(f"response: {response}")
 
-        # 只取response的第一个元素(字符串)
-        response_str = response[0]
-        keywords = list(get_keywords_from_json(response_str))
+        # # 只取response的第一个元素(字符串)
+        # response_str = response[0]
+        # keywords = list(get_keywords_from_json(response_str))
 
-        # 调用记忆系统获取相关记忆
-        related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
-            valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
+        # #调用记忆系统获取相关记忆
+        # related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
+        #     valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
+        # )
+        related_memory = await HippocampusManager.get_instance().get_memory_from_text(
+            text=obs_info_text, max_memory_num=3, max_memory_length=2, max_depth=3, fast_retrieval=True
         )
 
         logger.debug(f"获取到的记忆: {related_memory}")
@@ -85,7 +87,7 @@ class MemoryActivator:
         # 激活时,所有已有记忆的duration+1,达到3则移除
         for m in self.running_memory[:]:
             m["duration"] = m.get("duration", 1) + 1
-        self.running_memory = [m for m in self.running_memory if m["duration"] < 4]
+        self.running_memory = [m for m in self.running_memory if m["duration"] < 3]
 
         if related_memory:
             for topic, memory in related_memory:
diff --git a/src/plugins/utils/chat_message_builder.py b/src/plugins/utils/chat_message_builder.py
index f30403e39..773d8f336 100644
--- a/src/plugins/utils/chat_message_builder.py
+++ b/src/plugins/utils/chat_message_builder.py
@@ -7,6 +7,7 @@ from src.config.config import global_config
 # import traceback
 from typing import List, Dict, Any, Tuple  # 确保类型提示被导入
 import time  # 导入 time 模块以获取当前时间
+import random
 
 # 导入新的 repository 函数
 from src.common.message_repository import find_messages, count_messages
@@ -69,6 +70,23 @@ def get_raw_msg_by_timestamp_with_chat_users(
     return find_messages(message_filter=filter_query, sort=sort_order, limit=limit, limit_mode=limit_mode)
 
 
+def get_raw_msg_by_timestamp_random(
+    timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest"
+) -> List[Dict[str, Any]]:
+    """
+    先在时间戳范围内随机选择一条消息,取得其chat_id,然后获取该聊天在同一时间戳范围内的消息
+    """
+    # 获取范围内的所有消息
+    all_msgs = get_raw_msg_by_timestamp(timestamp_start, timestamp_end)
+    if not all_msgs:
+        return []
+    # 随机选一条
+    msg = random.choice(all_msgs)
+    chat_id = msg["chat_id"]
+    # 用 chat_id 获取该聊天在指定时间戳范围内的消息
+    return get_raw_msg_by_timestamp_with_chat(chat_id, timestamp_start, timestamp_end, limit, limit_mode)
+
+
 def get_raw_msg_by_timestamp_with_users(
     timestamp_start: float, timestamp_end: float, person_ids: list, limit: int = 0, limit_mode: str = "latest"
 ) -> List[Dict[str, Any]]:
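
A note on the on-disk format introduced by ExpressionLearner: learned expressions live in data/expression/<chat_id>/expressions.json as a JSON list of {"situation": ..., "style": ...} objects, and learn_and_store_expression merges new items into any existing file. The standalone sketch below restates that read-merge-write step; the de-duplication by (situation, style) is a hypothetical hardening and not something the patch does (the patch appends unconditionally, so the file can grow without bound).

import json
import os
from typing import Dict, List


def merge_expressions(chat_id: str, new_items: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """Read existing expressions for a chat, append new ones, write back.

    Mirrors ExpressionLearner.learn_and_store_expression; the seen-set
    de-duplication is an added safeguard, not part of the patch.
    """
    file_path = os.path.join("data", "expression", str(chat_id), "expressions.json")
    os.makedirs(os.path.dirname(file_path), exist_ok=True)

    old: List[Dict[str, str]] = []
    if os.path.exists(file_path):
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                old = json.load(f)
        except Exception:
            old = []  # corrupt or unreadable file: start over, as the patch does

    merged, seen = [], set()
    for item in old + new_items:
        key = (item.get("situation"), item.get("style"))
        if key not in seen:
            seen.add(key)
            merged.append(item)

    with open(file_path, "w", encoding="utf-8") as f:
        json.dump(merged, f, ensure_ascii=False, indent=2)
    return merged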
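
parse_expression_response scans each LLM output line for the quoted span after 当 and the quoted span after 使用. An equivalent regex formulation, shown only to document the expected line format (this is not the code in the patch):

import re
from typing import List, Tuple

# One learned rule per line, e.g.:  当"表示十分惊叹"时,使用"我嘞个xxxx"
RULE_RE = re.compile(r'当"([^"]+)"[^"]*?使用"([^"]+)"')


def parse_rules(response: str, chat_id: str) -> List[Tuple[str, str, str]]:
    rules = []
    for line in response.splitlines():
        m = RULE_RE.search(line)
        if m:
            # (chat_id, situation, style), matching the patch's tuple shape
            rules.append((chat_id, m.group(1), m.group(2)))
    return rules


assert parse_rules('当"表示讽刺的赞同"时,使用"对对对"', "42") == [("42", "表示讽刺的赞同", "对对对")]

Like the patch's index-based parser, lines that lack either quoted part are silently skipped, which makes the learner tolerant of LLM preamble or commentary lines.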
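
get_raw_msg_by_timestamp_random picks one uniformly random message in the time window and then loads that message's whole chat for the same window. Restated against an in-memory list instead of the find_messages repository (the stand-in data here is illustrative only):

import random
import time
from typing import Any, Dict, List


def sample_chat_window(messages: List[Dict[str, Any]], limit: int = 10) -> List[Dict[str, Any]]:
    """Pick a random message, then return up to `limit` messages from the
    same chat_id — the same two-step selection the new helper performs
    against the database."""
    if not messages:
        return []
    chat_id = random.choice(messages)["chat_id"]
    same_chat = [m for m in messages if m["chat_id"] == chat_id]
    return same_chat[-limit:]  # corresponds to limit_mode="latest"


now = time.time()
fake = [{"chat_id": cid, "time": now - i} for i, cid in enumerate("AABAB")]
print(sample_chat_window(fake, limit=2))

Because the seed message is uniform over all messages rather than over chats, busier chats are proportionally more likely to be selected; that appears to be the intended bias toward active chats, so the learner spends its effort where conversation actually happens.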
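
The retuned reply_styles2 weights (0.7, 0.1, 0.1, 0.05) sum to 0.95 rather than 1.0. That is harmless: random.choices treats weights as relative and normalizes them internally. In isolation:

import random

reply_styles2 = [
    ("不要回复的太有条理,可以有个性", 0.7),
    ("不要回复的太有条理,可以复读,但是不要复读自己说的话", 0.1),
    ("回复的认真一些", 0.1),
    ("可以回复单个表情符号", 0.05),
]

# random.choices only requires relative weights, so they need not sum to 1.
style = random.choices(
    [s for s, _ in reply_styles2],
    weights=[w for _, w in reply_styles2],
    k=1,
)[0]
print(style)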
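
MemoryActivator now retrieves memories directly from the observation text (get_memory_from_text with fast_retrieval=True, replacing the LLM keyword-extraction round trip) and ages recalled topics out one activation sooner (duration < 3 instead of < 4). The aging rule, restated on its own:

from typing import Dict, List


def age_running_memory(running: List[Dict]) -> List[Dict]:
    """Each activation bumps duration; entries reaching 3 are dropped.
    Matches the tightened threshold in MemoryActivator.activate_memory."""
    for m in running:
        m["duration"] = m.get("duration", 1) + 1
    return [m for m in running if m["duration"] < 3]


mem = [{"topic": "a", "duration": 1}, {"topic": "b", "duration": 2}]
mem = age_running_memory(mem)  # "b" hits 3 and is evicted
assert [m["topic"] for m in mem] == ["a"]

The net effect is that a recalled topic survives for fewer consecutive thinking cycles, so stale memories drop out of the prompt faster.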