From 0f5fdc2ae5b071df3d29d8dfd043fc0472bcd1f9 Mon Sep 17 00:00:00 2001 From: SengokuCola <1026294844@qq.com> Date: Sat, 21 Jun 2025 15:46:53 +0800 Subject: [PATCH] =?UTF-8?q?feat=EF=BC=9A=E5=90=88=E5=B9=B6=E8=87=AA?= =?UTF-8?q?=E6=88=91=E5=A4=84=E7=90=86=E5=99=A8=E5=92=8C=E5=85=B3=E7=B3=BB?= =?UTF-8?q?=E5=A4=84=E7=90=86=E5=99=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- scripts/log_viewer_optimized.py | 189 ++++++-- src/chat/focus_chat/heartFC_chat.py | 3 - .../info_processors/relationship_processor.py | 102 +++-- .../info_processors/self_processor.py | 184 -------- .../focus_chat/planners/planner_simple.py | 28 +- src/config/official_configs.py | 6 - src/individuality/individuality.py | 403 +++++------------- src/person_info/person_info.py | 1 + src/plugins/built_in/core_actions/plugin.py | 6 +- template/bot_config_template.toml | 3 +- 10 files changed, 327 insertions(+), 598 deletions(-) delete mode 100644 src/chat/focus_chat/info_processors/self_processor.py diff --git a/scripts/log_viewer_optimized.py b/scripts/log_viewer_optimized.py index c970022c4..fbf698e87 100644 --- a/scripts/log_viewer_optimized.py +++ b/scripts/log_viewer_optimized.py @@ -7,6 +7,7 @@ import toml from datetime import datetime from collections import defaultdict import os +import time class LogIndex: @@ -334,40 +335,33 @@ class VirtualLogDisplay: def display_batch(self, start_index, end_index): """批量显示日志条目""" - batch_text = [] - batch_tags = [] - for i in range(start_index, end_index): log_entry = self.log_index.get_entry_at_filtered_position(i) if log_entry: - parts, tags = self.formatter.format_log_entry(log_entry) + self.append_entry(log_entry, scroll=False) - # 合并部分为单行文本 - line_text = " ".join(parts) + "\n" - batch_text.append(line_text) + def append_entry(self, log_entry, scroll=True): + """将单个日志条目附加到文本小部件""" + # 检查在添加新内容之前视图是否已滚动到底部 + should_scroll = scroll and self.text_widget.yview()[1] > 0.99 - # 记录标签信息(简化处理) - if tags and self.formatter.enable_level_colors: - level = log_entry.get("level", "info") - batch_tags.append( - ( - "line", - len("".join(batch_text)) - len(line_text), - len("".join(batch_text)) - 1, - f"level_{level}", - ) - ) + parts, tags = self.formatter.format_log_entry(log_entry) + line_text = " ".join(parts) + "\n" - # 一次性插入所有文本 - if batch_text: - start_pos = self.text_widget.index(tk.END) - all_text = "".join(batch_text) - self.text_widget.insert(tk.END, all_text) + # 获取插入前的末尾位置 + start_pos = self.text_widget.index(tk.END + "-1c") + self.text_widget.insert(tk.END, line_text) - # 应用标签(可选,为了性能可以考虑简化) - for tag_info in batch_tags: - tag_name = tag_info[3] - self.text_widget.tag_add(tag_name, f"{start_pos}+{tag_info[1]}c", f"{start_pos}+{tag_info[2]}c") + # 为每个部分应用正确的标签 + current_len = 0 + for part, tag_name in zip(parts, tags): + start_index = f"{start_pos}+{current_len}c" + end_index = f"{start_pos}+{current_len + len(part)}c" + self.text_widget.tag_add(tag_name, start_index, end_index) + current_len += len(part) + 1 # 计入空格 + + if should_scroll: + self.text_widget.see(tk.END) class AsyncLogLoader: @@ -459,6 +453,9 @@ class LogViewer: # 初始化日志文件路径 self.current_log_file = Path("logs/app.log.jsonl") + self.last_file_size = 0 + self.watching_thread = None + self.is_watching = tk.BooleanVar(value=True) # 初始化异步加载器 self.async_loader = AsyncLogLoader(self.on_file_loaded) @@ -548,6 +545,9 @@ class LogViewer: ttk.Button(button_frame, text="选择文件", command=self.select_log_file).pack(side=tk.LEFT, padx=2) ttk.Button(button_frame, text="刷新", 
command=self.refresh_log_file).pack(side=tk.LEFT, padx=2) + ttk.Checkbutton(button_frame, text="实时更新", variable=self.is_watching, command=self.toggle_watching).pack( + side=tk.LEFT, padx=2 + ) # 过滤控制框架 filter_frame = ttk.Frame(self.control_frame) @@ -583,16 +583,22 @@ class LogViewer: return self.log_index = log_index + try: + self.last_file_size = os.path.getsize(self.current_log_file) + except OSError: + self.last_file_size = 0 self.status_var.set(f"已加载 {log_index.total_entries} 条日志") # 更新模块列表 - self.modules = set(log_index.module_index.keys()) - module_values = ["全部"] + sorted(list(self.modules)) - self.module_combo["values"] = module_values + self.update_module_list() # 应用过滤并显示 self.filter_logs() + # 如果开启了实时更新,则开始监视 + if self.is_watching.get(): + self.start_watching() + def on_loading_progress(self, progress, line_count): """加载进度回调""" self.root.after(0, lambda: self.update_progress(progress, line_count)) @@ -604,6 +610,8 @@ class LogViewer: def load_log_file_async(self): """异步加载日志文件""" + self.stop_watching() # 停止任何正在运行的监视器 + if not self.current_log_file.exists(): self.status_var.set("文件不存在") return @@ -617,6 +625,7 @@ class LogViewer: self.log_index = LogIndex() self.modules.clear() self.selected_modules.clear() + self.module_var.set("全部") # 开始异步加载 self.async_loader.load_file_async(str(self.current_log_file), self.on_loading_progress) @@ -672,6 +681,126 @@ class LogViewer: """刷新日志文件""" self.load_log_file_async() + def toggle_watching(self): + """切换实时更新状态""" + if self.is_watching.get(): + self.start_watching() + else: + self.stop_watching() + + def start_watching(self): + """开始监视文件变化""" + if self.watching_thread and self.watching_thread.is_alive(): + return # 已经在监视 + + if not self.current_log_file.exists(): + self.is_watching.set(False) + messagebox.showwarning("警告", "日志文件不存在,无法开启实时更新。") + return + + self.watching_thread = threading.Thread(target=self.watch_file_loop, daemon=True) + self.watching_thread.start() + + def stop_watching(self): + """停止监视文件变化""" + self.is_watching.set(False) + # 线程通过检查 is_watching 变量来停止,这里不需要强制干预 + self.watching_thread = None + + def watch_file_loop(self): + """监视文件循环""" + while self.is_watching.get(): + try: + if not self.current_log_file.exists(): + self.root.after( + 0, + lambda: messagebox.showwarning("警告", "日志文件丢失,已停止实时更新。"), + ) + self.root.after(0, self.is_watching.set, False) + break + + current_size = os.path.getsize(self.current_log_file) + if current_size > self.last_file_size: + new_entries = self.read_new_logs(self.last_file_size) + self.last_file_size = current_size + if new_entries: + self.root.after(0, self.append_new_logs, new_entries) + elif current_size < self.last_file_size: + # 文件被截断或替换 + self.last_file_size = 0 + self.root.after(0, self.refresh_log_file) + break # 刷新会重新启动监视(如果需要),所以结束当前循环 + + except Exception as e: + print(f"监视日志文件时出错: {e}") + self.root.after(0, self.is_watching.set, False) + break + + time.sleep(1) + + self.watching_thread = None + + def read_new_logs(self, from_position): + """读取新的日志条目并返回它们""" + new_entries = [] + new_modules_found = False + with open(self.current_log_file, "r", encoding="utf-8") as f: + f.seek(from_position) + line_count = self.log_index.total_entries + for line in f: + if line.strip(): + try: + log_entry = json.loads(line) + self.log_index.add_entry(line_count, log_entry) + new_entries.append(log_entry) + + logger_name = log_entry.get("logger_name", "") + if logger_name and logger_name not in self.modules: + self.modules.add(logger_name) + new_modules_found = True + + line_count += 1 + except 
json.JSONDecodeError: + continue + if new_modules_found: + self.root.after(0, self.update_module_list) + return new_entries + + def append_new_logs(self, new_entries): + """将新日志附加到显示中""" + # 检查是否应附加或执行完全刷新(例如,如果过滤器处于活动状态) + selected_modules = ( + self.selected_modules if (self.selected_modules and "全部" not in self.selected_modules) else None + ) + level = self.level_var.get() if self.level_var.get() != "全部" else None + search_text = self.search_var.get().strip() if self.search_var.get().strip() else None + + is_filtered = selected_modules or level or search_text + + if is_filtered: + # 如果过滤器处于活动状态,我们必须执行完全刷新以应用它们 + self.filter_logs() + return + + # 如果没有过滤器,只需附加新日志 + for entry in new_entries: + self.log_display.append_entry(entry) + + # 更新状态 + total_count = self.log_index.total_entries + self.status_var.set(f"显示 {total_count} 条日志") + + def update_module_list(self): + """更新模块下拉列表""" + current_selection = self.module_var.get() + self.modules = set(self.log_index.module_index.keys()) + module_values = ["全部"] + sorted(list(self.modules)) + self.module_combo["values"] = module_values + if current_selection in module_values: + self.module_var.set(current_selection) + else: + self.module_var.set("全部") + def main(): root = tk.Tk() diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index f8cd5dfe3..88a87565c 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -23,7 +23,6 @@ from src.chat.heart_flow.observation.actions_observation import ActionObservatio from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor from src.chat.focus_chat.memory_activator import MemoryActivator from src.chat.focus_chat.info_processors.base_processor import BaseProcessor -from src.chat.focus_chat.info_processors.self_processor import SelfProcessor from src.chat.focus_chat.info_processors.expression_selector_processor import ExpressionSelectorProcessor from src.chat.focus_chat.planners.planner_factory import PlannerFactory from src.chat.focus_chat.planners.modify_actions import ActionModifier @@ -45,7 +44,6 @@ PROCESSOR_CLASSES = { "ChattingInfoProcessor": (ChattingInfoProcessor, None), "ToolProcessor": (ToolProcessor, "tool_use_processor"), "WorkingMemoryProcessor": (WorkingMemoryProcessor, "working_memory_processor"), - "SelfProcessor": (SelfProcessor, "self_identify_processor"), "RelationshipProcessor": (RelationshipProcessor, "relation_processor"), "ExpressionSelectorProcessor": (ExpressionSelectorProcessor, "expression_selector_processor"), } @@ -184,7 +182,6 @@ class HeartFChatting: if name in [ "ToolProcessor", "WorkingMemoryProcessor", - "SelfProcessor", "RelationshipProcessor", "ExpressionSelectorProcessor", ]: diff --git a/src/chat/focus_chat/info_processors/relationship_processor.py b/src/chat/focus_chat/info_processors/relationship_processor.py index 4e20782ca..3fa662ee0 100644 --- a/src/chat/focus_chat/info_processors/relationship_processor.py +++ b/src/chat/focus_chat/info_processors/relationship_processor.py @@ -13,6 +13,7 @@ from typing import List from typing import Dict from src.chat.focus_chat.info.info_base import InfoBase from src.chat.focus_chat.info.relation_info import RelationInfo +from src.person_info.person_info import PersonInfoManager from json_repair import repair_json from src.person_info.person_info import get_person_info_manager import json @@ -48,10 +49,11 @@ def init_prompt(): 请不要重复调取相同的信息 {name_block} -请你阅读聊天记录,查看是否需要调取某个人的信息,这个人可以是出现在聊天记录中的,也可以是记录中提到的人。 
+请你阅读聊天记录,查看是否需要调取某个人的信息,这个人可以是出现在聊天记录中的,也可以是记录中提到的人,也可以是你自己({bot_name})。 你不同程度上认识群聊里的人,以及他们谈论到的人,你可以根据聊天记录,回忆起有关他们的信息,帮助你参与聊天 1.你需要提供用户名和你想要提取的信息名称类型来进行调取 2.请注意,提取的信息类型一定要和用户有关,不要提取无关的信息 +3.你也可以调取有关自己({bot_name})的信息 请以json格式输出,例如: @@ -59,7 +61,7 @@ def init_prompt(): "用户A": "ta的昵称", "用户B": "ta对你的态度", "用户D": "你对ta的印象", - "person_name": "其他信息", + "{bot_name}": "身份", "person_name": "其他信息", }} @@ -81,6 +83,18 @@ def init_prompt(): """ Prompt(fetch_info_prompt, "fetch_person_info_prompt") + fetch_bot_info_prompt = """ +你是{nickname},你的昵称有{alias_names}。 +以下是你对自己的了解,请你从中提取和"{info_type}"有关的信息,如果无法提取,请输出none: +{person_impression_block} +{points_text_block} +请严格按照以下json输出格式,不要输出多余内容: +{{ + "{info_type}": "有关你自己的{info_type}的信息内容" +}} +""" + Prompt(fetch_bot_info_prompt, "fetch_bot_info_prompt") + class RelationshipProcessor(BaseProcessor): log_prefix = "关系" @@ -549,6 +563,7 @@ class RelationshipProcessor(BaseProcessor): prompt = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format( name_block=name_block, + bot_name=global_config.bot.nickname, time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), chat_observe_info=chat_observe_info, info_cache_block=info_cache_block, @@ -567,17 +582,17 @@ class RelationshipProcessor(BaseProcessor): person_info_manager = get_person_info_manager() for person_name, info_type in content_json.items(): - person_id = person_info_manager.get_person_id_by_person_name(person_name) + is_bot = person_name == global_config.bot.nickname or person_name in global_config.bot.alias_names + if is_bot: + person_id = person_info_manager.get_person_id("system", "bot_id") + logger.info(f"{self.log_prefix} 检测到对bot自身({person_name})的信息查询,使用特殊ID。") + else: + person_id = person_info_manager.get_person_id_by_person_name(person_name) + if not person_id: logger.warning(f"{self.log_prefix} 未找到用户 {person_name} 的ID,跳过调取信息。") continue - # 检查是否是bot自己,如果是则跳过 - user_id = person_info_manager.get_value_sync(person_id, "user_id") - if user_id == global_config.bot.qq_account: - logger.info(f"{self.log_prefix} 跳过调取bot自己({person_name})的信息。") - continue - self.info_fetching_cache.append( { "person_id": person_id, @@ -747,42 +762,37 @@ class RelationshipProcessor(BaseProcessor): # 首先检查 info_list 缓存 info_list = await person_info_manager.get_value(person_id, "info_list") or [] cached_info = None + person_name = await person_info_manager.get_value(person_id, "person_name") - print(f"info_list: {info_list}") + # print(f"info_list: {info_list}") # 查找对应的 info_type for info_item in info_list: if info_item.get("info_type") == info_type: cached_info = info_item.get("info_content") - logger.info(f"{self.log_prefix} [缓存命中] 从 info_list 中找到 {info_type} 信息: {cached_info}") + logger.debug(f"{self.log_prefix} 在info_list中找到 {person_name} 的 {info_type} 信息: {cached_info}") break # 如果缓存中有信息,直接使用 if cached_info: - person_name = await person_info_manager.get_value(person_id, "person_name") if person_id not in self.info_fetched_cache: self.info_fetched_cache[person_id] = {} - if cached_info == "none": - unknow = True - else: - unknow = False - self.info_fetched_cache[person_id][info_type] = { "info": cached_info, - "ttl": 8, + "ttl": 4, "start_time": start_time, "person_name": person_name, - "unknow": unknow, + "unknow": cached_info == "none", } - logger.info(f"{self.log_prefix} [缓存使用] 直接使用缓存的 {person_name} 的 {info_type}: {cached_info}") + logger.info(f"{self.log_prefix} 记得 {person_name} 的 {info_type}: {cached_info}") return - logger.info(f"{self.log_prefix} [缓存命中] 缓存中没有信息") + + bot_person_id = 
PersonInfoManager.get_person_id("system", "bot_id") + is_bot = person_id == bot_person_id try: - nickname_str = ",".join(global_config.bot.alias_names) - name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。" person_name = await person_info_manager.get_value(person_id, "person_name") person_impression = await person_info_manager.get_value(person_id, "impression") if person_impression: @@ -804,31 +814,43 @@ class RelationshipProcessor(BaseProcessor): self.info_fetched_cache[person_id] = {} self.info_fetched_cache[person_id][info_type] = { "info": "none", - "ttl": 8, + "ttl": 4, "start_time": start_time, "person_name": person_name, "unknow": True, } + logger.info(f"{self.log_prefix} 完全不认识 {person_name}") + await self._save_info_to_cache(person_id, info_type, "none") return - prompt = (await global_prompt_manager.get_prompt_async("fetch_person_info_prompt")).format( - name_block=name_block, - info_type=info_type, - person_impression_block=person_impression_block, - person_name=person_name, - info_json_str=f'"{info_type}": "有关{info_type}的信息内容"', - points_text_block=points_text_block, - ) + if is_bot: + prompt = (await global_prompt_manager.get_prompt_async("fetch_bot_info_prompt")).format( + nickname=global_config.bot.nickname, + alias_names=",".join(global_config.bot.alias_names), + info_type=info_type, + person_impression_block=person_impression_block, + points_text_block=points_text_block, + ) + else: + nickname_str = ",".join(global_config.bot.alias_names) + name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。" + prompt = (await global_prompt_manager.get_prompt_async("fetch_person_info_prompt")).format( + name_block=name_block, + info_type=info_type, + person_impression_block=person_impression_block, + person_name=person_name, + info_json_str=f'"{info_type}": "有关{info_type}的信息内容"', + points_text_block=points_text_block, + ) except Exception: logger.error(traceback.format_exc()) - - print(prompt) + return try: # 使用小模型进行即时提取 content, _ = await self.instant_llm_model.generate_response_async(prompt=prompt) - logger.info(f"{self.log_prefix} [LLM提取] {person_name} 的 {info_type} 结果: {content}") + if content: content_json = json.loads(repair_json(content)) @@ -851,17 +873,15 @@ class RelationshipProcessor(BaseProcessor): await self._save_info_to_cache(person_id, info_type, info_content if not is_unknown else "none") if not is_unknown: - logger.info( - f"{self.log_prefix} [LLM提取] 成功获取并缓存 {person_name} 的 {info_type}: {info_content}" - ) + logger.info(f"{self.log_prefix} 思考得到,{person_name} 的 {info_type}: {content}") else: - logger.info(f"{self.log_prefix} [LLM提取] {person_name} 的 {info_type} 信息不明确") + logger.info(f"{self.log_prefix} 思考了也不知道{person_name} 的 {info_type} 信息") else: logger.warning( - f"{self.log_prefix} [LLM提取] 小模型返回空结果,获取 {person_name} 的 {info_type} 信息失败。" + f"{self.log_prefix} 小模型返回空结果,获取 {person_name} 的 {info_type} 信息失败。" ) except Exception as e: - logger.error(f"{self.log_prefix} [LLM提取] 执行小模型请求获取用户信息时出错: {e}") + logger.error(f"{self.log_prefix} 执行小模型请求获取用户信息时出错: {e}") logger.error(traceback.format_exc()) async def _save_info_to_cache(self, person_id: str, info_type: str, info_content: str): diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py deleted file mode 100644 index 648e7dadb..000000000 --- a/src/chat/focus_chat/info_processors/self_processor.py +++ /dev/null @@ -1,184 +0,0 @@ -from src.chat.heart_flow.observation.chatting_observation import 
ChattingObservation -from src.chat.heart_flow.observation.observation import Observation -from src.llm_models.utils_model import LLMRequest -from src.config.config import global_config -import time -import traceback -from src.common.logger import get_logger -from src.chat.utils.prompt_builder import Prompt, global_prompt_manager -from src.chat.message_receive.chat_stream import get_chat_manager -from .base_processor import BaseProcessor -from typing import List, Dict -from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation -from src.chat.focus_chat.info.info_base import InfoBase -from src.chat.focus_chat.info.self_info import SelfInfo -from src.individuality.individuality import get_individuality - -logger = get_logger("processor") - - -def init_prompt(): - indentify_prompt = """ -{time_now},以下是正在进行的聊天内容: -<聊天记录> -{chat_observe_info} - - -{name_block} -请你根据以上聊天记录,思考聊天记录中是否有人提到你自己相关的信息,或者有人询问你的相关信息。 - -请选择你需要查询的关键词来回答聊天中的问题。如果需要多个关键词,请用逗号隔开。 -如果聊天中没有涉及任何关于你的问题,请输出none。 - -现在请输出你要查询的关键词,注意只输出关键词就好,不要输出其他内容: -""" - Prompt(indentify_prompt, "indentify_prompt") - - -class SelfProcessor(BaseProcessor): - log_prefix = "自我认同" - - def __init__(self, subheartflow_id: str): - super().__init__() - - self.subheartflow_id = subheartflow_id - - self.info_fetched_cache: Dict[str, Dict[str, any]] = {} - - self.llm_model = LLMRequest( - model=global_config.model.utils_small, - request_type="focus.processor.self_identify", - ) - - name = get_chat_manager().get_stream_name(self.subheartflow_id) - self.log_prefix = f"[{name}] " - - async def process_info(self, observations: List[Observation] = None, *infos) -> List[InfoBase]: - """处理信息对象 - - Args: - *infos: 可变数量的InfoBase类型的信息对象 - - Returns: - List[InfoBase]: 处理后的结构化信息列表 - """ - self_info_str = await self.self_indentify(observations) - - if self_info_str: - self_info = SelfInfo() - self_info.set_self_info(self_info_str) - else: - self_info = None - return None - - return [self_info] - - async def self_indentify( - self, - observations: List[Observation] = None, - ): - """ - 在回复前进行思考,生成内心想法并收集工具调用结果 - - 参数: - observations: 观察信息 - - 返回: - 如果return_prompt为False: - tuple: (current_mind, past_mind) 当前想法和过去的想法列表 - 如果return_prompt为True: - tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt - """ - - if observations is None: - observations = [] - for observation in observations: - if isinstance(observation, ChattingObservation): - # 获取聊天元信息 - is_group_chat = observation.is_group_chat - chat_target_info = observation.chat_target_info - chat_target_name = "对方" # 私聊默认名称 - if not is_group_chat and chat_target_info: - # 优先使用person_name,其次user_nickname,最后回退到默认值 - chat_target_name = ( - chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name - ) - # 获取聊天内容 - chat_observe_info = observation.get_observe_info() - if isinstance(observation, HFCloopObservation): - pass - - nickname_str = "" - for nicknames in global_config.bot.alias_names: - nickname_str += f"{nicknames}," - name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。" - - # 获取所有可用的关键词 - individuality = get_individuality() - available_keywords = individuality.get_all_keywords() - available_keywords_str = "、".join(available_keywords) if available_keywords else "暂无关键词" - - prompt = (await global_prompt_manager.get_prompt_async("indentify_prompt")).format( - name_block=name_block, - time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), - chat_observe_info=chat_observe_info[-200:], - 
available_keywords=available_keywords_str, - bot_name=global_config.bot.nickname, - ) - - keyword = "" - - try: - keyword, _ = await self.llm_model.generate_response_async(prompt=prompt) - - # print(f"prompt: {prompt}\nkeyword: {keyword}") - - if not keyword: - logger.warning(f"{self.log_prefix} LLM返回空结果,自我识别失败。") - except Exception as e: - # 处理总体异常 - logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}") - logger.error(traceback.format_exc()) - keyword = "我是谁,我从哪来,要到哪去" - - # 解析关键词 - keyword = keyword.strip() - if not keyword or keyword == "none": - keyword_set = [] - else: - # 只保留非空关键词,去除多余空格 - keyword_set = [k.strip() for k in keyword.split(",") if k.strip()] - - # 从individuality缓存中查询关键词信息 - for keyword in keyword_set: - if keyword not in self.info_fetched_cache: - # 直接从individuality的json缓存中获取关键词信息 - fetched_info = individuality.get_keyword_info(keyword) - - if fetched_info: - self.info_fetched_cache[keyword] = { - "info": fetched_info, - "ttl": 5, - } - logger.info(f"{self.log_prefix} 从个体特征缓存中获取关键词 '{keyword}' 的信息") - - # 管理TTL(生存时间) - expired_keywords = [] - for fetched_keyword, info in self.info_fetched_cache.items(): - if info["ttl"] > 0: - info["ttl"] -= 1 - else: - expired_keywords.append(fetched_keyword) - - # 删除过期的关键词 - for expired_keyword in expired_keywords: - del self.info_fetched_cache[expired_keyword] - - fetched_info_str = "" - for keyword, info in self.info_fetched_cache.items(): - fetched_info_str += f"你的:{keyword}信息是: {info['info']}\n" - - return fetched_info_str - - -init_prompt() diff --git a/src/chat/focus_chat/planners/planner_simple.py b/src/chat/focus_chat/planners/planner_simple.py index d816fec49..ebbde9488 100644 --- a/src/chat/focus_chat/planners/planner_simple.py +++ b/src/chat/focus_chat/planners/planner_simple.py @@ -25,34 +25,10 @@ install(extra_lines=3) def init_prompt(): - # Prompt( - # """ - # {time_block} - # {indentify_block}你现在正在参与以下的聊天,以下是具体的聊天内容: - - # {chat_content_block} - - # {self_info_block} - # {relation_info_block} - - # {cycle_info_block} - - # {moderation_prompt} - # 注意,除了下面动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界,现在请你选择合适的action: - - # {action_options_text} - - # 请以动作的输出要求,以严格的 JSON 格式输出,且仅包含 JSON 内容。 - # 请输出你提取的JSON,不要有任何其他文字或解释: - - # """, - # "simple_planner_prompt", - # ) - Prompt( """ {time_block} -{indentify_block}你现在正在B站进行直播,你是一个虚拟主播,以下是直播间内容和电脑屏幕内容: +{indentify_block}你现在正在参与以下的聊天,以下是具体的聊天内容: {chat_content_block} @@ -62,7 +38,7 @@ def init_prompt(): {cycle_info_block} {moderation_prompt} -注意,除了下面动作选项之外,你在直播间里不能做其他任何事情,这是你能力的边界,现在请你选择合适的action: +注意,除了下面动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界,现在请你选择合适的action: {action_options_text} diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 3d751bf37..0a1c63c9d 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -167,12 +167,6 @@ class FocusChatConfig(ConfigBase): class FocusChatProcessorConfig(ConfigBase): """专注聊天处理器配置类""" - mind_processor: bool = False - """是否启用思维处理器""" - - self_identify_processor: bool = True - """是否启用自我识别处理器""" - relation_processor: bool = True """是否启用关系识别处理器""" diff --git a/src/individuality/individuality.py b/src/individuality/individuality.py index 92ab2c7d8..beb7422cb 100644 --- a/src/individuality/individuality.py +++ b/src/individuality/individuality.py @@ -14,37 +14,13 @@ from src.manager.async_task_manager import AsyncTask from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.common.logger import get_logger +from src.person_info.person_info import get_person_info_manager 
install(extra_lines=3) logger = get_logger("individuality") -def init_prompt(): - """初始化用于关键词提取的prompts""" - - extract_keywords_prompt = """ -请分析以下对{bot_name}的描述,提取出其中的独立关键词。每个关键词应该是可以用来从某一角度概括的方面,例如: -性格,身高,喜好,外貌,身份,兴趣,爱好,习惯,等等.......... - -描述内容: -{personality_sides} - -要求: -1. 选择关键词,对{bot_name}的某一方面进行概括 -2. 用json格式输出,以下是示例格式: -{{ - "性格":"性格开朗", - "兴趣":"喜欢唱歌", - "身份":"大学生", -}} -以上是一个例子,你可以输出多个关键词,现在请你根据描述内容进行总结{bot_name},输出json格式: - -请输出json格式,不要输出任何解释或其他内容 -""" - Prompt(extract_keywords_prompt, "extract_keywords_prompt") - - class Individuality: """个体特征管理类""" @@ -55,11 +31,8 @@ class Individuality: self.express_style: PersonalityExpression = PersonalityExpression() self.name = "" - - # 关键词缓存相关 - self.keyword_info_cache: dict = {} # {keyword: [info_list]} - self.fetch_info_file_path = "data/personality/fetch_info.json" - self.meta_info_file_path = "data/personality/meta_info.json" + self.bot_person_id = "" + self.meta_info_file_path = "data/personality/meta.json" async def initialize( self, @@ -76,6 +49,13 @@ class Individuality: personality_sides: 人格侧面描述 identity_detail: 身份细节描述 """ + person_info_manager = get_person_info_manager() + self.bot_person_id = person_info_manager.get_person_id("system", "bot_id") + self.name = bot_nickname + + # 检查配置变化,如果变化则清空 + await self._check_config_and_clear_if_changed(bot_nickname, personality_core, personality_sides, identity_detail) + # 初始化人格 self.personality = Personality.initialize( bot_nickname=bot_nickname, personality_core=personality_core, personality_sides=personality_sides @@ -84,13 +64,31 @@ class Individuality: # 初始化身份 self.identity = Identity(identity_detail=identity_detail) + # 将所有人设写入impression + impression_parts = [] + if personality_core: + impression_parts.append(f"核心人格: {personality_core}") + if personality_sides: + impression_parts.append(f"人格侧面: {'、'.join(personality_sides)}") + if identity_detail: + impression_parts.append(f"身份: {'、'.join(identity_detail)}") + + impression_text = "。".join(impression_parts) + if impression_text: + impression_text += "。" + + if impression_text: + update_data = { + "platform": "system", + "user_id": "bot_id", + "person_name": self.name, + "nickname": self.name, + } + await person_info_manager.update_one_field(self.bot_person_id, "impression", impression_text, data=update_data) + logger.info(f"已将完整人设更新到bot的impression中") + await self.express_style.extract_and_store_personality_expressions() - self.name = bot_nickname - - # 预处理关键词和生成信息缓存 - await self._preprocess_personality_keywords(personality_sides, identity_detail) - def to_dict(self) -> dict: """将个体特征转换为字典格式""" return { @@ -257,227 +255,64 @@ class Individuality: return self.personality.neuroticism return None - def _get_config_hash(self, personality_sides: list, identity_detail: list) -> str: + def _get_config_hash(self, bot_nickname: str, personality_core: str, personality_sides: list, identity_detail: list) -> str: """获取当前personality和identity配置的哈希值""" - # 将配置转换为字符串并排序,确保一致性 - config_str = json.dumps( - {"personality_sides": sorted(personality_sides), "identity_detail": sorted(identity_detail)}, sort_keys=True - ) - + config_data = { + "nickname": bot_nickname, + "personality_core": personality_core, + "personality_sides": sorted(personality_sides), + "identity_detail": sorted(identity_detail) + } + config_str = json.dumps(config_data, sort_keys=True) return hashlib.md5(config_str.encode("utf-8")).hexdigest() + async def _check_config_and_clear_if_changed( + self, bot_nickname: str, personality_core: str, personality_sides: list, identity_detail: 
list + ): + """检查配置是否发生变化,如果变化则清空info_list""" + person_info_manager = get_person_info_manager() + current_hash = self._get_config_hash(bot_nickname, personality_core, personality_sides, identity_detail) + + meta_info = self._load_meta_info() + stored_hash = meta_info.get("config_hash") + + if current_hash != stored_hash: + logger.info("检测到人格配置发生变化,将清空原有的关键词缓存。") + + # 清空数据库中的info_list + update_data = { + "platform": "system", + "user_id": "bot_id", + "person_name": self.name, + "nickname": self.name, + } + await person_info_manager.update_one_field(self.bot_person_id, "info_list", [], data=update_data) + + # 更新元信息文件,重置计数器 + new_meta_info = {"config_hash": current_hash} + self._save_meta_info(new_meta_info) + def _load_meta_info(self) -> dict: """从JSON文件中加载元信息""" if os.path.exists(self.meta_info_file_path): try: with open(self.meta_info_file_path, "r", encoding="utf-8") as f: return json.load(f) - except Exception as e: - print(f"读取meta_info文件失败: {e}") + except (json.JSONDecodeError, IOError) as e: + logger.error(f"读取meta_info文件失败: {e}, 将创建新文件。") return {} return {} def _save_meta_info(self, meta_info: dict): """将元信息保存到JSON文件""" try: - # 确保目录存在 os.makedirs(os.path.dirname(self.meta_info_file_path), exist_ok=True) with open(self.meta_info_file_path, "w", encoding="utf-8") as f: json.dump(meta_info, f, ensure_ascii=False, indent=2) - except Exception as e: - print(f"保存meta_info文件失败: {e}") + except IOError as e: + logger.error(f"保存meta_info文件失败: {e}") - def _check_config_change_and_clear(self, personality_sides: list, identity_detail: list): - """检查配置是否发生变化,如果变化则清空fetch_info.json""" - current_config_hash = self._get_config_hash(personality_sides, identity_detail) - meta_info = self._load_meta_info() - - stored_config_hash = meta_info.get("config_hash", "") - - if current_config_hash != stored_config_hash: - logger.info("检测到personality或identity配置发生变化,清空fetch_info数据") - - # 清空fetch_info文件 - if os.path.exists(self.fetch_info_file_path): - try: - os.remove(self.fetch_info_file_path) - logger.info("已清空fetch_info文件") - except Exception as e: - logger.error(f"清空fetch_info文件失败: {e}") - - # 更新元信息 - meta_info["config_hash"] = current_config_hash - self._save_meta_info(meta_info) - logger.info("已更新配置哈希值") - - def _load_fetch_info_from_file(self) -> dict: - """从JSON文件中加载已保存的fetch_info数据""" - if os.path.exists(self.fetch_info_file_path): - try: - with open(self.fetch_info_file_path, "r", encoding="utf-8") as f: - return json.load(f) - except Exception as e: - logger.error(f"读取fetch_info文件失败: {e}") - return {} - return {} - - def _save_fetch_info_to_file(self, fetch_info_data: dict): - """将fetch_info数据保存到JSON文件""" - try: - # 确保目录存在 - os.makedirs(os.path.dirname(self.fetch_info_file_path), exist_ok=True) - with open(self.fetch_info_file_path, "w", encoding="utf-8") as f: - json.dump(fetch_info_data, f, ensure_ascii=False, indent=2) - except Exception as e: - logger.error(f"保存fetch_info文件失败: {e}") - - async def _preprocess_personality_keywords(self, personality_sides: list, identity_detail: list): - """预处理personality关键词,提取关键词并生成缓存""" - try: - logger.info("开始预处理personality关键词...") - - # 检查配置变化 - self._check_config_change_and_clear(personality_sides, identity_detail) - - # 加载已有的预处理数据(如果存在) - fetch_info_data = self._load_fetch_info_from_file() - logger.info(f"加载已有数据,现有关键词数量: {len(fetch_info_data)}") - - # 构建完整描述(personality + identity) - personality_sides_str = "" - for personality_side in personality_sides: - personality_sides_str += f"{personality_side}," - - # 添加identity内容 - for detail in identity_detail: - 
personality_sides_str += f"{detail}," - - if not personality_sides_str: - logger.info("没有personality和identity配置,跳过预处理") - return - - # 提取关键词 - extract_prompt = (await global_prompt_manager.get_prompt_async("extract_keywords_prompt")).format( - personality_sides=personality_sides_str, bot_name=self.name - ) - - llm_model = LLMRequest( - model=global_config.model.utils_small, - request_type="individuality.keyword_extract", - ) - - keywords_result, _ = await llm_model.generate_response_async(prompt=extract_prompt) - logger.info(f"LLM返回的原始关键词结果: '{keywords_result}'") - - if not keywords_result or keywords_result.strip() == "none": - logger.info("未提取到有效关键词") - return - - # 使用json_repair修复并解析JSON - keyword_dict = json.loads(repair_json(keywords_result)) - logger.info(f"成功解析JSON格式的关键词: {keyword_dict}") - - # 从字典中提取关键词列表,跳过"keywords"键 - keyword_set = [] - for key, _value in keyword_dict.items(): - if key.lower() != "keywords" and key.strip(): - keyword_set.append(key.strip()) - - logger.info(f"最终提取的关键词列表: {keyword_set}") - logger.info(f"共提取到 {len(keyword_set)} 个关键词") - - # 处理每个关键词的信息 - updated_count = 0 - new_count = 0 - - for keyword in keyword_set: - try: - logger.info(f"正在处理关键词: '{keyword}' (长度: {len(keyword)})") - - # 检查是否已存在该关键词 - if keyword in fetch_info_data: - logger.info(f"关键词 '{keyword}' 已存在,将添加新信息...") - action_type = "追加" - else: - logger.info(f"正在为新关键词 '{keyword}' 生成信息...") - action_type = "新增" - fetch_info_data[keyword] = [] # 初始化为空列表 - - # 从JSON结果中获取关键词的信息 - existing_info_from_json = keyword_dict.get(keyword, "") - if ( - existing_info_from_json - and existing_info_from_json.strip() - and existing_info_from_json != keyword - ): - # 如果JSON中有有效信息且不只是重复关键词本身,直接使用 - logger.info(f"从JSON结果中获取到关键词 '{keyword}' 的信息: '{existing_info_from_json}'") - if existing_info_from_json not in fetch_info_data[keyword]: - fetch_info_data[keyword].append(existing_info_from_json) - if action_type == "追加": - updated_count += 1 - else: - new_count += 1 - logger.info(f"{action_type}关键词 '{keyword}' 的信息成功") - else: - logger.info(f"关键词 '{keyword}' 的信息已存在,跳过重复添加") - else: - logger.info(f"关键词 '{keyword}' 在JSON中没有有效信息,跳过") - - except Exception as e: - logger.error(f"为关键词 '{keyword}' 生成信息时出错: {e}") - continue - - # 保存合并后的数据到文件和内存缓存 - if updated_count > 0 or new_count > 0: - self._save_fetch_info_to_file(fetch_info_data) - logger.info( - f"预处理完成,新增 {new_count} 个关键词,追加 {updated_count} 个关键词信息,总计 {len(fetch_info_data)} 个关键词" - ) - else: - logger.info("预处理完成,但没有生成任何新的有效信息") - - # 将数据加载到内存缓存 - self.keyword_info_cache = fetch_info_data - logger.info(f"关键词缓存已加载,共 {len(self.keyword_info_cache)} 个关键词") - - # 注册定时任务(延迟执行,避免阻塞初始化) - import asyncio - - asyncio.create_task(self._register_keyword_update_task_delayed()) - - except Exception as e: - logger.error(f"预处理personality关键词时出错: {e}") - traceback.print_exc() - - async def _register_keyword_update_task_delayed(self): - """延迟注册关键词更新定时任务""" - try: - # 等待一小段时间确保系统完全初始化 - import asyncio - - await asyncio.sleep(5) - - from src.manager.async_task_manager import async_task_manager - - logger = get_logger("individuality") - - # 创建定时任务 - task = KeywordUpdateTask( - personality_sides=list(global_config.personality.personality_sides), - identity_detail=list(global_config.identity.identity_detail), - individuality_instance=self, - ) - - # 注册任务 - await async_task_manager.add_task(task) - logger.info("关键词更新定时任务已注册") - - except Exception as e: - logger.error(f"注册关键词更新定时任务失败: {e}") - traceback.print_exc() - - def get_keyword_info(self, keyword: str) -> str: + async def get_keyword_info(self, 
keyword: str) -> str: """获取指定关键词的信息 Args: @@ -486,13 +321,36 @@ class Individuality: Returns: str: 随机选择的一条信息,如果没有则返回空字符串 """ - if keyword in self.keyword_info_cache and self.keyword_info_cache[keyword]: - return random.choice(self.keyword_info_cache[keyword]) + person_info_manager = get_person_info_manager() + info_list_json = await person_info_manager.get_value(self.bot_person_id, "info_list") + if info_list_json: + try: + # get_value might return a pre-deserialized list if it comes from a cache, + # or a JSON string if it comes from DB. + info_list = json.loads(info_list_json) if isinstance(info_list_json, str) else info_list_json + + for item in info_list: + if isinstance(item, dict) and item.get("info_type") == keyword: + return item.get("info_content", "") + except (json.JSONDecodeError, TypeError): + logger.error(f"解析info_list失败: {info_list_json}") + return "" return "" - def get_all_keywords(self) -> list: + async def get_all_keywords(self) -> list: """获取所有已缓存的关键词列表""" - return list(self.keyword_info_cache.keys()) + person_info_manager = get_person_info_manager() + info_list_json = await person_info_manager.get_value(self.bot_person_id, "info_list") + keywords = [] + if info_list_json: + try: + info_list = json.loads(info_list_json) if isinstance(info_list_json, str) else info_list_json + for item in info_list: + if isinstance(item, dict) and "info_type" in item: + keywords.append(item["info_type"]) + except (json.JSONDecodeError, TypeError): + logger.error(f"解析info_list失败: {info_list_json}") + return keywords individuality = None @@ -503,66 +361,3 @@ def get_individuality(): if individuality is None: individuality = Individuality() return individuality - - -class KeywordUpdateTask(AsyncTask): - """关键词更新定时任务""" - - def __init__(self, personality_sides: list, identity_detail: list, individuality_instance): - # 调用父类构造函数 - super().__init__( - task_name="keyword_update_task", - wait_before_start=3600, # 1小时后开始 - run_interval=3600, # 每小时运行一次 - ) - - self.personality_sides = personality_sides - self.identity_detail = identity_detail - self.individuality_instance = individuality_instance - - # 任务控制参数 - self.max_runs = 20 - self.current_runs = 0 - self.original_config_hash = individuality_instance._get_config_hash(personality_sides, identity_detail) - - async def run(self): - """执行任务""" - try: - from src.common.logger import get_logger - - logger = get_logger("individuality.task") - - # 检查是否超过最大运行次数 - if self.current_runs >= self.max_runs: - logger.info(f"关键词更新任务已达到最大运行次数({self.max_runs}),停止执行") - # 设置为0间隔来停止循环任务 - self.run_interval = 0 - return - - # 检查配置是否发生变化 - current_config_hash = self.individuality_instance._get_config_hash( - self.personality_sides, self.identity_detail - ) - if current_config_hash != self.original_config_hash: - logger.info("检测到personality或identity配置发生变化,停止定时任务") - # 设置为0间隔来停止循环任务 - self.run_interval = 0 - return - - self.current_runs += 1 - logger.info(f"开始执行关键词更新任务 (第{self.current_runs}/{self.max_runs}次)") - - # 执行关键词预处理 - await self.individuality_instance._preprocess_personality_keywords( - self.personality_sides, self.identity_detail - ) - - logger.info(f"关键词更新任务完成 (第{self.current_runs}/{self.max_runs}次)") - - except Exception as e: - logger.error(f"关键词更新任务执行失败: {e}") - traceback.print_exc() - - -# 初始化prompt模板 -init_prompt() diff --git a/src/person_info/person_info.py b/src/person_info/person_info.py index 9896e2280..885e98822 100644 --- a/src/person_info/person_info.py +++ b/src/person_info/person_info.py @@ -47,6 +47,7 @@ person_info_default = { "info_list": 
None, "points": None, "forgotten_points": None, + "config_hash": None, } diff --git a/src/plugins/built_in/core_actions/plugin.py b/src/plugins/built_in/core_actions/plugin.py index e257f1d9b..8ff8c3c8d 100644 --- a/src/plugins/built_in/core_actions/plugin.py +++ b/src/plugins/built_in/core_actions/plugin.py @@ -128,7 +128,7 @@ class NoReplyAction(BaseAction): _waiting_stages = [10, 60, 600] # 第1、2、3次的等待时间 # 动作参数定义 - action_parameters = {} + action_parameters = {"reason": "不回复的原因"} # 动作使用场景 action_require = ["你发送了消息,目前无人回复"] @@ -142,6 +142,8 @@ class NoReplyAction(BaseAction): # 增加连续计数 NoReplyAction._consecutive_count += 1 count = NoReplyAction._consecutive_count + + reason = self.action_data.get("reason", "") # 计算本次等待时间 if count <= len(self._waiting_stages): @@ -153,7 +155,7 @@ class NoReplyAction(BaseAction): # 第4次及以后使用WAITING_TIME_THRESHOLD timeout = self.waiting_timeout - logger.info(f"{self.log_prefix} 选择不回复(第{count}次连续),等待新消息中... (超时: {timeout}秒)") + logger.info(f"{self.log_prefix} 选择不回复(第{count}次连续),等待新消息中... (超时: {timeout}秒),原因: {reason}") # 等待新消息或达到时间上限 result = await self.wait_for_new_message(timeout) diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 348ed1b1c..26f55be44 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "2.24.0" +version = "2.25.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更 @@ -102,7 +102,6 @@ compressed_length = 8 # 不能大于observation_context_size,心流上下文压 compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除 [focus_chat_processor] # 专注聊天处理器,打开可以实现更多功能,但是会增加token消耗 -self_identify_processor = true # 是否启用自我识别处理器 relation_processor = true # 是否启用关系识别处理器 tool_use_processor = false # 是否启用工具使用处理器 working_memory_processor = false # 是否启用工作记忆处理器,消耗量大
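
Usage sketch (illustrative, not part of the patch): with this change the bot's own persona lives in person_info under the special ("system", "bot_id") identity, and self-queries share the same info_list cache that the relationship processor reads. The helper below shows that read path, assuming it runs inside this repository with the interfaces used in the hunks above (get_person_info_manager, get_person_id, get_value); the function name and the example info_type are hypothetical.

import asyncio
import json

from src.person_info.person_info import get_person_info_manager


async def get_bot_self_info(info_type: str) -> str:
    """Return the cached info_content for one info_type about the bot itself, or ""."""
    person_info_manager = get_person_info_manager()
    # The bot is stored under the special ("system", "bot_id") identity introduced by this patch.
    bot_person_id = person_info_manager.get_person_id("system", "bot_id")

    info_list = await person_info_manager.get_value(bot_person_id, "info_list") or []
    # info_list may come back as a JSON string from the database or as an already-deserialized list.
    if isinstance(info_list, str):
        info_list = json.loads(info_list)

    for item in info_list:
        if isinstance(item, dict) and item.get("info_type") == info_type:
            return item.get("info_content", "")
    return ""


if __name__ == "__main__":
    # Example: look up a cached "身份" (identity) entry, if the relationship processor has stored one.
    print(asyncio.run(get_bot_self_info("身份")))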