diff --git a/src/chat/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py
index 6d8d4fbc7..376e6b8fe 100644
--- a/src/chat/emoji_system/emoji_manager.py
+++ b/src/chat/emoji_system/emoji_manager.py
@@ -369,7 +369,7 @@ class EmojiManager:
     def __init__(self) -> None:
         if self._initialized:
             return  # 如果已经初始化过,直接返回
-
+
         self._scan_task = None
 
         self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py
index 6c78dd78a..c2fa6dbb8 100644
--- a/src/chat/focus_chat/info_processors/action_processor.py
+++ b/src/chat/focus_chat/info_processors/action_processor.py
@@ -56,18 +56,18 @@ class ActionProcessor(BaseProcessor):
         all_actions = None
         hfc_obs = None
         chat_obs = None
-
+
         # 收集所有观察对象
         for obs in observations:
             if isinstance(obs, HFCloopObservation):
                 hfc_obs = obs
             if isinstance(obs, ChattingObservation):
                 chat_obs = obs
-
+
         # 合并所有动作变更
         merged_action_changes = {"add": [], "remove": []}
         reasons = []
-
+
         # 处理HFCloopObservation
         if hfc_obs:
             obs = hfc_obs
@@ -77,32 +77,32 @@ class ActionProcessor(BaseProcessor):
                 # 合并动作变更
                 merged_action_changes["add"].extend(action_changes["add"])
                 merged_action_changes["remove"].extend(action_changes["remove"])
-
+
                 # 收集变更原因
                 if action_changes["add"]:
                     reasons.append(f"添加动作{action_changes['add']}因为检测到大量无回复")
                 if action_changes["remove"]:
                     reasons.append(f"移除动作{action_changes['remove']}因为检测到连续回复")
-
+
         # 处理ChattingObservation
         if chat_obs and all_actions is not None:
             obs = chat_obs
             # 检查动作的关联类型
             chat_context = chat_manager.get_stream(obs.chat_id).context
             type_mismatched_actions = []
-
+
             for action_name in all_actions.keys():
                 data = all_actions[action_name]
                 if data.get("associated_types"):
                     if not chat_context.check_types(data["associated_types"]):
                         type_mismatched_actions.append(action_name)
                         logger.debug(f"{self.log_prefix} 动作 {action_name} 关联类型不匹配,移除该动作")
-
+
             if type_mismatched_actions:
                 # 合并到移除列表中
                 merged_action_changes["remove"].extend(type_mismatched_actions)
                 reasons.append(f"移除动作{type_mismatched_actions}因为关联类型不匹配")
-
+
         # 如果有任何动作变更,设置到action_info中
         if merged_action_changes["add"] or merged_action_changes["remove"]:
             action_info.set_action_changes(merged_action_changes)
diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py
index 9d0643d67..c4c4ead54 100644
--- a/src/chat/focus_chat/info_processors/self_processor.py
+++ b/src/chat/focus_chat/info_processors/self_processor.py
@@ -15,7 +15,6 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
 from typing import Dict
 from src.chat.focus_chat.info.info_base import InfoBase
 from src.chat.focus_chat.info.self_info import SelfInfo
-from src.chat.utils.utils import get_recent_group_speaker
 
 logger = get_logger("processor")
 
@@ -101,8 +100,7 @@ class SelfProcessor(BaseProcessor):
             如果return_prompt为True:
             tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt
         """
-
-
+
         for observation in observations:
             if isinstance(observation, ChattingObservation):
                 is_group_chat = observation.is_group_chat
@@ -115,13 +113,11 @@ class SelfProcessor(BaseProcessor):
                 memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
                 for running_memory in running_memorys:
                     memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
-
 
         relation_prompt = ""
         for person in person_list:
             if len(person) >= 3 and person[0] and person[1]:
-                relation_prompt += await relationship_manager.build_relationship_info(person,is_id=True)
-
+                relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
 
         if observations is None:
             observations = []
@@ -161,7 +157,6 @@ class SelfProcessor(BaseProcessor):
             relation_prompt = relation_prompt_init + relation_prompt
         else:
             relation_prompt = relation_prompt_init + "没有特别在意的人\n"
-
 
         prompt = (await global_prompt_manager.get_prompt_async("indentify_prompt")).format(
             name_block=name_block,
@@ -172,7 +167,7 @@ class SelfProcessor(BaseProcessor):
             time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
             chat_observe_info=chat_observe_info,
         )
-
+
         # print(prompt)
 
         content = ""
diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py
index 267522c63..57c83cff6 100644
--- a/src/chat/focus_chat/planners/planner.py
+++ b/src/chat/focus_chat/planners/planner.py
@@ -125,14 +125,13 @@ class ActionPlanner:
                 if action in remove_actions:
                     action = "no_reply"
                     reasoning = f"之前选择的动作{action}已被移除,原因: {reason}"
-
+
                 using_actions = self.action_manager.get_using_actions()
                 action_available_block = ""
                 for action_name, action_info in using_actions.items():
                     action_description = action_info["description"]
                     action_available_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n"
-                action_available_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
-
+                action_available_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
 
         # 继续处理其他信息
         for info in all_plan_info:
@@ -156,7 +155,9 @@ class ActionPlanner:
             current_available_actions = self.action_manager.get_using_actions()
 
             # 如果没有可用动作或只有no_reply动作,直接返回no_reply
-            if not current_available_actions or (len(current_available_actions) == 1 and "no_reply" in current_available_actions):
+            if not current_available_actions or (
+                len(current_available_actions) == 1 and "no_reply" in current_available_actions
+            ):
                 action = "no_reply"
                 reasoning = "没有可用的动作" if not current_available_actions else "只有no_reply动作可用,跳过规划"
                 logger.info(f"{self.log_prefix}{reasoning}")
@@ -325,8 +326,7 @@ class ActionPlanner:
             extra_info_block = f"以下是一些额外的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是一些额外的信息,现在请你阅读以下内容,进行决策"
         else:
             extra_info_block = ""
-
-
+
         moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
 
         planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
diff --git a/src/chat/normal_chat/normal_prompt.py b/src/chat/normal_chat/normal_prompt.py
index 8308ab20e..88a1fadc1 100644
--- a/src/chat/normal_chat/normal_prompt.py
+++ b/src/chat/normal_chat/normal_prompt.py
@@ -17,14 +17,14 @@ logger = get_logger("prompt")
 
 
 def init_prompt():
-# Prompt(
-# """
-# 你有以下信息可供参考:
-# {structured_info}
-# 以上的消息是你获取到的消息,或许可以帮助你更好地回复。
-# """,
-# "info_from_tools",
-# )
+    # Prompt(
+    # """
+    # 你有以下信息可供参考:
+    # {structured_info}
+    # 以上的消息是你获取到的消息,或许可以帮助你更好地回复。
+    # """,
+    # "info_from_tools",
+    # )
     Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1")
     Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
 
@@ -94,10 +94,8 @@ class PromptBuilder:
         in_mind_reply=None,
         target_message=None,
     ) -> Optional[str]:
-
         return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
-
 
     async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> str:
         prompt_personality = individuality.get_prompt(x_person=2, level=2)
         is_group_chat = bool(chat_stream.group_info)
@@ -118,8 +116,7 @@ class PromptBuilder:
         for person in who_chat_in_group:
             if len(person) >= 3 and person[0] and person[1]:
                 relation_prompt += await relationship_manager.build_relationship_info(person)
-
-
+
         mood_prompt = mood_manager.get_mood_prompt()
         reply_styles1 = [
             ("然后给出日常且口语化的回复,平淡一些", 0.4),
@@ -193,7 +190,7 @@ class PromptBuilder:
             prompt_ger += "你喜欢用文言文"
         if random.random() < 0.04:
             prompt_ger += "你喜欢用流行梗"
-
+
         moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
 
         # 知识构建
diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py
index 4ef9151b6..37f75955f 100644
--- a/src/person_info/relationship_manager.py
+++ b/src/person_info/relationship_manager.py
@@ -298,7 +298,7 @@ class RelationshipManager:
         level_num = self.calculate_level_num(relationship_value)
 
         relation_value_prompt = ""
-
+
         if level_num == 0 or level_num == 5:
             relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
             relation_prompt2_list = [
@@ -309,7 +309,9 @@ class RelationshipManager:
                 "积极回复",
                 "友善和包容的回复",
             ]
-            relation_value_prompt = f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+            relation_value_prompt = (
+                f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+            )
         elif level_num == 2:
             relation_value_prompt = ""
         else:
@@ -323,17 +325,19 @@ class RelationshipManager:
                 "积极回复",
                 "友善和包容的回复",
             ]
-            relation_value_prompt = f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+            relation_value_prompt = (
+                f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。"
+            )
         else:
             relation_value_prompt = ""
-
+
         if relation_value_prompt:
             nickname_str = await person_info_manager.get_value(person_id, "nickname")
             platform = await person_info_manager.get_value(person_id, "platform")
             relation_prompt = f"{relation_value_prompt},ta在{platform}上的昵称是{nickname_str}。\n"
         else:
             relation_prompt = ""
-
+
         return relation_prompt
 
     @staticmethod