fix: optimize prompt and log
@@ -1,231 +0,0 @@
-from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
-from src.chat.heart_flow.observation.observation import Observation
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
-import time
-import traceback
-from src.common.logger import get_logger
-from src.individuality.individuality import get_individuality
-from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
-from src.chat.utils.json_utils import safe_json_dumps
-from src.chat.message_receive.chat_stream import get_chat_manager
-from src.person_info.relationship_manager import get_relationship_manager
-from .base_processor import BaseProcessor
-from src.chat.focus_chat.info.mind_info import MindInfo
-from typing import List
-from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
-from src.chat.heart_flow.observation.actions_observation import ActionObservation
-from src.chat.focus_chat.info.info_base import InfoBase
-
-logger = get_logger("processor")
-
-
-def init_prompt():
-    group_prompt = """
-{extra_info}{relation_prompt}
-{cycle_info_block}
-现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容:
-{chat_observe_info}
-
-{action_observe_info}
-
-以下是你之前对聊天的观察和规划,你的名字是{bot_name}:
-{last_mind}
-
-现在请你继续输出观察和规划,输出要求:
-1. 先关注未读新消息的内容和近期回复历史
-2. 根据新信息,修改和删除之前的观察和规划
-3. 注意群聊的时间线索,话题由谁发起,进展状况如何。
-4. 语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出内容就好"""
-    Prompt(group_prompt, "sub_heartflow_prompt_before")
-
-    private_prompt = """
-你的名字是{bot_name}
-{extra_info}{relation_prompt}
-{cycle_info_block}
-现在是{time_now},你正在上网,和qq群里的网友们聊天,以下是正在进行的聊天内容:
-{chat_observe_info}
-{action_observe_info}
-以下是你之前对聊天的观察和规划,你的名字是{bot_name}:
-{last_mind}
-
-现在请你继续输出观察和规划,输出要求:
-1. 先关注未读新消息的内容和近期回复历史
-2. 根据新信息,修改和删除之前的观察和规划
-3. 根据聊天内容继续输出观察和规划
-4. 注意群聊的时间线索,话题由谁发起,进展状况如何,思考聊天的时间线。
-6. 语言简洁自然,不要分点,不要浮夸,不要修辞,仅输出思考内容就好"""
-    Prompt(private_prompt, "sub_heartflow_prompt_private_before")
-
-
-class MindProcessor(BaseProcessor):
-    log_prefix = "聊天思考"
-
-    def __init__(self, subheartflow_id: str):
-        super().__init__()
-
-        self.subheartflow_id = subheartflow_id
-
-        self.llm_model = LLMRequest(
-            model=global_config.model.planner,
-            request_type="focus.processor.chat_mind",
-        )
-
-        self.current_mind = ""
-        self.past_mind = []
-        self.structured_info = []
-        self.structured_info_str = ""
-
-        name = get_chat_manager().get_stream_name(self.subheartflow_id)
-        self.log_prefix = f"[{name}] "
-        self._update_structured_info_str()
-
-    def _update_structured_info_str(self):
-        """根据 structured_info 更新 structured_info_str"""
-        if not self.structured_info:
-            self.structured_info_str = ""
-            return
-
-        lines = ["【信息】"]
-        for item in self.structured_info:
-            # 简化展示,突出内容和类型,包含TTL供调试
-            type_str = item.get("type", "未知类型")
-            content_str = item.get("content", "")
-
-            if type_str == "info":
-                lines.append(f"刚刚: {content_str}")
-            elif type_str == "memory":
-                lines.append(f"{content_str}")
-            elif type_str == "comparison_result":
-                lines.append(f"数字大小比较结果: {content_str}")
-            elif type_str == "time_info":
-                lines.append(f"{content_str}")
-            elif type_str == "lpmm_knowledge":
-                lines.append(f"你知道:{content_str}")
-            else:
-                lines.append(f"{type_str}的信息: {content_str}")
-
-        self.structured_info_str = "\n".join(lines)
-        logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}")
-
-    async def process_info(
-        self,
-        observations: List[Observation] = None,
-    ) -> List[InfoBase]:
-        """处理信息对象
-
-        Args:
-            *infos: 可变数量的InfoBase类型的信息对象
-
-        Returns:
-            List[InfoBase]: 处理后的结构化信息列表
-        """
-        current_mind = await self.do_thinking_before_reply(observations)
-
-        mind_info = MindInfo()
-        mind_info.set_current_mind(current_mind)
-
-        return [mind_info]
-
-    async def do_thinking_before_reply(self, observations: List[Observation] = None):
-        """
-        在回复前进行思考,生成内心想法并收集工具调用结果
-
-        参数:
-            observations: 观察信息
-
-        返回:
-            如果return_prompt为False:
-                tuple: (current_mind, past_mind) 当前想法和过去的想法列表
-            如果return_prompt为True:
-                tuple: (current_mind, past_mind, prompt) 当前想法、过去的想法列表和使用的prompt
-        """
-
-        # ---------- 0. 更新和清理 structured_info ----------
-        if self.structured_info:
-            # updated_info = []
-            # for item in self.structured_info:
-            #     item["ttl"] -= 1
-            #     if item["ttl"] > 0:
-            #         updated_info.append(item)
-            #     else:
-            #         logger.debug(f"{self.log_prefix} 移除过期的 structured_info 项: {item['id']}")
-            # self.structured_info = updated_info
-            self._update_structured_info_str()
-        logger.debug(
-            f"{self.log_prefix} 当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}"
-        )
-        # ---------- 1. 准备基础数据 ----------
-        # 获取现有想法和情绪状态
-        previous_mind = self.current_mind if self.current_mind else ""
-
-        if observations is None:
-            observations = []
-        for observation in observations:
-            if isinstance(observation, ChattingObservation):
-                # 获取聊天元信息
-                is_group_chat = observation.is_group_chat
-                chat_target_info = observation.chat_target_info
-                chat_target_name = "对方"  # 私聊默认名称
-                if not is_group_chat and chat_target_info:
-                    # 优先使用person_name,其次user_nickname,最后回退到默认值
-                    chat_target_name = (
-                        chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or chat_target_name
-                    )
-                # 获取聊天内容
-                chat_observe_info = observation.get_observe_info()
-                person_list = observation.person_list
-            if isinstance(observation, HFCloopObservation):
-                hfcloop_observe_info = observation.get_observe_info()
-            if isinstance(observation, ActionObservation):
-                action_observe_info = observation.get_observe_info()
-
-        # ---------- 3. 准备个性化数据 ----------
-        # 获取个性化信息
-
-        relation_prompt = ""
-        if global_config.relationship.enable_relationship:
-            for person in person_list:
-                relationship_manager = get_relationship_manager()
-                relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
-
-        template_name = "sub_heartflow_prompt_before" if is_group_chat else "sub_heartflow_prompt_private_before"
-        logger.debug(f"{self.log_prefix} 使用{'群聊' if is_group_chat else '私聊'}思考模板")
-
-        prompt = (await global_prompt_manager.get_prompt_async(template_name)).format(
-            bot_name=get_individuality().name,
-            extra_info=self.structured_info_str,
-            relation_prompt=relation_prompt,
-            time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
-            chat_observe_info=chat_observe_info,
-            last_mind=previous_mind,
-            cycle_info_block=hfcloop_observe_info,
-            action_observe_info=action_observe_info,
-            chat_target_name=chat_target_name,
-        )
-
-        content = "(不知道该想些什么...)"
-        try:
-            content, _ = await self.llm_model.generate_response_async(prompt=prompt)
-            if not content:
-                logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。")
-        except Exception as e:
-            # 处理总体异常
-            logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}")
-            logger.error(traceback.format_exc())
-            content = "注意:思考过程中出现错误,应该是LLM大模型有问题!!你需要告诉别人,检查大模型配置"
-
-        # 记录初步思考结果
-        logger.debug(f"{self.log_prefix} 思考prompt: \n{prompt}\n")
-        logger.info(f"{self.log_prefix} 聊天规划: {content}")
-        self.update_current_mind(content)
-
-        return content
-
-    def update_current_mind(self, response):
-        if self.current_mind:  # 只有当 current_mind 非空时才添加到 past_mind
-            self.past_mind.append(self.current_mind)
-        self.current_mind = response
-
-
-init_prompt()
@@ -644,7 +644,7 @@ class PersonImpressionpProcessor(BaseProcessor):
         )

         try:
-            logger.info(f"{self.log_prefix} 人物信息prompt: \n{prompt}\n")
+            logger.debug(f"{self.log_prefix} 人物信息prompt: \n{prompt}\n")
             content, _ = await self.llm_model.generate_response_async(prompt=prompt)
             if content:
                 # print(f"content: {content}")
@@ -704,26 +704,47 @@ class PersonImpressionpProcessor(BaseProcessor):
         persons_infos_str = ""
         # 处理已获取到的信息
         if self.info_fetched_cache:
+            persons_with_known_info = []  # 有已知信息的人员
+            persons_with_unknown_info = []  # 有未知信息的人员
+
             for person_id in self.info_fetched_cache:
-                person_infos_str = ""
-                unknown_info_types = []  # 收集所有unknow的信息类型
+                person_known_infos = []
+                person_unknown_infos = []
                 person_name = ""

                 for info_type in self.info_fetched_cache[person_id]:
                     person_name = self.info_fetched_cache[person_id][info_type]["person_name"]
                     if not self.info_fetched_cache[person_id][info_type]["unknow"]:
                         info_content = self.info_fetched_cache[person_id][info_type]["info"]
-                        person_infos_str += f"[{info_type}]:{info_content};"
+                        person_known_infos.append(f"[{info_type}]:{info_content}")
                     else:
-                        unknown_info_types.append(info_type)
+                        person_unknown_infos.append(info_type)

-                # 如果有unknow的信息类型,合并输出
-                if unknown_info_types:
-                    unknown_types_str = "、".join(unknown_info_types)
-                    person_infos_str += f"你不了解{person_name}有关[{unknown_types_str}]的信息,不要胡乱回答,你可以直接说你不知道,或者你忘记了;"
+                # 如果有已知信息,添加到已知信息列表
+                if person_known_infos:
+                    known_info_str = ";".join(person_known_infos) + ";"
+                    persons_with_known_info.append((person_name, known_info_str))

-                if person_infos_str:
-                    persons_infos_str += f"你对 {person_name} 的了解:{person_infos_str}\n"
+                # 如果有未知信息,添加到未知信息列表
+                if person_unknown_infos:
+                    persons_with_unknown_info.append((person_name, person_unknown_infos))

+            # 先输出有已知信息的人员
+            for person_name, known_info_str in persons_with_known_info:
+                persons_infos_str += f"你对 {person_name} 的了解:{known_info_str}\n"
+
+            # 统一处理未知信息,避免重复的警告文本
+            if persons_with_unknown_info:
+                unknown_persons_details = []
+                for person_name, unknown_types in persons_with_unknown_info:
+                    unknown_types_str = "、".join(unknown_types)
+                    unknown_persons_details.append(f"{person_name}的[{unknown_types_str}]")
+
+                if len(unknown_persons_details) == 1:
+                    persons_infos_str += f"你不了解{unknown_persons_details[0]}信息,不要胡乱回答,可以直接说不知道或忘记了;\n"
+                else:
+                    unknown_all_str = "、".join(unknown_persons_details)
+                    persons_infos_str += f"你不了解{unknown_all_str}等信息,不要胡乱回答,可以直接说不知道或忘记了;\n"

         return persons_infos_str
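Note: a minimal, self-contained sketch of the grouping logic introduced in the hunk above — not part of the commit. It assumes a simplified `info_fetched_cache` shape, and the helper name `build_persons_infos_str` plus the sample data are illustrative only. It shows how known facts are still listed per person while the per-person "你不了解…" warnings collapse into a single trailing line.

# Sketch only (assumed cache shape: {person_id: {info_type: {"person_name", "info", "unknow"}}})
def build_persons_infos_str(info_fetched_cache: dict) -> str:
    persons_infos_str = ""
    persons_with_known_info = []
    persons_with_unknown_info = []

    for person_id, infos in info_fetched_cache.items():
        person_known_infos = []
        person_unknown_infos = []
        person_name = ""
        for info_type, record in infos.items():
            person_name = record["person_name"]
            if not record["unknow"]:
                person_known_infos.append(f"[{info_type}]:{record['info']}")
            else:
                person_unknown_infos.append(info_type)
        if person_known_infos:
            persons_with_known_info.append((person_name, ";".join(person_known_infos) + ";"))
        if person_unknown_infos:
            persons_with_unknown_info.append((person_name, person_unknown_infos))

    # known facts: one line per person
    for person_name, known_info_str in persons_with_known_info:
        persons_infos_str += f"你对 {person_name} 的了解:{known_info_str}\n"

    # unknown info types: merged into a single warning line
    if persons_with_unknown_info:
        details = ["{}的[{}]".format(name, "、".join(types)) for name, types in persons_with_unknown_info]
        joined = details[0] if len(details) == 1 else "、".join(details) + "等"
        persons_infos_str += f"你不了解{joined}信息,不要胡乱回答,可以直接说不知道或忘记了;\n"

    return persons_infos_str


if __name__ == "__main__":
    cache = {
        "p1": {
            "昵称": {"person_name": "小明", "info": "小明", "unknow": False},
            "爱好": {"person_name": "小明", "info": "", "unknow": True},
        },
        "p2": {"生日": {"person_name": "小红", "info": "", "unknow": True}},
    }
    print(build_persons_infos_str(cache))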
@@ -610,6 +610,17 @@ class NormalChat:
             logger.info(f"[{self.stream_name}] 已停用,忽略 normal_response。")
             return

+        # 新增:在auto模式下检查是否需要直接切换到focus模式
+        if global_config.chat.chat_mode == "auto":
+            should_switch = await self._check_should_switch_to_focus()
+            if should_switch:
+                logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件,直接执行切换")
+                if self.on_switch_to_focus_callback:
+                    await self.on_switch_to_focus_callback()
+                    return
+                else:
+                    logger.warning(f"[{self.stream_name}] 没有设置切换到focus聊天模式的回调函数,无法执行切换")
+
         # 执行定期清理
         self._cleanup_old_segments()
@@ -729,9 +740,6 @@ class NormalChat:
         if action_type == "no_action":
             logger.debug(f"[{self.stream_name}] Planner决定不执行任何额外动作")
             return no_action
-        elif action_type == "change_to_focus_chat":
-            logger.info(f"[{self.stream_name}] Planner决定切换到focus聊天模式")
-            return None

         # 执行额外的动作(不影响回复生成)
         action_result = await self._execute_action(action_type, action_data, message, thinking_id)
@@ -772,14 +780,14 @@ class NormalChat:

         if not response_set or (
             self.enable_planner
-            and self.action_type not in ["no_action", "change_to_focus_chat"]
+            and self.action_type not in ["no_action"]
             and not self.is_parallel_action
         ):
             if not response_set:
                 logger.info(f"[{self.stream_name}] 模型未生成回复内容")
             elif (
                 self.enable_planner
-                and self.action_type not in ["no_action", "change_to_focus_chat"]
+                and self.action_type not in ["no_action"]
                 and not self.is_parallel_action
             ):
                 logger.info(f"[{self.stream_name}] 模型选择其他动作(非并行动作)")
@@ -828,15 +836,7 @@ class NormalChat:
         if len(self.recent_replies) > self.max_replies_history:
             self.recent_replies = self.recent_replies[-self.max_replies_history :]

-        # 检查是否需要切换到focus模式
-        if global_config.chat.chat_mode == "auto":
-            if self.action_type == "change_to_focus_chat":
-                logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的请求")
-                if self.on_switch_to_focus_callback:
-                    await self.on_switch_to_focus_callback()
-                else:
-                    logger.warning(f"[{self.stream_name}] 没有设置切换到focus聊天模式的回调函数,无法执行切换")
-                return
-
         # 回复后处理
         await willing_manager.after_generate_reply_handle(message.message_info.message_id)
@@ -1124,61 +1124,64 @@ class NormalChat:
         logger.info(f"[{self.stream_name}] 用户 {person_id} 关系构建已启动,缓存已清理")

     async def _build_relation_for_person_segments(self, person_id: str, segments: List[Dict[str, any]]):
-        """基于消息段为特定用户构建关系"""
-        logger.info(f"[{self.stream_name}] 开始为 {person_id} 基于 {len(segments)} 个消息段更新印象")
-        try:
-            processed_messages = []
+        """为特定用户的消息段构建关系"""
+        if not segments:
+            return

-            for i, segment in enumerate(segments):
+        try:
+            chat_stream = get_chat_manager().get_stream(self.stream_id)
+            relationship_manager = chat_stream.relationship_manager
+
+            for segment in segments:
                 start_time = segment["start_time"]
                 end_time = segment["end_time"]
-                segment["message_count"]
-                start_date = time.strftime("%Y-%m-%d %H:%M", time.localtime(start_time))
+                message_count = segment["message_count"]

-                # 获取该段的消息(包含边界)
-                segment_messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.stream_id, start_time, end_time)
-                logger.info(
-                    f"[{self.stream_name}] 消息段 {i + 1}: {start_date} - {time.strftime('%Y-%m-%d %H:%M', time.localtime(end_time))}, 消息数: {len(segment_messages)}"
+                logger.debug(
+                    f"[{self.stream_name}] 为用户 {person_id} 构建关系 "
+                    f"消息段时间: {time.strftime('%H:%M:%S', time.localtime(start_time))} - "
+                    f"{time.strftime('%H:%M:%S', time.localtime(end_time))} "
+                    f"消息数量: {message_count}"
                 )

-                if segment_messages:
-                    # 如果不是第一个消息段,在消息列表前添加间隔标识
-                    if i > 0:
-                        # 创建一个特殊的间隔消息
-                        gap_message = {
-                            "time": start_time - 0.1,  # 稍微早于段开始时间
-                            "user_id": "system",
-                            "user_platform": "system",
-                            "user_nickname": "系统",
-                            "user_cardname": "",
-                            "display_message": f"...(中间省略一些消息){start_date} 之后的消息如下...",
-                            "is_action_record": True,
-                            "chat_info_platform": segment_messages[0].get("chat_info_platform", ""),
-                            "chat_id": self.stream_id,
-                        }
-                        processed_messages.append(gap_message)
-
-                    # 添加该段的所有消息
-                    processed_messages.extend(segment_messages)
-
-            if processed_messages:
-                # 按时间排序所有消息(包括间隔标识)
-                processed_messages.sort(key=lambda x: x["time"])
-
-                logger.info(
-                    f"[{self.stream_name}] 为 {person_id} 获取到总共 {len(processed_messages)} 条消息(包含间隔标识)用于印象更新"
+                await relationship_manager.direct_build_relation(
+                    person_id, start_time, end_time, message_count, time.time()
                 )
-                relationship_manager = get_relationship_manager()
-
-                # 调用原有的更新方法
-                await relationship_manager.update_person_impression(
-                    person_id=person_id, timestamp=time.time(), bot_engaged_messages=processed_messages
-                )
-
-                logger.info(f"[{self.stream_name}] 用户 {person_id} 关系构建完成")
-            else:
-                logger.warning(f"[{self.stream_name}] 没有找到 {person_id} 的消息段对应的消息,不更新印象")

         except Exception as e:
-            logger.error(f"[{self.stream_name}] 为 {person_id} 更新印象时发生错误: {e}")
-            logger.error(traceback.format_exc())
+            logger.error(f"[{self.stream_name}] 构建关系失败: {e}")
+
+    async def _check_should_switch_to_focus(self) -> bool:
+        """
+        检查是否满足切换到focus模式的条件
+
+        Returns:
+            bool: 是否应该切换到focus模式
+        """
+        # 检查思考消息堆积情况
+        container = await message_manager.get_container(self.stream_id)
+        if container:
+            thinking_count = sum(1 for msg in container.messages if isinstance(msg, MessageThinking))
+            if thinking_count >= 4 * global_config.chat.auto_focus_threshold:  # 如果堆积超过阈值条思考消息
+                logger.debug(f"[{self.stream_name}] 检测到思考消息堆积({thinking_count}条),切换到focus模式")
+                return True
+
+        if not self.recent_replies:
+            return False
+
+        current_time = time.time()
+        time_threshold = 120 / global_config.chat.auto_focus_threshold
+        reply_threshold = 6 * global_config.chat.auto_focus_threshold
+
+        one_minute_ago = current_time - time_threshold
+
+        # 统计指定时间内的回复数量
+        recent_reply_count = sum(1 for reply in self.recent_replies if reply["time"] > one_minute_ago)
+
+        should_switch = recent_reply_count > reply_threshold
+        if should_switch:
+            logger.debug(
+                f"[{self.stream_name}] 检测到{time_threshold:.0f}秒内回复数量({recent_reply_count})大于{reply_threshold},满足切换到focus模式条件"
+            )
+
+        return should_switch
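Note: the switch conditions added in `_check_should_switch_to_focus` above all scale with `global_config.chat.auto_focus_threshold` — the backlog check fires at 4 * threshold queued thinking messages, and the reply-rate check fires when more than 6 * threshold replies arrived within the last 120 / threshold seconds. Below is a standalone sketch of just the reply-rate half, not part of the commit; the function name and the assumed threshold value of 1.0 are illustrative.

import time

def reply_rate_triggers_focus(recent_replies, auto_focus_threshold=1.0, now=None):
    # Sketch only: mirrors the reply-rate condition from _check_should_switch_to_focus.
    now = time.time() if now is None else now
    time_threshold = 120 / auto_focus_threshold   # look-back window in seconds
    reply_threshold = 6 * auto_focus_threshold    # replies required inside the window
    recent_count = sum(1 for reply in recent_replies if reply["time"] > now - time_threshold)
    return recent_count > reply_threshold

# With auto_focus_threshold = 1.0 this reads "more than 6 replies in the last 120 s";
# a larger threshold shortens the window and raises the reply bar, so the switch triggers less easily.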
@@ -170,7 +170,6 @@ class NormalChatActionModifier:
         2. RANDOM类型保持概率激活
         3. KEYWORD类型保持关键词匹配
         4. ALWAYS类型直接激活
-        5. change_to_focus_chat 特殊处理:根据回复频率判断

         Args:
             actions_with_info: 带完整信息的动作字典
@@ -183,25 +182,12 @@ class NormalChatActionModifier:
         """
         activated_actions = {}

-        # 特殊处理 change_to_focus_chat 动作
-        if global_config.chat.chat_mode == "auto":
-            if "change_to_focus_chat" in actions_with_info:
-                # 检查是否满足切换到focus模式的条件
-                if await self._check_should_switch_to_focus(recent_replies):
-                    activated_actions["change_to_focus_chat"] = actions_with_info["change_to_focus_chat"]
-                    logger.debug(f"{self.log_prefix} 特殊激活 change_to_focus_chat 动作,原因: 满足切换到focus模式条件")
-                    return activated_actions
-
         # 分类处理不同激活类型的actions
         always_actions = {}
         random_actions = {}
         keyword_actions = {}

         for action_name, action_info in actions_with_info.items():
-            # 跳过已特殊处理的 change_to_focus_chat
-            if action_name == "change_to_focus_chat":
-                continue
-
             # 使用normal_activation_type
             activation_type = action_info.get("normal_activation_type", "always")
@@ -294,47 +280,7 @@ class NormalChatActionModifier:
                     logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}")
                     return False

-    async def _check_should_switch_to_focus(self, recent_replies: List[dict]) -> bool:
-        """
-        检查是否满足切换到focus模式的条件
-
-        Args:
-            recent_replies: 最近的回复记录列表
-
-        Returns:
-            bool: 是否应该切换到focus模式
-        """
-        # 检查思考消息堆积情况
-        container = await message_manager.get_container(self.stream_id)
-        if container:
-            thinking_count = sum(1 for msg in container.messages if isinstance(msg, MessageThinking))
-            print(f"thinking_count: {thinking_count}")
-            if thinking_count >= 4 * global_config.chat.auto_focus_threshold:  # 如果堆积超过3条思考消息
-                logger.debug(f"{self.log_prefix} 检测到思考消息堆积({thinking_count}条),切换到focus模式")
-                return True
-
-        if not recent_replies:
-            return False
-
-        current_time = time.time()
-        time_threshold = 120 / global_config.chat.auto_focus_threshold
-        reply_threshold = 6 * global_config.chat.auto_focus_threshold
-
-        one_minute_ago = current_time - time_threshold
-
-        # 统计1分钟内的回复数量
-        recent_reply_count = sum(1 for reply in recent_replies if reply["time"] > one_minute_ago)
-
-        print(f"recent_reply_count: {recent_reply_count}")
-        print(f"reply_threshold: {reply_threshold}")
-
-        should_switch = recent_reply_count > reply_threshold
-        if should_switch:
-            logger.debug(
-                f"{self.log_prefix} 检测到1分钟内回复数量({recent_reply_count})大于{reply_threshold},满足切换到focus模式条件"
-            )
-
-        return should_switch
-
     def get_available_actions_count(self) -> int:
         """获取当前可用动作数量(排除默认的no_action)"""
@@ -622,10 +622,13 @@ def build_readable_messages(
         messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, False, pic_id_mapping, pic_counter
     )

-    read_mark_line = "\n--- 以上消息是你已经看过---\n--- 请关注以下未读的新消息---\n"
+    read_mark_line = "\n--- 以上消息是你已经看过,请关注以下未读的新消息---\n"

     # 生成图片映射信息
-    pic_mapping_info = f"图片信息:\n{build_pic_mapping_info(pic_id_mapping)}\n聊天记录信息:\n"
+    if pic_id_mapping:
+        pic_mapping_info = f"图片信息:\n{build_pic_mapping_info(pic_id_mapping)}\n聊天记录信息:\n"
+    else:
+        pic_mapping_info = "聊天记录信息:\n"

     # 组合结果
     result_parts = []
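Note: a rough sketch of how the pieces touched above plausibly combine inside build_readable_messages — the assembly order and the stand-in build_pic_mapping_info are assumptions, not code from this commit. It only illustrates that the read-mark is now one merged line and that the image legend header is emitted only when pic_id_mapping is non-empty.

def build_pic_mapping_info(pic_id_mapping):  # stand-in for the real helper
    return "\n".join(f"[{pid}] {desc}" for pid, desc in pic_id_mapping.items())

def assemble_preview(read_block, unread_block, pic_id_mapping):
    read_mark_line = "\n--- 以上消息是你已经看过,请关注以下未读的新消息---\n"
    if pic_id_mapping:
        pic_mapping_info = f"图片信息:\n{build_pic_mapping_info(pic_id_mapping)}\n聊天记录信息:\n"
    else:
        pic_mapping_info = "聊天记录信息:\n"
    # The real function collects result_parts and joins them; the order here is assumed.
    return "".join([pic_mapping_info, read_block, read_mark_line, unread_block])

print(assemble_preview("A: 早上好\n", "B: 在吗?\n", {"图片1": "一张猫的照片"}))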
@@ -477,7 +477,7 @@ class ImageManager:
             image_format = Image.open(io.BytesIO(image_bytes)).format.lower()

             # 构建prompt
-            prompt = """请用中文描述这张图片的内容。如果有文字,请把文字描述概括出来,请留意其主题,直观感受,输出为一段平文本,最多50字"""
+            prompt = """请用中文描述这张图片的内容。如果有文字,请把文字描述概括出来,请留意其主题,直观感受,输出为一段平文本,最多30字,请注意不要分点,就输出一段文本"""

             # 获取VLM描述
             description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
@@ -286,7 +286,7 @@ class RelationshipManager:

                 logger_str = f"了解了有关{person_name}的新印象:\n"
                 for point in points_list:
-                    logger_str += f"{point[0]},重要性:{point[1]}\n\n"
+                    logger_str += f"{point[0]},重要性:{point[1]}\n"
                 logger.info(logger_str)

             except json.JSONDecodeError: