fix - optimize normal_chat code: adopt the same relationship building as focus, improve logging, add a timeout check, and allow normal chat to use LLM activation

SengokuCola
2025-07-06 17:02:36 +08:00
parent c82c7744d6
commit b3a93d16e6
25 changed files with 378 additions and 795 deletions
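The "timeout check" in the summary lands in the first file below: reply generation and action planning run as two parallel tasks under asyncio.wait_for, with a counter of consecutive timeouts that triggers a warning. A minimal, self-contained sketch of that pattern, assuming placeholder task functions and a placeholder 120-second limit rather than the project's real thinking_timeout value:

import asyncio

async def generate_reply() -> list:
    # stand-in for the reply-generation task
    await asyncio.sleep(0.1)
    return [("text", "hello")]

async def plan_actions() -> dict:
    # stand-in for the action-planning task
    await asyncio.sleep(0.1)
    return {"action_type": "no_action"}

async def reply_with_timeout(thinking_timeout: float, timeout_count: int):
    gen_task = asyncio.create_task(generate_reply())
    plan_task = asyncio.create_task(plan_actions())
    try:
        results = await asyncio.wait_for(
            asyncio.gather(gen_task, plan_task, return_exceptions=True),
            timeout=thinking_timeout,
        )
        return results, 0  # success resets the consecutive-timeout counter
    except asyncio.TimeoutError:
        # cancel whatever is still running and count the consecutive timeout
        for task in (gen_task, plan_task):
            if not task.done():
                task.cancel()
        timeout_count += 1
        if timeout_count > 5:
            print(f"no reply within {thinking_timeout}s repeatedly; check API speed/config")
        return None, timeout_count

print(asyncio.run(reply_with_timeout(thinking_timeout=120.0, timeout_count=0)))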

View File

@@ -1,11 +1,12 @@
import asyncio
import time
from random import random
from typing import List, Dict, Optional
import os
import pickle
from maim_message import UserInfo, Seg
from typing import List, Optional
from src.config.config import global_config
from src.common.logger import get_logger
from src.person_info.person_info import get_person_info_manager
from src.plugin_system.apis import generator_api
from maim_message import UserInfo, Seg
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
from src.chat.utils.timer_calculator import Timer
@@ -14,20 +15,10 @@ from ..message_receive.message import MessageSending, MessageRecv, MessageThinki
from src.chat.message_receive.message_sender import message_manager
from src.chat.normal_chat.willing.willing_manager import get_willing_manager
from src.chat.normal_chat.normal_chat_utils import get_recent_message_stats
from src.config.config import global_config
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.person_info.person_info import PersonInfoManager
from src.person_info.relationship_manager import get_relationship_manager
from src.chat.utils.chat_message_builder import (
get_raw_msg_by_timestamp_with_chat,
get_raw_msg_by_timestamp_with_chat_inclusive,
get_raw_msg_before_timestamp_with_chat,
num_new_messages_since,
)
from src.person_info.relationship_builder_manager import relationship_builder_manager
from .priority_manager import PriorityManager
import traceback
from .normal_chat_generator import NormalChatGenerator
from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner
from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier
@@ -38,15 +29,6 @@ willing_manager = get_willing_manager()
logger = get_logger("normal_chat")
# 消息段清理配置
SEGMENT_CLEANUP_CONFIG = {
"enable_cleanup": True, # 是否启用清理
"max_segment_age_days": 7, # 消息段最大保存天数
"max_segments_per_user": 10, # 每用户最大消息段数
"cleanup_interval_hours": 1, # 清理间隔(小时)
}
class NormalChat:
"""
普通聊天处理类,负责处理非核心对话的聊天逻辑。
@@ -71,6 +53,8 @@ class NormalChat:
self.stream_name = get_chat_manager().get_stream_name(self.stream_id) or self.stream_id
self.relationship_builder = relationship_builder_manager.get_or_create_builder(self.stream_id)
# Interest dict
self.interest_dict = interest_dict
@@ -78,9 +62,7 @@ class NormalChat:
self.willing_amplifier = 1
self.start_time = time.time()
# Other sync initializations
self.gpt = NormalChatGenerator()
self.mood_manager = mood_manager
self.start_time = time.time()
@@ -96,18 +78,6 @@ class NormalChat:
self.recent_replies = []
self.max_replies_history = 20 # 最多保存最近20条回复记录
# 新的消息段缓存结构:
# {person_id: [{"start_time": float, "end_time": float, "last_msg_time": float, "message_count": int}, ...]}
self.person_engaged_cache: Dict[str, List[Dict[str, any]]] = {}
# 持久化存储文件路径
self.cache_file_path = os.path.join("data", "relationship", f"relationship_cache_{self.stream_id}.pkl")
# 最后处理的消息时间,避免重复处理相同消息
self.last_processed_message_time = 0.0
# 最后清理时间,用于定期清理老消息段
self.last_cleanup_time = 0.0
# 添加回调函数用于在满足条件时通知切换到focus_chat模式
self.on_switch_to_focus_callback = on_switch_to_focus_callback
@@ -119,11 +89,6 @@ class NormalChat:
self.timeout_count = 0
# 加载持久化的缓存
self._load_cache()
logger.debug(f"[{self.stream_name}] NormalChat 初始化完成 (异步部分)。")
self.action_type: Optional[str] = None # 当前动作类型
self.is_parallel_action: bool = False # 是否是可并行动作
@@ -151,320 +116,25 @@ class NormalChat:
self._priority_chat_task.cancel()
logger.info(f"[{self.stream_name}] NormalChat 已停用。")
# ================================
# 缓存管理模块
# 负责持久化存储、状态管理、缓存读写
# ================================
def _load_cache(self):
"""从文件加载持久化的缓存"""
if os.path.exists(self.cache_file_path):
try:
with open(self.cache_file_path, "rb") as f:
cache_data = pickle.load(f)
# 新格式:包含额外信息的缓存
self.person_engaged_cache = cache_data.get("person_engaged_cache", {})
self.last_processed_message_time = cache_data.get("last_processed_message_time", 0.0)
self.last_cleanup_time = cache_data.get("last_cleanup_time", 0.0)
logger.info(
f"[{self.stream_name}] 成功加载关系缓存,包含 {len(self.person_engaged_cache)} 个用户,最后处理时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_processed_message_time)) if self.last_processed_message_time > 0 else '未设置'}"
)
except Exception as e:
logger.error(f"[{self.stream_name}] 加载关系缓存失败: {e}")
self.person_engaged_cache = {}
self.last_processed_message_time = 0.0
else:
logger.info(f"[{self.stream_name}] 关系缓存文件不存在,使用空缓存")
def _save_cache(self):
"""保存缓存到文件"""
try:
os.makedirs(os.path.dirname(self.cache_file_path), exist_ok=True)
cache_data = {
"person_engaged_cache": self.person_engaged_cache,
"last_processed_message_time": self.last_processed_message_time,
"last_cleanup_time": self.last_cleanup_time,
}
with open(self.cache_file_path, "wb") as f:
pickle.dump(cache_data, f)
logger.debug(f"[{self.stream_name}] 成功保存关系缓存")
except Exception as e:
logger.error(f"[{self.stream_name}] 保存关系缓存失败: {e}")
# ================================
# 消息段管理模块
# 负责跟踪用户消息活动、管理消息段、清理过期数据
# ================================
def _update_message_segments(self, person_id: str, message_time: float):
"""更新用户的消息段
Args:
person_id: 用户ID
message_time: 消息时间戳
"""
if person_id not in self.person_engaged_cache:
self.person_engaged_cache[person_id] = []
segments = self.person_engaged_cache[person_id]
current_time = time.time()
# 获取该消息前5条消息的时间作为潜在的开始时间
before_messages = get_raw_msg_before_timestamp_with_chat(self.stream_id, message_time, limit=5)
if before_messages:
# 由于get_raw_msg_before_timestamp_with_chat返回按时间升序排序的消息最后一个是最接近message_time的
# 我们需要第一个消息作为开始时间但应该确保至少包含5条消息或该用户之前的消息
potential_start_time = before_messages[0]["time"]
else:
# 如果没有前面的消息,就从当前消息开始
potential_start_time = message_time
# 如果没有现有消息段,创建新的
if not segments:
new_segment = {
"start_time": potential_start_time,
"end_time": message_time,
"last_msg_time": message_time,
"message_count": self._count_messages_in_timerange(potential_start_time, message_time),
}
segments.append(new_segment)
logger.debug(
f"[{self.stream_name}] 为用户 {person_id} 创建新消息段: 时间范围 {time.strftime('%H:%M:%S', time.localtime(potential_start_time))} - {time.strftime('%H:%M:%S', time.localtime(message_time))}, 消息数: {new_segment['message_count']}"
)
self._save_cache()
return
# 获取最后一个消息段
last_segment = segments[-1]
# 计算从最后一条消息到当前消息之间的消息数量(不包含边界)
messages_between = self._count_messages_between(last_segment["last_msg_time"], message_time)
if messages_between <= 10:
# 在10条消息内延伸当前消息段
last_segment["end_time"] = message_time
last_segment["last_msg_time"] = message_time
# 重新计算整个消息段的消息数量
last_segment["message_count"] = self._count_messages_in_timerange(
last_segment["start_time"], last_segment["end_time"]
)
logger.debug(f"[{self.stream_name}] 延伸用户 {person_id} 的消息段: {last_segment}")
else:
# 超过10条消息结束当前消息段并创建新的
# 结束当前消息段延伸到原消息段最后一条消息后5条消息的时间
after_messages = get_raw_msg_by_timestamp_with_chat(
self.stream_id, last_segment["last_msg_time"], current_time, limit=5, limit_mode="earliest"
)
if after_messages and len(after_messages) >= 5:
# 如果有足够的后续消息使用第5条消息的时间作为结束时间
last_segment["end_time"] = after_messages[4]["time"]
else:
# 如果没有足够的后续消息,保持原有的结束时间
pass
# 重新计算当前消息段的消息数量
last_segment["message_count"] = self._count_messages_in_timerange(
last_segment["start_time"], last_segment["end_time"]
)
# 创建新的消息段
new_segment = {
"start_time": potential_start_time,
"end_time": message_time,
"last_msg_time": message_time,
"message_count": self._count_messages_in_timerange(potential_start_time, message_time),
}
segments.append(new_segment)
logger.debug(f"[{self.stream_name}] 为用户 {person_id} 创建新消息段超过10条消息间隔: {new_segment}")
self._save_cache()
def _count_messages_in_timerange(self, start_time: float, end_time: float) -> int:
"""计算指定时间范围内的消息数量(包含边界)"""
messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.stream_id, start_time, end_time)
return len(messages)
def _count_messages_between(self, start_time: float, end_time: float) -> int:
"""计算两个时间点之间的消息数量(不包含边界),用于间隔检查"""
return num_new_messages_since(self.stream_id, start_time, end_time)
def _get_total_message_count(self, person_id: str) -> int:
"""获取用户所有消息段的总消息数量"""
if person_id not in self.person_engaged_cache:
return 0
total_count = 0
for segment in self.person_engaged_cache[person_id]:
total_count += segment["message_count"]
return total_count
def _cleanup_old_segments(self) -> bool:
"""清理老旧的消息段
Returns:
bool: 是否执行了清理操作
"""
if not SEGMENT_CLEANUP_CONFIG["enable_cleanup"]:
return False
current_time = time.time()
# 检查是否需要执行清理(基于时间间隔)
cleanup_interval_seconds = SEGMENT_CLEANUP_CONFIG["cleanup_interval_hours"] * 3600
if current_time - self.last_cleanup_time < cleanup_interval_seconds:
return False
logger.info(f"[{self.stream_name}] 开始执行老消息段清理...")
cleanup_stats = {
"users_cleaned": 0,
"segments_removed": 0,
"total_segments_before": 0,
"total_segments_after": 0,
}
max_age_seconds = SEGMENT_CLEANUP_CONFIG["max_segment_age_days"] * 24 * 3600
max_segments_per_user = SEGMENT_CLEANUP_CONFIG["max_segments_per_user"]
users_to_remove = []
for person_id, segments in self.person_engaged_cache.items():
cleanup_stats["total_segments_before"] += len(segments)
original_segment_count = len(segments)
# 1. 按时间清理:移除过期的消息段
segments_after_age_cleanup = []
for segment in segments:
segment_age = current_time - segment["end_time"]
if segment_age <= max_age_seconds:
segments_after_age_cleanup.append(segment)
else:
cleanup_stats["segments_removed"] += 1
logger.debug(
f"[{self.stream_name}] 移除用户 {person_id} 的过期消息段: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(segment['start_time']))} - {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(segment['end_time']))}"
)
# 2. 按数量清理:如果消息段数量仍然过多,保留最新的
if len(segments_after_age_cleanup) > max_segments_per_user:
# 按end_time排序保留最新的
segments_after_age_cleanup.sort(key=lambda x: x["end_time"], reverse=True)
segments_removed_count = len(segments_after_age_cleanup) - max_segments_per_user
cleanup_stats["segments_removed"] += segments_removed_count
segments_after_age_cleanup = segments_after_age_cleanup[:max_segments_per_user]
logger.debug(
f"[{self.stream_name}] 用户 {person_id} 消息段数量过多,移除 {segments_removed_count} 个最老的消息段"
)
# 使用清理后的消息段
# 更新缓存
if len(segments_after_age_cleanup) == 0:
# 如果没有剩余消息段,标记用户为待移除
users_to_remove.append(person_id)
else:
self.person_engaged_cache[person_id] = segments_after_age_cleanup
cleanup_stats["total_segments_after"] += len(segments_after_age_cleanup)
if original_segment_count != len(segments_after_age_cleanup):
cleanup_stats["users_cleaned"] += 1
# 移除没有消息段的用户
for person_id in users_to_remove:
del self.person_engaged_cache[person_id]
logger.debug(f"[{self.stream_name}] 移除用户 {person_id}:没有剩余消息段")
# 更新最后清理时间
self.last_cleanup_time = current_time
# 保存缓存
if cleanup_stats["segments_removed"] > 0 or len(users_to_remove) > 0:
self._save_cache()
logger.info(
f"[{self.stream_name}] 清理完成 - 影响用户: {cleanup_stats['users_cleaned']}, 移除消息段: {cleanup_stats['segments_removed']}, 移除用户: {len(users_to_remove)}"
)
logger.info(
f"[{self.stream_name}] 消息段统计 - 清理前: {cleanup_stats['total_segments_before']}, 清理后: {cleanup_stats['total_segments_after']}"
)
else:
logger.debug(f"[{self.stream_name}] 清理完成 - 无需清理任何内容")
return cleanup_stats["segments_removed"] > 0 or len(users_to_remove) > 0
def get_cache_status(self) -> str:
"""获取缓存状态信息,用于调试和监控"""
if not self.person_engaged_cache:
return f"[{self.stream_name}] 关系缓存为空"
status_lines = [f"[{self.stream_name}] 关系缓存状态:"]
status_lines.append(
f"最后处理消息时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_processed_message_time)) if self.last_processed_message_time > 0 else '未设置'}"
)
status_lines.append(
f"最后清理时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_cleanup_time)) if self.last_cleanup_time > 0 else '未执行'}"
)
status_lines.append(f"总用户数:{len(self.person_engaged_cache)}")
status_lines.append(
f"清理配置:{'启用' if SEGMENT_CLEANUP_CONFIG['enable_cleanup'] else '禁用'} (最大保存{SEGMENT_CLEANUP_CONFIG['max_segment_age_days']}天, 每用户最多{SEGMENT_CLEANUP_CONFIG['max_segments_per_user']}段)"
)
status_lines.append("")
for person_id, segments in self.person_engaged_cache.items():
total_count = self._get_total_message_count(person_id)
status_lines.append(f"用户 {person_id}:")
status_lines.append(f" 总消息数:{total_count} ({total_count}/45)")
status_lines.append(f" 消息段数:{len(segments)}")
for i, segment in enumerate(segments):
start_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["start_time"]))
end_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["end_time"]))
last_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["last_msg_time"]))
status_lines.append(
f"{i + 1}: {start_str} -> {end_str} (最后消息: {last_str}, 消息数: {segment['message_count']})"
)
status_lines.append("")
return "\n".join(status_lines)
def _update_user_message_segments(self, message: MessageRecv):
"""更新用户消息段信息"""
time.time()
user_id = message.message_info.user_info.user_id
platform = message.message_info.platform
msg_time = message.message_info.time
# 跳过机器人自己的消息
if user_id == global_config.bot.qq_account:
return
# 只处理新消息(避免重复处理)
if msg_time <= self.last_processed_message_time:
return
person_id = PersonInfoManager.get_person_id(platform, user_id)
self._update_message_segments(person_id, msg_time)
# 更新最后处理时间
self.last_processed_message_time = max(self.last_processed_message_time, msg_time)
logger.debug(
f"[{self.stream_name}] 更新用户 {person_id} 的消息段,消息时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msg_time))}"
)
async def _priority_chat_loop_add_message(self):
while not self._disabled:
try:
ids = list(self.interest_dict.keys())
for msg_id in ids:
message, interest_value, _ = self.interest_dict[msg_id]
# 创建字典条目的副本以避免在迭代时发生修改
items_to_process = list(self.interest_dict.items())
for msg_id, value in items_to_process:
# 尝试从原始字典中弹出条目,如果它已被其他任务处理,则跳过
if self.interest_dict.pop(msg_id, None) is None:
continue # 条目已被其他任务处理
message, interest_value, _ = value
if not self._disabled:
# 更新消息段信息
self._update_user_message_segments(message)
# self._update_user_message_segments(message)
# 添加消息到优先级管理器
if self.priority_manager:
self.priority_manager.add_message(message, interest_value)
self.interest_dict.pop(msg_id, None)
except Exception:
logger.error(
f"[{self.stream_name}] 优先级聊天循环添加消息时出现错误: {traceback.format_exc()}", exc_info=True
@@ -489,9 +159,6 @@ class NormalChat:
f"[{self.stream_name}] 从队列中取出消息进行处理: User {message.message_info.user_info.user_id}, Time: {time.strftime('%H:%M:%S', time.localtime(message.message_info.time))}"
)
# 检查是否有用户满足关系构建条件
asyncio.create_task(self._check_relation_building_conditions(message))
do_reply = await self.reply_one_message(message)
response_set = do_reply if do_reply else []
factor = 0.5
@@ -708,19 +375,12 @@ class NormalChat:
async def normal_response(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None:
"""
处理接收到的消息。
根据回复模式,决定是立即处理还是放入优先级队列
"兴趣"模式下,判断是否回复并生成内容
"""
if self._disabled:
return
# 根据回复模式决定行为
if self.reply_mode == "priority":
# 优先模式下,所有消息都进入管理器
if self.priority_manager:
self.priority_manager.add_message(message)
return
# 新增在auto模式下检查是否需要直接切换到focus模式
# 新增在auto模式下检查是否需要直接切换到focus模式
if global_config.chat.chat_mode == "auto":
if await self._check_should_switch_to_focus():
logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件尝试执行切换")
@@ -734,19 +394,7 @@ class NormalChat:
else:
logger.warning(f"[{self.stream_name}] 没有设置切换到focus聊天模式的回调函数无法执行切换")
# --- 以下为原有的 "兴趣" 模式逻辑 ---
await self._process_message(message, is_mentioned, interested_rate)
async def _process_message(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None:
"""
实际处理单条消息的逻辑,包括意愿判断、回复生成、动作执行等。
"""
if self._disabled:
return
# 检查是否有用户满足关系构建条件
asyncio.create_task(self._check_relation_building_conditions(message))
# --- 以下为 "兴趣" 模式逻辑 (从 _process_message 合并而来) ---
timing_results = {}
reply_probability = (
1.0 if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply else 0.0
@@ -804,7 +452,7 @@ class NormalChat:
if do_reply and response_set: # 确保 response_set 不是 None
timing_str = " | ".join([f"{step}: {duration:.2f}" for step, duration in timing_results.items()])
trigger_msg = message.processed_plain_text
response_msg = " ".join(response_set)
response_msg = " ".join([item[1] for item in response_set if item[0] == "text"])
logger.info(
f"[{self.stream_name}]回复消息: {trigger_msg[:30]}... | 回复内容: {response_msg[:30]}... | 计时: {timing_str}"
)
@@ -816,8 +464,105 @@ class NormalChat:
# 意愿管理器注销当前message信息 (无论是否回复,只要处理过就删除)
willing_manager.delete(message.message_info.message_id)
async def _generate_normal_response(
self, message: MessageRecv, available_actions: Optional[list]
) -> Optional[list]:
"""生成普通回复"""
try:
logger.info(
f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
)
person_info_manager = get_person_info_manager()
person_id = person_info_manager.get_person_id(
message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
)
person_name = await person_info_manager.get_value(person_id, "person_name")
reply_to_str = f"{person_name}:{message.processed_plain_text}"
success, reply_set = await generator_api.generate_reply(
chat_stream=message.chat_stream,
reply_to=reply_to_str,
available_actions=available_actions,
enable_tool=global_config.tool.enable_in_normal_chat,
request_type="normal.replyer",
)
if not success or not reply_set:
logger.info(f"{message.processed_plain_text} 的回复生成失败")
return None
content = " ".join([item[1] for item in reply_set if item[0] == "text"])
if content:
logger.info(f"{global_config.bot.nickname}的备选回复是:{content}")
return reply_set
except Exception as e:
logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}")
return None
async def _plan_and_execute_actions(self, message: MessageRecv, thinking_id: str) -> Optional[dict]:
"""规划和执行额外动作"""
no_action = {
"action_result": {
"action_type": "no_action",
"action_data": {},
"reasoning": "规划器初始化默认",
"is_parallel": True,
},
"chat_context": "",
"action_prompt": "",
}
if not self.enable_planner:
logger.debug(f"[{self.stream_name}] Planner未启用跳过动作规划")
return no_action
try:
# 检查是否应该跳过规划
if self.action_modifier.should_skip_planning():
logger.debug(f"[{self.stream_name}] 没有可用动作,跳过规划")
self.action_type = "no_action"
return no_action
# 执行规划
plan_result = await self.planner.plan(message)
action_type = plan_result["action_result"]["action_type"]
action_data = plan_result["action_result"]["action_data"]
reasoning = plan_result["action_result"]["reasoning"]
is_parallel = plan_result["action_result"].get("is_parallel", False)
logger.info(f"[{self.stream_name}] Planner决策: {action_type}, 理由: {reasoning}, 并行执行: {is_parallel}")
self.action_type = action_type # 更新实例属性
self.is_parallel_action = is_parallel # 新增:保存并行执行标志
# 如果规划器决定不执行任何动作
if action_type == "no_action":
logger.debug(f"[{self.stream_name}] Planner决定不执行任何额外动作")
return no_action
# 执行额外的动作(不影响回复生成)
action_result = await self._execute_action(action_type, action_data, message, thinking_id)
if action_result is not None:
logger.info(f"[{self.stream_name}] 额外动作 {action_type} 执行完成")
else:
logger.warning(f"[{self.stream_name}] 额外动作 {action_type} 执行失败")
return {
"action_type": action_type,
"action_data": action_data,
"reasoning": reasoning,
"is_parallel": is_parallel,
}
except Exception as e:
logger.error(f"[{self.stream_name}] Planner执行失败: {e}")
return no_action
async def reply_one_message(self, message: MessageRecv) -> None:
# 回复前处理
await self.relationship_builder.build_relation()
thinking_id = await self._create_thinking_message(message)
# 如果启用planner预先修改可用actions避免在并行任务中重复调用
@@ -832,87 +577,15 @@ class NormalChat:
logger.warning(f"[{self.stream_name}] 获取available_actions失败: {e}")
available_actions = None
# 定义并行执行的任务
async def generate_normal_response():
"""生成普通回复"""
try:
return await self.gpt.generate_response(
message=message,
available_actions=available_actions,
)
except Exception as e:
logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}")
return None
async def plan_and_execute_actions():
"""规划和执行额外动作"""
if not self.enable_planner:
logger.debug(f"[{self.stream_name}] Planner未启用跳过动作规划")
return None
try:
no_action = {
"action_result": {
"action_type": "no_action",
"action_data": {},
"reasoning": "规划器初始化默认",
"is_parallel": True,
},
"chat_context": "",
"action_prompt": "",
}
# 检查是否应该跳过规划
if self.action_modifier.should_skip_planning():
logger.debug(f"[{self.stream_name}] 没有可用动作,跳过规划")
self.action_type = "no_action"
return no_action
# 执行规划
plan_result = await self.planner.plan(message)
action_type = plan_result["action_result"]["action_type"]
action_data = plan_result["action_result"]["action_data"]
reasoning = plan_result["action_result"]["reasoning"]
is_parallel = plan_result["action_result"].get("is_parallel", False)
logger.info(
f"[{self.stream_name}] Planner决策: {action_type}, 理由: {reasoning}, 并行执行: {is_parallel}"
)
self.action_type = action_type # 更新实例属性
self.is_parallel_action = is_parallel # 新增:保存并行执行标志
# 如果规划器决定不执行任何动作
if action_type == "no_action":
logger.debug(f"[{self.stream_name}] Planner决定不执行任何额外动作")
return no_action
# 执行额外的动作(不影响回复生成)
action_result = await self._execute_action(action_type, action_data, message, thinking_id)
if action_result is not None:
logger.info(f"[{self.stream_name}] 额外动作 {action_type} 执行完成")
else:
logger.warning(f"[{self.stream_name}] 额外动作 {action_type} 执行失败")
return {
"action_type": action_type,
"action_data": action_data,
"reasoning": reasoning,
"is_parallel": is_parallel,
}
except Exception as e:
logger.error(f"[{self.stream_name}] Planner执行失败: {e}")
return no_action
# 并行执行回复生成和动作规划
self.action_type = None # 初始化动作类型
self.is_parallel_action = False # 初始化并行动作标志
gen_task = asyncio.create_task(generate_normal_response())
plan_task = asyncio.create_task(plan_and_execute_actions())
gen_task = asyncio.create_task(self._generate_normal_response(message, available_actions))
plan_task = asyncio.create_task(self._plan_and_execute_actions(message, thinking_id))
try:
gather_timeout = global_config.normal_chat.thinking_timeout
gather_timeout = global_config.chat.thinking_timeout
results = await asyncio.wait_for(
asyncio.gather(gen_task, plan_task, return_exceptions=True),
timeout=gather_timeout,
@@ -922,12 +595,12 @@ class NormalChat:
logger.warning(
f"[{self.stream_name}] 并行执行回复生成和动作规划超时 ({gather_timeout}秒),正在取消相关任务..."
)
print(f"111{self.timeout_count}")
self.timeout_count += 1
if self.timeout_count > 5:
logger.error(
f"[{self.stream_name}] 连续回复超时,{global_config.normal_chat.thinking_timeout}秒 内大模型没有返回有效内容请检查你的api是否速度过慢或配置错误。建议不要使用推理模型推理模型生成速度过慢。"
logger.warning(
f"[{self.stream_name}] 连续回复超时次数过多{global_config.chat.thinking_timeout}秒 内大模型没有返回有效内容请检查你的api是否速度过慢或配置错误。建议不要使用推理模型推理模型生成速度过慢。或者尝试拉高thinking_timeout参数这可能导致回复时间过长。"
)
return False
# 取消未完成的任务
if not gen_task.done():
@@ -969,8 +642,15 @@ class NormalChat:
logger.info(f"[{self.stream_name}] 已停用,忽略 normal_response。")
return False
# 提取回复文本
reply_texts = [item[1] for item in response_set if item[0] == "text"]
if not reply_texts:
logger.info(f"[{self.stream_name}] 回复内容中没有文本,不发送消息")
await self._cleanup_thinking_message_by_id(thinking_id)
return False
# 发送回复 (不再需要传入 chat)
first_bot_msg = await self._add_messages_to_manager(message, response_set, thinking_id)
first_bot_msg = await self._add_messages_to_manager(message, reply_texts, thinking_id)
# 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况)
if first_bot_msg:
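This hunk also changes how a generated reply is unpacked before sending: the reply set is treated as a list of (type, content) pairs and only the text items are forwarded; when nothing textual remains, the thinking message is cleaned up and no reply goes out. A tiny illustration with made-up values:

# reply_set shape assumed from the diff: a list of (type, content) tuples
reply_set = [("text", "好的"), ("emoji", "smile.png"), ("text", "马上来")]

reply_texts = [content for kind, content in reply_set if kind == "text"]
response_msg = " ".join(reply_texts)  # -> "好的 马上来"

if not reply_texts:
    # nothing textual to send: the thinking message would be cleaned up instead
    print("skip sending")
else:
    print(response_msg)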
@@ -1252,100 +932,6 @@ class NormalChat:
"""获取动作管理器实例"""
return self.action_manager
async def _check_relation_building_conditions(self, message: MessageRecv):
"""检查person_engaged_cache中是否有满足关系构建条件的用户"""
# 执行定期清理
self._cleanup_old_segments()
# 更新消息段信息
self._update_user_message_segments(message)
users_to_build_relationship = []
for person_id, segments in list(self.person_engaged_cache.items()):
total_message_count = self._get_total_message_count(person_id)
if total_message_count >= 45:
users_to_build_relationship.append(person_id)
logger.info(
f"[{self.stream_name}] 用户 {person_id} 满足关系构建条件,总消息数:{total_message_count},消息段数:{len(segments)}"
)
elif total_message_count > 0:
# 记录进度信息
logger.debug(
f"[{self.stream_name}] 用户 {person_id} 进度:{total_message_count}/45 条消息,{len(segments)} 个消息段"
)
# 为满足条件的用户构建关系
for person_id in users_to_build_relationship:
segments = self.person_engaged_cache[person_id]
# 异步执行关系构建
asyncio.create_task(self._build_relation_for_person_segments(person_id, segments))
# 移除已处理的用户缓存
del self.person_engaged_cache[person_id]
self._save_cache()
logger.info(f"[{self.stream_name}] 用户 {person_id} 关系构建已启动,缓存已清理")
async def _build_relation_for_person_segments(self, person_id: str, segments: List[Dict[str, any]]):
"""基于消息段更新用户印象统一使用focus chat的构建方式"""
if not segments:
return
logger.debug(f"[{self.stream_name}] 开始为 {person_id} 基于 {len(segments)} 个消息段更新印象")
try:
processed_messages = []
for i, segment in enumerate(segments):
start_time = segment["start_time"]
end_time = segment["end_time"]
segment["message_count"]
start_date = time.strftime("%Y-%m-%d %H:%M", time.localtime(start_time))
# 获取该段的消息(包含边界)
segment_messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.stream_id, start_time, end_time)
logger.debug(
f"[{self.stream_name}] 消息段 {i + 1}: {start_date} - {time.strftime('%Y-%m-%d %H:%M', time.localtime(end_time))}, 消息数: {len(segment_messages)}"
)
if segment_messages:
# 如果不是第一个消息段,在消息列表前添加间隔标识
if i > 0:
# 创建一个特殊的间隔消息
gap_message = {
"time": start_time - 0.1, # 稍微早于段开始时间
"user_id": "system",
"user_platform": "system",
"user_nickname": "系统",
"user_cardname": "",
"display_message": f"...(中间省略一些消息){start_date} 之后的消息如下...",
"is_action_record": True,
"chat_info_platform": segment_messages[0].get("chat_info_platform", ""),
"chat_id": self.stream_id,
}
processed_messages.append(gap_message)
# 添加该段的所有消息
processed_messages.extend(segment_messages)
if processed_messages:
# 按时间排序所有消息(包括间隔标识)
processed_messages.sort(key=lambda x: x["time"])
logger.debug(
f"[{self.stream_name}] 为 {person_id} 获取到总共 {len(processed_messages)} 条消息(包含间隔标识)用于印象更新"
)
relationship_manager = get_relationship_manager()
# 调用统一的更新方法
await relationship_manager.update_person_impression(
person_id=person_id, timestamp=time.time(), bot_engaged_messages=processed_messages
)
else:
logger.debug(f"[{self.stream_name}] 没有找到 {person_id} 的消息段对应的消息,不更新印象")
except Exception as e:
logger.error(f"[{self.stream_name}] 为 {person_id} 更新印象时发生错误: {e}")
logger.error(traceback.format_exc())
def _get_fatigue_reply_multiplier(self) -> float:
"""获取疲劳期回复频率调整系数
@@ -1369,7 +955,6 @@ class NormalChat:
except Exception as e:
logger.warning(f"[{self.stream_name}] 获取疲劳调整系数时出错: {e}")
return 1.0 # 出错时返回正常系数
async def _check_should_switch_to_focus(self) -> bool:
"""
检查是否满足切换到focus模式的条件
@@ -1417,3 +1002,4 @@ class NormalChat:
break
except Exception as e:
logger.error(f"[{self.stream_name}] 清理思考消息 {thinking_id} 时出错: {e}")

View File

@@ -5,6 +5,7 @@ from src.chat.utils.chat_message_builder import build_readable_messages, get_raw
from src.config.config import global_config
import random
import time
import asyncio
logger = get_logger("normal_chat_action_modifier")
@@ -184,6 +185,7 @@ class NormalChatActionModifier:
always_actions = {}
random_actions = {}
keyword_actions = {}
llm_judge_actions = {}
for action_name, action_info in actions_with_info.items():
# 使用normal_activation_type
@@ -192,8 +194,10 @@ class NormalChatActionModifier:
# 现在统一是字符串格式的激活类型值
if activation_type == "always":
always_actions[action_name] = action_info
elif activation_type == "random" or activation_type == "llm_judge":
elif activation_type == "random":
random_actions[action_name] = action_info
elif activation_type == "llm_judge":
llm_judge_actions[action_name] = action_info
elif activation_type == "keyword":
keyword_actions[action_name] = action_info
else:
@@ -225,6 +229,24 @@ class NormalChatActionModifier:
keywords = action_info.get("activation_keywords", [])
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: KEYWORD类型未匹配关键词{keywords}")
# 4. 处理LLM_JUDGE类型并行判定
if llm_judge_actions:
# 直接并行处理所有LLM判定actions
llm_results = await self._process_llm_judge_actions_parallel(
llm_judge_actions,
chat_content,
)
# 添加激活的LLM判定actions
for action_name, should_activate in llm_results.items():
if should_activate:
activated_actions[action_name] = llm_judge_actions[action_name]
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: LLM_JUDGE类型判定通过")
else:
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: LLM_JUDGE类型判定未通过")
logger.debug(f"{self.log_prefix}Normal模式激活类型过滤完成: {list(activated_actions.keys())}")
return activated_actions
@@ -277,6 +299,93 @@ class NormalChatActionModifier:
else:
logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}")
return False
async def _process_llm_judge_actions_parallel(
self,
llm_judge_actions: Dict[str, Any],
chat_content: str = "",
) -> Dict[str, bool]:
"""
并行处理LLM判定actions支持智能缓存
Args:
llm_judge_actions: 需要LLM判定的actions
chat_content: 聊天内容
Returns:
Dict[str, bool]: action名称到激活结果的映射
"""
# 生成当前上下文的哈希值
current_context_hash = self._generate_context_hash(chat_content)
current_time = time.time()
results = {}
tasks_to_run = {}
# 检查缓存
for action_name, action_info in llm_judge_actions.items():
cache_key = f"{action_name}_{current_context_hash}"
# 检查是否有有效的缓存
if (
cache_key in self._llm_judge_cache
and current_time - self._llm_judge_cache[cache_key]["timestamp"] < self._cache_expiry_time
):
results[action_name] = self._llm_judge_cache[cache_key]["result"]
logger.debug(
f"{self.log_prefix}使用缓存结果 {action_name}: {'激活' if results[action_name] else '未激活'}"
)
else:
# 需要进行LLM判定
tasks_to_run[action_name] = action_info
# 如果有需要运行的任务,并行执行
if tasks_to_run:
logger.debug(f"{self.log_prefix}并行执行LLM判定任务数: {len(tasks_to_run)}")
# 创建并行任务
tasks = []
task_names = []
for action_name, action_info in tasks_to_run.items():
task = self._llm_judge_action(
action_name,
action_info,
chat_content,
)
tasks.append(task)
task_names.append(action_name)
# 并行执行所有任务
try:
task_results = await asyncio.gather(*tasks, return_exceptions=True)
# 处理结果并更新缓存
for _, (action_name, result) in enumerate(zip(task_names, task_results)):
if isinstance(result, Exception):
logger.error(f"{self.log_prefix}LLM判定action {action_name} 时出错: {result}")
results[action_name] = False
else:
results[action_name] = result
# 更新缓存
cache_key = f"{action_name}_{current_context_hash}"
self._llm_judge_cache[cache_key] = {"result": result, "timestamp": current_time}
logger.debug(f"{self.log_prefix}并行LLM判定完成耗时: {time.time() - current_time:.2f}s")
except Exception as e:
logger.error(f"{self.log_prefix}并行LLM判定失败: {e}")
# 如果并行执行失败为所有任务返回False
for action_name in tasks_to_run.keys():
results[action_name] = False
# 清理过期缓存
self._cleanup_expired_cache(current_time)
return results
def get_available_actions_count(self) -> int:
"""获取当前可用动作数量排除默认的no_action"""

View File

@@ -1,72 +0,0 @@
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.message_receive.message import MessageThinking
from src.common.logger import get_logger
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
from src.chat.utils.utils import process_llm_response
from src.plugin_system.apis import generator_api
from src.chat.focus_chat.memory_activator import MemoryActivator
logger = get_logger("normal_chat_response")
class NormalChatGenerator:
def __init__(self):
model_config_1 = global_config.model.replyer_1.copy()
model_config_2 = global_config.model.replyer_2.copy()
prob_first = global_config.chat.replyer_random_probability
model_config_1["weight"] = prob_first
model_config_2["weight"] = 1.0 - prob_first
self.model_configs = [model_config_1, model_config_2]
self.model_sum = LLMRequest(model=global_config.model.memory_summary, temperature=0.7, request_type="relation")
self.memory_activator = MemoryActivator()
async def generate_response(
self,
message: MessageThinking,
available_actions=None,
):
logger.info(
f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
)
person_id = PersonInfoManager.get_person_id(
message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
)
person_info_manager = get_person_info_manager()
person_name = await person_info_manager.get_value(person_id, "person_name")
relation_info = await person_info_manager.get_value(person_id, "short_impression")
reply_to_str = f"{person_name}:{message.processed_plain_text}"
try:
success, reply_set, prompt = await generator_api.generate_reply(
chat_stream=message.chat_stream,
reply_to=reply_to_str,
relation_info=relation_info,
available_actions=available_actions,
enable_tool=global_config.tool.enable_in_normal_chat,
model_configs=self.model_configs,
request_type="normal.replyer",
return_prompt=True,
)
if not success or not reply_set:
logger.info(f"{message.processed_plain_text} 的回复生成失败")
return None
content = " ".join([item[1] for item in reply_set if item[0] == "text"])
logger.debug(f"{message.processed_plain_text} 的回复:{content}")
if content:
logger.info(f"{global_config.bot.nickname}的备选回复是:{content}")
content = process_llm_response(content)
return content
except Exception:
logger.exception("生成回复时出错")
return None

View File

@@ -49,10 +49,8 @@ def init_prompt():
Prompt(
"""
动作:{action_name}
动作描述:{action_description}
使用该动作的场景:
动作描述:{action_description}
{action_require}
输出要求:
{{
"action": "{action_name}",{action_parameters}
}}
@@ -160,8 +158,8 @@ class NormalChatPlanner:
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
logger.info(f"{self.log_prefix}规划器原始响应: {content}")
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
logger.info(f"{self.log_prefix}规划器模型: {model_name}")
if reasoning_content:
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
# 解析JSON响应
try: