fix - Refactor normal_chat to build relationships the same way focus does, improve logging, add reply-timeout checks, and allow LLM activation for normal-mode plugins
@@ -5,6 +5,11 @@
Improvements and fixes:

- Fixed a bug where, in auto mode, private chats would switch to normal mode
- Fixed an ordering issue in general message filtering
- Refactored the normal_chat code to build relationships the same way focus does
- Improved timing information and logging
- Added reply-timeout checks (see the sketch after this list)
- Plugins in normal mode can now be activated via LLM judgment

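For context, a minimal sketch of the reply-timeout counting pattern that the new check follows (illustrative only: the helper below is not part of the codebase; the threshold of 5 consecutive timeouts and the `thinking_timeout` name mirror what the diff adds to HeartFChatting, and resetting the counter on success is an assumption):

```python
import asyncio
from typing import Awaitable, Callable, Optional


async def generate_with_timeout(
    generate: Callable[[], Awaitable[str]],
    thinking_timeout: float,
    state: dict,
    max_consecutive_timeouts: int = 5,
) -> Optional[str]:
    """Run one reply generation, counting consecutive timeouts (sketch only)."""
    try:
        reply = await asyncio.wait_for(generate(), timeout=thinking_timeout)
        state["timeout_count"] = 0  # assumption: reset the counter on success
        return reply
    except asyncio.TimeoutError:
        state["timeout_count"] = state.get("timeout_count", 0) + 1
        if state["timeout_count"] > max_consecutive_timeouts:
            # warn the operator, as the new check does in its logs
            print("连续回复超时:请检查 API 速度/配置,或调高 thinking_timeout")
        return None
```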
## [0.8.1] - 2025-7-5
@@ -29,7 +29,7 @@ def init_prompt() -> None:
4. 思考有没有特殊的梗,一并总结成语言风格
5. 例子仅供参考,请严格根据群聊内容总结!!!
注意:总结成如下格式的规律,总结的内容要详细,但具有概括性:
当"xxxxxx"时,可以"xxxxxx", xxxxxx不超过20个字,为特定句式或表达
例如:当"AAAAA"时,可以"BBBBB", AAAAA代表某个具体的场景,不超过20个字。BBBBB代表对应的语言风格,特定句式或表达方式,不超过20个字。

例如:
当"对某件事表示十分惊叹,有些意外"时,使用"我嘞个xxxx"
@@ -69,7 +69,7 @@ class ExpressionLearner:
# TODO: API-Adapter修改标记
self.express_learn_model: LLMRequest = LLMRequest(
model=global_config.model.replyer_1,
temperature=0.2,
temperature=0.3,
request_type="expressor.learner",
)
self.llm_model = None

@@ -21,7 +21,7 @@ from src.chat.heart_flow.observation.actions_observation import ActionObservatio
from src.chat.focus_chat.memory_activator import MemoryActivator
from src.chat.focus_chat.info_processors.base_processor import BaseProcessor
from src.chat.focus_chat.planners.planner_factory import PlannerFactory
from src.chat.focus_chat.planners.planner_simple import ActionPlanner
from src.chat.focus_chat.planners.modify_actions import ActionModifier
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.config.config import global_config
@@ -119,7 +119,7 @@ class HeartFChatting:
self._register_default_processors()

self.action_manager = ActionManager()
self.action_planner = PlannerFactory.create_planner(
self.action_planner = ActionPlanner(
log_prefix=self.log_prefix, action_manager=self.action_manager
)
self.action_modifier = ActionModifier(action_manager=self.action_manager)
@@ -141,6 +141,9 @@ class HeartFChatting:
# 存储回调函数
self.on_stop_focus_chat = on_stop_focus_chat

self.reply_timeout_count = 0
self.plan_timeout_count = 0

# 初始化性能记录器
# 如果没有指定版本号,则使用全局版本管理器的版本号
actual_version = performance_version or get_hfc_version()
@@ -382,24 +385,12 @@ class HeartFChatting:
formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
timer_strings.append(f"{name}: {formatted_time}")

# 新增:输出每个处理器的耗时
processor_time_costs = self._current_cycle_detail.loop_processor_info.get(
"processor_time_costs", {}
)
processor_time_strings = []
for pname, ptime in processor_time_costs.items():
formatted_ptime = f"{ptime * 1000:.2f}毫秒" if ptime < 1 else f"{ptime:.2f}秒"
processor_time_strings.append(f"{pname}: {formatted_ptime}")
processor_time_log = (
("\n前处理器耗时: " + "; ".join(processor_time_strings)) if processor_time_strings else ""
)

logger.info(
f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考,"
f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, "
f"动作: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}"
f"选择动作: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}"
+ (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "")
+ processor_time_log
)

# 记录性能数据
@@ -410,7 +401,6 @@ class HeartFChatting:
|
||||
"action_type": action_result.get("action_type", "unknown"),
|
||||
"total_time": self._current_cycle_detail.end_time - self._current_cycle_detail.start_time,
|
||||
"step_times": cycle_timers.copy(),
|
||||
"processor_time_costs": processor_time_costs, # 处理器时间
|
||||
"reasoning": action_result.get("reasoning", ""),
|
||||
"success": self._current_cycle_detail.loop_action_info.get("action_taken", False),
|
||||
}
|
||||
@@ -491,13 +481,12 @@ class HeartFChatting:
|
||||
|
||||
processor_tasks = []
|
||||
task_to_name_map = {}
|
||||
processor_time_costs = {} # 新增: 记录每个处理器耗时
|
||||
|
||||
for processor in self.processors:
|
||||
processor_name = processor.__class__.log_prefix
|
||||
|
||||
async def run_with_timeout(proc=processor):
|
||||
return await asyncio.wait_for(proc.process_info(observations=observations), 30)
|
||||
return await proc.process_info(observations=observations)
|
||||
|
||||
task = asyncio.create_task(run_with_timeout())
|
||||
|
||||
@@ -518,39 +507,20 @@ class HeartFChatting:
|
||||
|
||||
try:
|
||||
result_list = await task
|
||||
logger.info(f"{self.log_prefix} 处理器 {processor_name} 已完成!")
|
||||
logger.debug(f"{self.log_prefix} 处理器 {processor_name} 已完成!")
|
||||
if result_list is not None:
|
||||
all_plan_info.extend(result_list)
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 处理器 {processor_name} 返回了 None")
|
||||
# 记录耗时
|
||||
processor_time_costs[processor_name] = duration_since_parallel_start
|
||||
except asyncio.TimeoutError:
|
||||
logger.info(f"{self.log_prefix} 处理器 {processor_name} 超时(>30s),已跳过")
|
||||
processor_time_costs[processor_name] = 30
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"{self.log_prefix} 处理器 {processor_name} 执行失败,耗时 (自并行开始): {duration_since_parallel_start:.2f}秒. 错误: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
traceback.print_exc()
|
||||
processor_time_costs[processor_name] = duration_since_parallel_start
|
||||
|
||||
if pending_tasks:
|
||||
current_progress_time = time.time()
|
||||
elapsed_for_log = current_progress_time - parallel_start_time
|
||||
pending_names_for_log = [task_to_name_map[t] for t in pending_tasks]
|
||||
logger.info(
|
||||
f"{self.log_prefix} 信息处理已进行 {elapsed_for_log:.2f}秒,待完成任务: {', '.join(pending_names_for_log)}"
|
||||
)
|
||||
|
||||
# 所有任务完成后的最终日志
|
||||
parallel_end_time = time.time()
|
||||
total_duration = parallel_end_time - parallel_start_time
|
||||
logger.info(f"{self.log_prefix} 所有处理器任务全部完成,总耗时: {total_duration:.2f}秒")
|
||||
# logger.debug(f"{self.log_prefix} 所有信息处理器处理后的信息: {all_plan_info}")
|
||||
|
||||
return all_plan_info, processor_time_costs
|
||||
return all_plan_info
|
||||
|
||||
async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
|
||||
try:
|
||||
@@ -582,19 +552,16 @@ class HeartFChatting:
|
||||
logger.error(f"{self.log_prefix} 动作修改失败: {e}")
|
||||
# 继续执行,不中断流程
|
||||
|
||||
# 第二步:信息处理器
|
||||
with Timer("信息处理器", cycle_timers):
|
||||
try:
|
||||
all_plan_info, processor_time_costs = await self._process_processors(self.observations)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 信息处理器失败: {e}")
|
||||
# 设置默认值以继续执行
|
||||
all_plan_info = []
|
||||
processor_time_costs = {}
|
||||
|
||||
try:
|
||||
all_plan_info = await self._process_processors(self.observations)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 信息处理器失败: {e}")
|
||||
# 设置默认值以继续执行
|
||||
all_plan_info = []
|
||||
|
||||
loop_processor_info = {
|
||||
"all_plan_info": all_plan_info,
|
||||
"processor_time_costs": processor_time_costs,
|
||||
}
|
||||
|
||||
logger.debug(f"{self.log_prefix} 并行阶段完成,准备进入规划器,plan_info数量: {len(all_plan_info)}")
|
||||
@@ -737,8 +704,15 @@ class HeartFChatting:
|
||||
logger.info(
|
||||
f"{self.log_prefix} [非auto模式] 已发送 {self._message_count} 条消息,达到疲惫阈值 {current_threshold},但非auto模式不会自动退出"
|
||||
)
|
||||
|
||||
logger.debug(f"{self.log_prefix} 麦麦执行了'{action}', 返回结果'{success}', '{reply_text}', '{command}'")
|
||||
else:
|
||||
if reply_text == "timeout":
|
||||
self.reply_timeout_count += 1
|
||||
if self.reply_timeout_count > 5:
|
||||
logger.warning(
|
||||
f"[{self.log_prefix} ] 连续回复超时次数过多,{global_config.chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。或者尝试拉高thinking_timeout参数,这可能导致回复时间过长。"
|
||||
)
|
||||
logger.warning(f"{self.log_prefix} 回复生成超时{global_config.chat.thinking_timeout}s,已跳过")
|
||||
return False, "", ""
|
||||
|
||||
return success, reply_text, command
|
||||
|
||||
|
||||
@@ -117,14 +117,14 @@ class MemoryActivator:
|
||||
|
||||
# 添加新的关键词到缓存
|
||||
self.cached_keywords.update(keywords)
|
||||
logger.info(f"当前激活的记忆关键词: {self.cached_keywords}")
|
||||
|
||||
|
||||
# 调用记忆系统获取相关记忆
|
||||
related_memory = await hippocampus_manager.get_memory_from_topic(
|
||||
valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
|
||||
)
|
||||
|
||||
logger.info(f"获取到的记忆: {related_memory}")
|
||||
logger.info(f"当前记忆关键词: {self.cached_keywords} 。获取到的记忆: {related_memory}")
|
||||
|
||||
# 激活时,所有已有记忆的duration+1,达到3则移除
|
||||
for m in self.running_memory[:]:
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
from typing import Dict, Type
|
||||
from src.chat.focus_chat.planners.base_planner import BasePlanner
|
||||
from src.chat.focus_chat.planners.planner_simple import ActionPlanner as SimpleActionPlanner
|
||||
from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
from src.common.logger import get_logger
|
||||
|
||||
logger = get_logger("planner_factory")
|
||||
|
||||
|
||||
class PlannerFactory:
|
||||
"""规划器工厂类,用于创建不同类型的规划器实例"""
|
||||
|
||||
# 注册所有可用的规划器类型
|
||||
_planner_types: Dict[str, Type[BasePlanner]] = {
|
||||
"simple": SimpleActionPlanner,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def register_planner(cls, name: str, planner_class: Type[BasePlanner]) -> None:
|
||||
"""
|
||||
注册新的规划器类型
|
||||
|
||||
Args:
|
||||
name: 规划器类型名称
|
||||
planner_class: 规划器类
|
||||
"""
|
||||
cls._planner_types[name] = planner_class
|
||||
logger.info(f"注册新的规划器类型: {name}")
|
||||
|
||||
@classmethod
|
||||
def create_planner(cls, log_prefix: str, action_manager: ActionManager) -> BasePlanner:
|
||||
"""
|
||||
创建规划器实例
|
||||
|
||||
Args:
|
||||
log_prefix: 日志前缀
|
||||
action_manager: 动作管理器实例
|
||||
|
||||
Returns:
|
||||
BasePlanner: 规划器实例
|
||||
"""
|
||||
|
||||
planner_class = cls._planner_types["simple"]
|
||||
logger.info(f"{log_prefix} 使用simple规划器")
|
||||
return planner_class(log_prefix=log_prefix, action_manager=action_manager)
|
||||
@@ -58,6 +58,8 @@ def init_prompt():
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
动作:{action_name}
|
||||
动作描述:{action_description}
|
||||
{action_require}
|
||||
{{
|
||||
"action": "{action_name}",{action_parameters}
|
||||
@@ -66,16 +68,6 @@ def init_prompt():
|
||||
"action_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
{action_require}
|
||||
{{
|
||||
"action": "{action_name}",{action_parameters}
|
||||
}}
|
||||
""",
|
||||
"action_prompt_private",
|
||||
)
|
||||
|
||||
|
||||
class ActionPlanner(BasePlanner):
|
||||
def __init__(self, log_prefix: str, action_manager: ActionManager):
|
||||
@@ -191,7 +183,8 @@ class ActionPlanner(BasePlanner):
|
||||
|
||||
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
|
||||
logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
|
||||
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
|
||||
if reasoning_content:
|
||||
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
|
||||
|
||||
except Exception as req_e:
|
||||
logger.error(f"{self.log_prefix}LLM 请求执行失败: {req_e}")
|
||||
|
||||
@@ -784,12 +784,12 @@ class Hippocampus:
|
||||
|
||||
# 计算激活节点数与总节点数的比值
|
||||
total_activation = sum(activate_map.values())
|
||||
logger.debug(f"总激活值: {total_activation:.2f}")
|
||||
# logger.debug(f"总激活值: {total_activation:.2f}")
|
||||
total_nodes = len(self.memory_graph.G.nodes())
|
||||
# activated_nodes = len(activate_map)
|
||||
activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
|
||||
activation_ratio = activation_ratio * 60
|
||||
logger.info(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
|
||||
logger.debug(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
|
||||
|
||||
return activation_ratio
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
import asyncio
|
||||
import time
|
||||
from random import random
|
||||
from typing import List, Dict, Optional
|
||||
import os
|
||||
import pickle
|
||||
from maim_message import UserInfo, Seg
|
||||
from typing import List, Optional
|
||||
from src.config.config import global_config
|
||||
from src.common.logger import get_logger
|
||||
from src.person_info.person_info import get_person_info_manager
|
||||
from src.plugin_system.apis import generator_api
|
||||
from maim_message import UserInfo, Seg
|
||||
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
|
||||
from src.chat.utils.timer_calculator import Timer
|
||||
|
||||
@@ -14,20 +15,10 @@ from ..message_receive.message import MessageSending, MessageRecv, MessageThinki
|
||||
from src.chat.message_receive.message_sender import message_manager
|
||||
from src.chat.normal_chat.willing.willing_manager import get_willing_manager
|
||||
from src.chat.normal_chat.normal_chat_utils import get_recent_message_stats
|
||||
from src.config.config import global_config
|
||||
from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
from src.person_info.person_info import PersonInfoManager
|
||||
from src.person_info.relationship_manager import get_relationship_manager
|
||||
from src.chat.utils.chat_message_builder import (
|
||||
get_raw_msg_by_timestamp_with_chat,
|
||||
get_raw_msg_by_timestamp_with_chat_inclusive,
|
||||
get_raw_msg_before_timestamp_with_chat,
|
||||
num_new_messages_since,
|
||||
)
|
||||
from src.person_info.relationship_builder_manager import relationship_builder_manager
|
||||
from .priority_manager import PriorityManager
|
||||
import traceback
|
||||
|
||||
from .normal_chat_generator import NormalChatGenerator
|
||||
from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner
|
||||
from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier
|
||||
|
||||
@@ -38,15 +29,6 @@ willing_manager = get_willing_manager()
|
||||
|
||||
logger = get_logger("normal_chat")
|
||||
|
||||
# 消息段清理配置
|
||||
SEGMENT_CLEANUP_CONFIG = {
|
||||
"enable_cleanup": True, # 是否启用清理
|
||||
"max_segment_age_days": 7, # 消息段最大保存天数
|
||||
"max_segments_per_user": 10, # 每用户最大消息段数
|
||||
"cleanup_interval_hours": 1, # 清理间隔(小时)
|
||||
}
|
||||
|
||||
|
||||
class NormalChat:
|
||||
"""
|
||||
普通聊天处理类,负责处理非核心对话的聊天逻辑。
|
||||
@@ -71,6 +53,8 @@ class NormalChat:
|
||||
|
||||
self.stream_name = get_chat_manager().get_stream_name(self.stream_id) or self.stream_id
|
||||
|
||||
self.relationship_builder = relationship_builder_manager.get_or_create_builder(self.stream_id)
|
||||
|
||||
# Interest dict
|
||||
self.interest_dict = interest_dict
|
||||
|
||||
@@ -78,9 +62,7 @@ class NormalChat:
|
||||
|
||||
self.willing_amplifier = 1
|
||||
self.start_time = time.time()
|
||||
|
||||
# Other sync initializations
|
||||
self.gpt = NormalChatGenerator()
|
||||
|
||||
self.mood_manager = mood_manager
|
||||
self.start_time = time.time()
|
||||
|
||||
@@ -96,18 +78,6 @@ class NormalChat:
|
||||
self.recent_replies = []
|
||||
self.max_replies_history = 20 # 最多保存最近20条回复记录
|
||||
|
||||
# 新的消息段缓存结构:
|
||||
# {person_id: [{"start_time": float, "end_time": float, "last_msg_time": float, "message_count": int}, ...]}
|
||||
self.person_engaged_cache: Dict[str, List[Dict[str, any]]] = {}
|
||||
|
||||
# 持久化存储文件路径
|
||||
self.cache_file_path = os.path.join("data", "relationship", f"relationship_cache_{self.stream_id}.pkl")
|
||||
|
||||
# 最后处理的消息时间,避免重复处理相同消息
|
||||
self.last_processed_message_time = 0.0
|
||||
|
||||
# 最后清理时间,用于定期清理老消息段
|
||||
self.last_cleanup_time = 0.0
|
||||
|
||||
# 添加回调函数,用于在满足条件时通知切换到focus_chat模式
|
||||
self.on_switch_to_focus_callback = on_switch_to_focus_callback
|
||||
@@ -119,11 +89,6 @@ class NormalChat:
|
||||
|
||||
self.timeout_count = 0
|
||||
|
||||
# 加载持久化的缓存
|
||||
self._load_cache()
|
||||
|
||||
logger.debug(f"[{self.stream_name}] NormalChat 初始化完成 (异步部分)。")
|
||||
|
||||
self.action_type: Optional[str] = None # 当前动作类型
|
||||
self.is_parallel_action: bool = False # 是否是可并行动作
|
||||
|
||||
@@ -151,320 +116,25 @@ class NormalChat:
|
||||
self._priority_chat_task.cancel()
|
||||
logger.info(f"[{self.stream_name}] NormalChat 已停用。")
|
||||
|
||||
# ================================
|
||||
# 缓存管理模块
|
||||
# 负责持久化存储、状态管理、缓存读写
|
||||
# ================================
|
||||
|
||||
def _load_cache(self):
|
||||
"""从文件加载持久化的缓存"""
|
||||
if os.path.exists(self.cache_file_path):
|
||||
try:
|
||||
with open(self.cache_file_path, "rb") as f:
|
||||
cache_data = pickle.load(f)
|
||||
# 新格式:包含额外信息的缓存
|
||||
self.person_engaged_cache = cache_data.get("person_engaged_cache", {})
|
||||
self.last_processed_message_time = cache_data.get("last_processed_message_time", 0.0)
|
||||
self.last_cleanup_time = cache_data.get("last_cleanup_time", 0.0)
|
||||
|
||||
logger.info(
|
||||
f"[{self.stream_name}] 成功加载关系缓存,包含 {len(self.person_engaged_cache)} 个用户,最后处理时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_processed_message_time)) if self.last_processed_message_time > 0 else '未设置'}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] 加载关系缓存失败: {e}")
|
||||
self.person_engaged_cache = {}
|
||||
self.last_processed_message_time = 0.0
|
||||
else:
|
||||
logger.info(f"[{self.stream_name}] 关系缓存文件不存在,使用空缓存")
|
||||
|
||||
def _save_cache(self):
|
||||
"""保存缓存到文件"""
|
||||
try:
|
||||
os.makedirs(os.path.dirname(self.cache_file_path), exist_ok=True)
|
||||
cache_data = {
|
||||
"person_engaged_cache": self.person_engaged_cache,
|
||||
"last_processed_message_time": self.last_processed_message_time,
|
||||
"last_cleanup_time": self.last_cleanup_time,
|
||||
}
|
||||
with open(self.cache_file_path, "wb") as f:
|
||||
pickle.dump(cache_data, f)
|
||||
logger.debug(f"[{self.stream_name}] 成功保存关系缓存")
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] 保存关系缓存失败: {e}")
|
||||
|
||||
# ================================
|
||||
# 消息段管理模块
|
||||
# 负责跟踪用户消息活动、管理消息段、清理过期数据
|
||||
# ================================
|
||||
|
||||
def _update_message_segments(self, person_id: str, message_time: float):
|
||||
"""更新用户的消息段
|
||||
|
||||
Args:
|
||||
person_id: 用户ID
|
||||
message_time: 消息时间戳
|
||||
"""
|
||||
if person_id not in self.person_engaged_cache:
|
||||
self.person_engaged_cache[person_id] = []
|
||||
|
||||
segments = self.person_engaged_cache[person_id]
|
||||
current_time = time.time()
|
||||
|
||||
# 获取该消息前5条消息的时间作为潜在的开始时间
|
||||
before_messages = get_raw_msg_before_timestamp_with_chat(self.stream_id, message_time, limit=5)
|
||||
if before_messages:
|
||||
# 由于get_raw_msg_before_timestamp_with_chat返回按时间升序排序的消息,最后一个是最接近message_time的
|
||||
# 我们需要第一个消息作为开始时间,但应该确保至少包含5条消息或该用户之前的消息
|
||||
potential_start_time = before_messages[0]["time"]
|
||||
else:
|
||||
# 如果没有前面的消息,就从当前消息开始
|
||||
potential_start_time = message_time
|
||||
|
||||
# 如果没有现有消息段,创建新的
|
||||
if not segments:
|
||||
new_segment = {
|
||||
"start_time": potential_start_time,
|
||||
"end_time": message_time,
|
||||
"last_msg_time": message_time,
|
||||
"message_count": self._count_messages_in_timerange(potential_start_time, message_time),
|
||||
}
|
||||
segments.append(new_segment)
|
||||
logger.debug(
|
||||
f"[{self.stream_name}] 为用户 {person_id} 创建新消息段: 时间范围 {time.strftime('%H:%M:%S', time.localtime(potential_start_time))} - {time.strftime('%H:%M:%S', time.localtime(message_time))}, 消息数: {new_segment['message_count']}"
|
||||
)
|
||||
self._save_cache()
|
||||
return
|
||||
|
||||
# 获取最后一个消息段
|
||||
last_segment = segments[-1]
|
||||
|
||||
# 计算从最后一条消息到当前消息之间的消息数量(不包含边界)
|
||||
messages_between = self._count_messages_between(last_segment["last_msg_time"], message_time)
|
||||
|
||||
if messages_between <= 10:
|
||||
# 在10条消息内,延伸当前消息段
|
||||
last_segment["end_time"] = message_time
|
||||
last_segment["last_msg_time"] = message_time
|
||||
# 重新计算整个消息段的消息数量
|
||||
last_segment["message_count"] = self._count_messages_in_timerange(
|
||||
last_segment["start_time"], last_segment["end_time"]
|
||||
)
|
||||
logger.debug(f"[{self.stream_name}] 延伸用户 {person_id} 的消息段: {last_segment}")
|
||||
else:
|
||||
# 超过10条消息,结束当前消息段并创建新的
|
||||
# 结束当前消息段:延伸到原消息段最后一条消息后5条消息的时间
|
||||
after_messages = get_raw_msg_by_timestamp_with_chat(
|
||||
self.stream_id, last_segment["last_msg_time"], current_time, limit=5, limit_mode="earliest"
|
||||
)
|
||||
if after_messages and len(after_messages) >= 5:
|
||||
# 如果有足够的后续消息,使用第5条消息的时间作为结束时间
|
||||
last_segment["end_time"] = after_messages[4]["time"]
|
||||
else:
|
||||
# 如果没有足够的后续消息,保持原有的结束时间
|
||||
pass
|
||||
|
||||
# 重新计算当前消息段的消息数量
|
||||
last_segment["message_count"] = self._count_messages_in_timerange(
|
||||
last_segment["start_time"], last_segment["end_time"]
|
||||
)
|
||||
|
||||
# 创建新的消息段
|
||||
new_segment = {
|
||||
"start_time": potential_start_time,
|
||||
"end_time": message_time,
|
||||
"last_msg_time": message_time,
|
||||
"message_count": self._count_messages_in_timerange(potential_start_time, message_time),
|
||||
}
|
||||
segments.append(new_segment)
|
||||
logger.debug(f"[{self.stream_name}] 为用户 {person_id} 创建新消息段(超过10条消息间隔): {new_segment}")
|
||||
|
||||
self._save_cache()
|
||||
|
||||
def _count_messages_in_timerange(self, start_time: float, end_time: float) -> int:
|
||||
"""计算指定时间范围内的消息数量(包含边界)"""
|
||||
messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.stream_id, start_time, end_time)
|
||||
return len(messages)
|
||||
|
||||
def _count_messages_between(self, start_time: float, end_time: float) -> int:
|
||||
"""计算两个时间点之间的消息数量(不包含边界),用于间隔检查"""
|
||||
return num_new_messages_since(self.stream_id, start_time, end_time)
|
||||
|
||||
def _get_total_message_count(self, person_id: str) -> int:
|
||||
"""获取用户所有消息段的总消息数量"""
|
||||
if person_id not in self.person_engaged_cache:
|
||||
return 0
|
||||
|
||||
total_count = 0
|
||||
for segment in self.person_engaged_cache[person_id]:
|
||||
total_count += segment["message_count"]
|
||||
|
||||
return total_count
|
||||
|
||||
def _cleanup_old_segments(self) -> bool:
|
||||
"""清理老旧的消息段
|
||||
|
||||
Returns:
|
||||
bool: 是否执行了清理操作
|
||||
"""
|
||||
if not SEGMENT_CLEANUP_CONFIG["enable_cleanup"]:
|
||||
return False
|
||||
|
||||
current_time = time.time()
|
||||
|
||||
# 检查是否需要执行清理(基于时间间隔)
|
||||
cleanup_interval_seconds = SEGMENT_CLEANUP_CONFIG["cleanup_interval_hours"] * 3600
|
||||
if current_time - self.last_cleanup_time < cleanup_interval_seconds:
|
||||
return False
|
||||
|
||||
logger.info(f"[{self.stream_name}] 开始执行老消息段清理...")
|
||||
|
||||
cleanup_stats = {
|
||||
"users_cleaned": 0,
|
||||
"segments_removed": 0,
|
||||
"total_segments_before": 0,
|
||||
"total_segments_after": 0,
|
||||
}
|
||||
|
||||
max_age_seconds = SEGMENT_CLEANUP_CONFIG["max_segment_age_days"] * 24 * 3600
|
||||
max_segments_per_user = SEGMENT_CLEANUP_CONFIG["max_segments_per_user"]
|
||||
|
||||
users_to_remove = []
|
||||
|
||||
for person_id, segments in self.person_engaged_cache.items():
|
||||
cleanup_stats["total_segments_before"] += len(segments)
|
||||
original_segment_count = len(segments)
|
||||
|
||||
# 1. 按时间清理:移除过期的消息段
|
||||
segments_after_age_cleanup = []
|
||||
for segment in segments:
|
||||
segment_age = current_time - segment["end_time"]
|
||||
if segment_age <= max_age_seconds:
|
||||
segments_after_age_cleanup.append(segment)
|
||||
else:
|
||||
cleanup_stats["segments_removed"] += 1
|
||||
logger.debug(
|
||||
f"[{self.stream_name}] 移除用户 {person_id} 的过期消息段: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(segment['start_time']))} - {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(segment['end_time']))}"
|
||||
)
|
||||
|
||||
# 2. 按数量清理:如果消息段数量仍然过多,保留最新的
|
||||
if len(segments_after_age_cleanup) > max_segments_per_user:
|
||||
# 按end_time排序,保留最新的
|
||||
segments_after_age_cleanup.sort(key=lambda x: x["end_time"], reverse=True)
|
||||
segments_removed_count = len(segments_after_age_cleanup) - max_segments_per_user
|
||||
cleanup_stats["segments_removed"] += segments_removed_count
|
||||
segments_after_age_cleanup = segments_after_age_cleanup[:max_segments_per_user]
|
||||
logger.debug(
|
||||
f"[{self.stream_name}] 用户 {person_id} 消息段数量过多,移除 {segments_removed_count} 个最老的消息段"
|
||||
)
|
||||
|
||||
# 使用清理后的消息段
|
||||
|
||||
# 更新缓存
|
||||
if len(segments_after_age_cleanup) == 0:
|
||||
# 如果没有剩余消息段,标记用户为待移除
|
||||
users_to_remove.append(person_id)
|
||||
else:
|
||||
self.person_engaged_cache[person_id] = segments_after_age_cleanup
|
||||
cleanup_stats["total_segments_after"] += len(segments_after_age_cleanup)
|
||||
|
||||
if original_segment_count != len(segments_after_age_cleanup):
|
||||
cleanup_stats["users_cleaned"] += 1
|
||||
|
||||
# 移除没有消息段的用户
|
||||
for person_id in users_to_remove:
|
||||
del self.person_engaged_cache[person_id]
|
||||
logger.debug(f"[{self.stream_name}] 移除用户 {person_id}:没有剩余消息段")
|
||||
|
||||
# 更新最后清理时间
|
||||
self.last_cleanup_time = current_time
|
||||
|
||||
# 保存缓存
|
||||
if cleanup_stats["segments_removed"] > 0 or len(users_to_remove) > 0:
|
||||
self._save_cache()
|
||||
logger.info(
|
||||
f"[{self.stream_name}] 清理完成 - 影响用户: {cleanup_stats['users_cleaned']}, 移除消息段: {cleanup_stats['segments_removed']}, 移除用户: {len(users_to_remove)}"
|
||||
)
|
||||
logger.info(
|
||||
f"[{self.stream_name}] 消息段统计 - 清理前: {cleanup_stats['total_segments_before']}, 清理后: {cleanup_stats['total_segments_after']}"
|
||||
)
|
||||
else:
|
||||
logger.debug(f"[{self.stream_name}] 清理完成 - 无需清理任何内容")
|
||||
|
||||
return cleanup_stats["segments_removed"] > 0 or len(users_to_remove) > 0
|
||||
|
||||
def get_cache_status(self) -> str:
|
||||
"""获取缓存状态信息,用于调试和监控"""
|
||||
if not self.person_engaged_cache:
|
||||
return f"[{self.stream_name}] 关系缓存为空"
|
||||
|
||||
status_lines = [f"[{self.stream_name}] 关系缓存状态:"]
|
||||
status_lines.append(
|
||||
f"最后处理消息时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_processed_message_time)) if self.last_processed_message_time > 0 else '未设置'}"
|
||||
)
|
||||
status_lines.append(
|
||||
f"最后清理时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.last_cleanup_time)) if self.last_cleanup_time > 0 else '未执行'}"
|
||||
)
|
||||
status_lines.append(f"总用户数:{len(self.person_engaged_cache)}")
|
||||
status_lines.append(
|
||||
f"清理配置:{'启用' if SEGMENT_CLEANUP_CONFIG['enable_cleanup'] else '禁用'} (最大保存{SEGMENT_CLEANUP_CONFIG['max_segment_age_days']}天, 每用户最多{SEGMENT_CLEANUP_CONFIG['max_segments_per_user']}段)"
|
||||
)
|
||||
status_lines.append("")
|
||||
|
||||
for person_id, segments in self.person_engaged_cache.items():
|
||||
total_count = self._get_total_message_count(person_id)
|
||||
status_lines.append(f"用户 {person_id}:")
|
||||
status_lines.append(f" 总消息数:{total_count} ({total_count}/45)")
|
||||
status_lines.append(f" 消息段数:{len(segments)}")
|
||||
|
||||
for i, segment in enumerate(segments):
|
||||
start_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["start_time"]))
|
||||
end_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["end_time"]))
|
||||
last_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(segment["last_msg_time"]))
|
||||
status_lines.append(
|
||||
f" 段{i + 1}: {start_str} -> {end_str} (最后消息: {last_str}, 消息数: {segment['message_count']})"
|
||||
)
|
||||
status_lines.append("")
|
||||
|
||||
return "\n".join(status_lines)
|
||||
|
||||
def _update_user_message_segments(self, message: MessageRecv):
|
||||
"""更新用户消息段信息"""
|
||||
time.time()
|
||||
user_id = message.message_info.user_info.user_id
|
||||
platform = message.message_info.platform
|
||||
msg_time = message.message_info.time
|
||||
|
||||
# 跳过机器人自己的消息
|
||||
if user_id == global_config.bot.qq_account:
|
||||
return
|
||||
|
||||
# 只处理新消息(避免重复处理)
|
||||
if msg_time <= self.last_processed_message_time:
|
||||
return
|
||||
|
||||
person_id = PersonInfoManager.get_person_id(platform, user_id)
|
||||
self._update_message_segments(person_id, msg_time)
|
||||
|
||||
# 更新最后处理时间
|
||||
self.last_processed_message_time = max(self.last_processed_message_time, msg_time)
|
||||
logger.debug(
|
||||
f"[{self.stream_name}] 更新用户 {person_id} 的消息段,消息时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msg_time))}"
|
||||
)
|
||||
|
||||
async def _priority_chat_loop_add_message(self):
|
||||
while not self._disabled:
|
||||
try:
|
||||
ids = list(self.interest_dict.keys())
|
||||
for msg_id in ids:
|
||||
message, interest_value, _ = self.interest_dict[msg_id]
|
||||
# 创建字典条目的副本以避免在迭代时发生修改
|
||||
items_to_process = list(self.interest_dict.items())
|
||||
for msg_id, value in items_to_process:
|
||||
# 尝试从原始字典中弹出条目,如果它已被其他任务处理,则跳过
|
||||
if self.interest_dict.pop(msg_id, None) is None:
|
||||
continue # 条目已被其他任务处理
|
||||
|
||||
message, interest_value, _ = value
|
||||
if not self._disabled:
|
||||
# 更新消息段信息
|
||||
self._update_user_message_segments(message)
|
||||
# self._update_user_message_segments(message)
|
||||
|
||||
# 添加消息到优先级管理器
|
||||
if self.priority_manager:
|
||||
self.priority_manager.add_message(message, interest_value)
|
||||
self.interest_dict.pop(msg_id, None)
|
||||
|
||||
except Exception:
|
||||
logger.error(
|
||||
f"[{self.stream_name}] 优先级聊天循环添加消息时出现错误: {traceback.format_exc()}", exc_info=True
|
||||
@@ -489,9 +159,6 @@ class NormalChat:
|
||||
f"[{self.stream_name}] 从队列中取出消息进行处理: User {message.message_info.user_info.user_id}, Time: {time.strftime('%H:%M:%S', time.localtime(message.message_info.time))}"
|
||||
)
|
||||
|
||||
# 检查是否有用户满足关系构建条件
|
||||
asyncio.create_task(self._check_relation_building_conditions(message))
|
||||
|
||||
do_reply = await self.reply_one_message(message)
|
||||
response_set = do_reply if do_reply else []
|
||||
factor = 0.5
|
||||
@@ -708,19 +375,12 @@ class NormalChat:
|
||||
async def normal_response(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None:
|
||||
"""
|
||||
处理接收到的消息。
|
||||
根据回复模式,决定是立即处理还是放入优先级队列。
|
||||
在"兴趣"模式下,判断是否回复并生成内容。
|
||||
"""
|
||||
if self._disabled:
|
||||
return
|
||||
|
||||
# 根据回复模式决定行为
|
||||
if self.reply_mode == "priority":
|
||||
# 优先模式下,所有消息都进入管理器
|
||||
if self.priority_manager:
|
||||
self.priority_manager.add_message(message)
|
||||
return
|
||||
|
||||
# 新增:在auto模式下检查是否需要直接切换到focus模式
|
||||
# 新增:在auto模式下检查是否需要直接切换到focus模式
|
||||
if global_config.chat.chat_mode == "auto":
|
||||
if await self._check_should_switch_to_focus():
|
||||
logger.info(f"[{self.stream_name}] 检测到切换到focus聊天模式的条件,尝试执行切换")
|
||||
@@ -734,19 +394,7 @@ class NormalChat:
|
||||
else:
|
||||
logger.warning(f"[{self.stream_name}] 没有设置切换到focus聊天模式的回调函数,无法执行切换")
|
||||
|
||||
# --- 以下为原有的 "兴趣" 模式逻辑 ---
|
||||
await self._process_message(message, is_mentioned, interested_rate)
|
||||
|
||||
async def _process_message(self, message: MessageRecv, is_mentioned: bool, interested_rate: float) -> None:
|
||||
"""
|
||||
实际处理单条消息的逻辑,包括意愿判断、回复生成、动作执行等。
|
||||
"""
|
||||
if self._disabled:
|
||||
return
|
||||
|
||||
# 检查是否有用户满足关系构建条件
|
||||
asyncio.create_task(self._check_relation_building_conditions(message))
|
||||
|
||||
# --- 以下为 "兴趣" 模式逻辑 (从 _process_message 合并而来) ---
|
||||
timing_results = {}
|
||||
reply_probability = (
|
||||
1.0 if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply else 0.0
|
||||
@@ -804,7 +452,7 @@ class NormalChat:
|
||||
if do_reply and response_set: # 确保 response_set 不是 None
|
||||
timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()])
|
||||
trigger_msg = message.processed_plain_text
|
||||
response_msg = " ".join(response_set)
|
||||
response_msg = " ".join([item[1] for item in response_set if item[0] == "text"])
|
||||
logger.info(
|
||||
f"[{self.stream_name}]回复消息: {trigger_msg[:30]}... | 回复内容: {response_msg[:30]}... | 计时: {timing_str}"
|
||||
)
|
||||
@@ -816,8 +464,105 @@ class NormalChat:
|
||||
# 意愿管理器:注销当前message信息 (无论是否回复,只要处理过就删除)
|
||||
willing_manager.delete(message.message_info.message_id)
|
||||
|
||||
async def _generate_normal_response(
|
||||
self, message: MessageRecv, available_actions: Optional[list]
|
||||
) -> Optional[list]:
|
||||
"""生成普通回复"""
|
||||
try:
|
||||
logger.info(
|
||||
f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
|
||||
)
|
||||
person_info_manager = get_person_info_manager()
|
||||
person_id = person_info_manager.get_person_id(
|
||||
message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
|
||||
)
|
||||
person_name = await person_info_manager.get_value(person_id, "person_name")
|
||||
reply_to_str = f"{person_name}:{message.processed_plain_text}"
|
||||
|
||||
success, reply_set = await generator_api.generate_reply(
|
||||
chat_stream=message.chat_stream,
|
||||
reply_to=reply_to_str,
|
||||
available_actions=available_actions,
|
||||
enable_tool=global_config.tool.enable_in_normal_chat,
|
||||
request_type="normal.replyer",
|
||||
)
|
||||
|
||||
if not success or not reply_set:
|
||||
logger.info(f"对 {message.processed_plain_text} 的回复生成失败")
|
||||
return None
|
||||
|
||||
content = " ".join([item[1] for item in reply_set if item[0] == "text"])
|
||||
if content:
|
||||
logger.info(f"{global_config.bot.nickname}的备选回复是:{content}")
|
||||
|
||||
return reply_set
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}")
|
||||
return None
|
||||
|
||||
async def _plan_and_execute_actions(self, message: MessageRecv, thinking_id: str) -> Optional[dict]:
|
||||
"""规划和执行额外动作"""
|
||||
no_action = {
|
||||
"action_result": {
|
||||
"action_type": "no_action",
|
||||
"action_data": {},
|
||||
"reasoning": "规划器初始化默认",
|
||||
"is_parallel": True,
|
||||
},
|
||||
"chat_context": "",
|
||||
"action_prompt": "",
|
||||
}
|
||||
|
||||
if not self.enable_planner:
|
||||
logger.debug(f"[{self.stream_name}] Planner未启用,跳过动作规划")
|
||||
return no_action
|
||||
|
||||
try:
|
||||
# 检查是否应该跳过规划
|
||||
if self.action_modifier.should_skip_planning():
|
||||
logger.debug(f"[{self.stream_name}] 没有可用动作,跳过规划")
|
||||
self.action_type = "no_action"
|
||||
return no_action
|
||||
|
||||
# 执行规划
|
||||
plan_result = await self.planner.plan(message)
|
||||
action_type = plan_result["action_result"]["action_type"]
|
||||
action_data = plan_result["action_result"]["action_data"]
|
||||
reasoning = plan_result["action_result"]["reasoning"]
|
||||
is_parallel = plan_result["action_result"].get("is_parallel", False)
|
||||
|
||||
logger.info(f"[{self.stream_name}] Planner决策: {action_type}, 理由: {reasoning}, 并行执行: {is_parallel}")
|
||||
self.action_type = action_type # 更新实例属性
|
||||
self.is_parallel_action = is_parallel # 新增:保存并行执行标志
|
||||
|
||||
# 如果规划器决定不执行任何动作
|
||||
if action_type == "no_action":
|
||||
logger.debug(f"[{self.stream_name}] Planner决定不执行任何额外动作")
|
||||
return no_action
|
||||
|
||||
# 执行额外的动作(不影响回复生成)
|
||||
action_result = await self._execute_action(action_type, action_data, message, thinking_id)
|
||||
if action_result is not None:
|
||||
logger.info(f"[{self.stream_name}] 额外动作 {action_type} 执行完成")
|
||||
else:
|
||||
logger.warning(f"[{self.stream_name}] 额外动作 {action_type} 执行失败")
|
||||
|
||||
return {
|
||||
"action_type": action_type,
|
||||
"action_data": action_data,
|
||||
"reasoning": reasoning,
|
||||
"is_parallel": is_parallel,
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] Planner执行失败: {e}")
|
||||
return no_action
|
||||
|
||||
async def reply_one_message(self, message: MessageRecv) -> None:
|
||||
# 回复前处理
|
||||
await self.relationship_builder.build_relation()
|
||||
|
||||
thinking_id = await self._create_thinking_message(message)
|
||||
|
||||
# 如果启用planner,预先修改可用actions(避免在并行任务中重复调用)
|
||||
@@ -832,87 +577,15 @@ class NormalChat:
|
||||
logger.warning(f"[{self.stream_name}] 获取available_actions失败: {e}")
|
||||
available_actions = None
|
||||
|
||||
# 定义并行执行的任务
|
||||
async def generate_normal_response():
|
||||
"""生成普通回复"""
|
||||
try:
|
||||
return await self.gpt.generate_response(
|
||||
message=message,
|
||||
available_actions=available_actions,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] 回复生成出现错误:{str(e)} {traceback.format_exc()}")
|
||||
return None
|
||||
|
||||
async def plan_and_execute_actions():
|
||||
"""规划和执行额外动作"""
|
||||
if not self.enable_planner:
|
||||
logger.debug(f"[{self.stream_name}] Planner未启用,跳过动作规划")
|
||||
return None
|
||||
|
||||
try:
|
||||
no_action = {
|
||||
"action_result": {
|
||||
"action_type": "no_action",
|
||||
"action_data": {},
|
||||
"reasoning": "规划器初始化默认",
|
||||
"is_parallel": True,
|
||||
},
|
||||
"chat_context": "",
|
||||
"action_prompt": "",
|
||||
}
|
||||
|
||||
# 检查是否应该跳过规划
|
||||
if self.action_modifier.should_skip_planning():
|
||||
logger.debug(f"[{self.stream_name}] 没有可用动作,跳过规划")
|
||||
self.action_type = "no_action"
|
||||
return no_action
|
||||
|
||||
# 执行规划
|
||||
plan_result = await self.planner.plan(message)
|
||||
action_type = plan_result["action_result"]["action_type"]
|
||||
action_data = plan_result["action_result"]["action_data"]
|
||||
reasoning = plan_result["action_result"]["reasoning"]
|
||||
is_parallel = plan_result["action_result"].get("is_parallel", False)
|
||||
|
||||
logger.info(
|
||||
f"[{self.stream_name}] Planner决策: {action_type}, 理由: {reasoning}, 并行执行: {is_parallel}"
|
||||
)
|
||||
self.action_type = action_type # 更新实例属性
|
||||
self.is_parallel_action = is_parallel # 新增:保存并行执行标志
|
||||
|
||||
# 如果规划器决定不执行任何动作
|
||||
if action_type == "no_action":
|
||||
logger.debug(f"[{self.stream_name}] Planner决定不执行任何额外动作")
|
||||
return no_action
|
||||
|
||||
# 执行额外的动作(不影响回复生成)
|
||||
action_result = await self._execute_action(action_type, action_data, message, thinking_id)
|
||||
if action_result is not None:
|
||||
logger.info(f"[{self.stream_name}] 额外动作 {action_type} 执行完成")
|
||||
else:
|
||||
logger.warning(f"[{self.stream_name}] 额外动作 {action_type} 执行失败")
|
||||
|
||||
return {
|
||||
"action_type": action_type,
|
||||
"action_data": action_data,
|
||||
"reasoning": reasoning,
|
||||
"is_parallel": is_parallel,
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] Planner执行失败: {e}")
|
||||
return no_action
|
||||
|
||||
# 并行执行回复生成和动作规划
|
||||
self.action_type = None # 初始化动作类型
|
||||
self.is_parallel_action = False # 初始化并行动作标志
|
||||
|
||||
gen_task = asyncio.create_task(generate_normal_response())
|
||||
plan_task = asyncio.create_task(plan_and_execute_actions())
|
||||
gen_task = asyncio.create_task(self._generate_normal_response(message, available_actions))
|
||||
plan_task = asyncio.create_task(self._plan_and_execute_actions(message, thinking_id))
|
||||
|
||||
try:
|
||||
gather_timeout = global_config.normal_chat.thinking_timeout
|
||||
gather_timeout = global_config.chat.thinking_timeout
|
||||
results = await asyncio.wait_for(
|
||||
asyncio.gather(gen_task, plan_task, return_exceptions=True),
|
||||
timeout=gather_timeout,
|
||||
@@ -922,12 +595,12 @@ class NormalChat:
|
||||
logger.warning(
|
||||
f"[{self.stream_name}] 并行执行回复生成和动作规划超时 ({gather_timeout}秒),正在取消相关任务..."
|
||||
)
|
||||
print(f"111{self.timeout_count}")
|
||||
self.timeout_count += 1
|
||||
if self.timeout_count > 5:
|
||||
logger.error(
|
||||
f"[{self.stream_name}] 连续回复超时,{global_config.normal_chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。"
|
||||
logger.warning(
|
||||
f"[{self.stream_name}] 连续回复超时次数过多,{global_config.chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。或者尝试拉高thinking_timeout参数,这可能导致回复时间过长。"
|
||||
)
|
||||
return False
|
||||
|
||||
# 取消未完成的任务
|
||||
if not gen_task.done():
|
||||
@@ -969,8 +642,15 @@ class NormalChat:
|
||||
logger.info(f"[{self.stream_name}] 已停用,忽略 normal_response。")
|
||||
return False
|
||||
|
||||
# 提取回复文本
|
||||
reply_texts = [item[1] for item in response_set if item[0] == "text"]
|
||||
if not reply_texts:
|
||||
logger.info(f"[{self.stream_name}] 回复内容中没有文本,不发送消息")
|
||||
await self._cleanup_thinking_message_by_id(thinking_id)
|
||||
return False
|
||||
|
||||
# 发送回复 (不再需要传入 chat)
|
||||
first_bot_msg = await self._add_messages_to_manager(message, response_set, thinking_id)
|
||||
first_bot_msg = await self._add_messages_to_manager(message, reply_texts, thinking_id)
|
||||
|
||||
# 检查 first_bot_msg 是否为 None (例如思考消息已被移除的情况)
|
||||
if first_bot_msg:
|
||||
@@ -1252,100 +932,6 @@ class NormalChat:
|
||||
"""获取动作管理器实例"""
|
||||
return self.action_manager
|
||||
|
||||
async def _check_relation_building_conditions(self, message: MessageRecv):
|
||||
"""检查person_engaged_cache中是否有满足关系构建条件的用户"""
|
||||
# 执行定期清理
|
||||
self._cleanup_old_segments()
|
||||
|
||||
# 更新消息段信息
|
||||
self._update_user_message_segments(message)
|
||||
|
||||
users_to_build_relationship = []
|
||||
|
||||
for person_id, segments in list(self.person_engaged_cache.items()):
|
||||
total_message_count = self._get_total_message_count(person_id)
|
||||
if total_message_count >= 45:
|
||||
users_to_build_relationship.append(person_id)
|
||||
logger.info(
|
||||
f"[{self.stream_name}] 用户 {person_id} 满足关系构建条件,总消息数:{total_message_count},消息段数:{len(segments)}"
|
||||
)
|
||||
elif total_message_count > 0:
|
||||
# 记录进度信息
|
||||
logger.debug(
|
||||
f"[{self.stream_name}] 用户 {person_id} 进度:{total_message_count}/45 条消息,{len(segments)} 个消息段"
|
||||
)
|
||||
|
||||
# 为满足条件的用户构建关系
|
||||
for person_id in users_to_build_relationship:
|
||||
segments = self.person_engaged_cache[person_id]
|
||||
# 异步执行关系构建
|
||||
asyncio.create_task(self._build_relation_for_person_segments(person_id, segments))
|
||||
# 移除已处理的用户缓存
|
||||
del self.person_engaged_cache[person_id]
|
||||
self._save_cache()
|
||||
logger.info(f"[{self.stream_name}] 用户 {person_id} 关系构建已启动,缓存已清理")
|
||||
|
||||
async def _build_relation_for_person_segments(self, person_id: str, segments: List[Dict[str, any]]):
|
||||
"""基于消息段更新用户印象,统一使用focus chat的构建方式"""
|
||||
if not segments:
|
||||
return
|
||||
|
||||
logger.debug(f"[{self.stream_name}] 开始为 {person_id} 基于 {len(segments)} 个消息段更新印象")
|
||||
try:
|
||||
processed_messages = []
|
||||
|
||||
for i, segment in enumerate(segments):
|
||||
start_time = segment["start_time"]
|
||||
end_time = segment["end_time"]
|
||||
segment["message_count"]
|
||||
start_date = time.strftime("%Y-%m-%d %H:%M", time.localtime(start_time))
|
||||
|
||||
# 获取该段的消息(包含边界)
|
||||
segment_messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.stream_id, start_time, end_time)
|
||||
logger.debug(
|
||||
f"[{self.stream_name}] 消息段 {i + 1}: {start_date} - {time.strftime('%Y-%m-%d %H:%M', time.localtime(end_time))}, 消息数: {len(segment_messages)}"
|
||||
)
|
||||
|
||||
if segment_messages:
|
||||
# 如果不是第一个消息段,在消息列表前添加间隔标识
|
||||
if i > 0:
|
||||
# 创建一个特殊的间隔消息
|
||||
gap_message = {
|
||||
"time": start_time - 0.1, # 稍微早于段开始时间
|
||||
"user_id": "system",
|
||||
"user_platform": "system",
|
||||
"user_nickname": "系统",
|
||||
"user_cardname": "",
|
||||
"display_message": f"...(中间省略一些消息){start_date} 之后的消息如下...",
|
||||
"is_action_record": True,
|
||||
"chat_info_platform": segment_messages[0].get("chat_info_platform", ""),
|
||||
"chat_id": self.stream_id,
|
||||
}
|
||||
processed_messages.append(gap_message)
|
||||
|
||||
# 添加该段的所有消息
|
||||
processed_messages.extend(segment_messages)
|
||||
|
||||
if processed_messages:
|
||||
# 按时间排序所有消息(包括间隔标识)
|
||||
processed_messages.sort(key=lambda x: x["time"])
|
||||
|
||||
logger.debug(
|
||||
f"[{self.stream_name}] 为 {person_id} 获取到总共 {len(processed_messages)} 条消息(包含间隔标识)用于印象更新"
|
||||
)
|
||||
relationship_manager = get_relationship_manager()
|
||||
|
||||
# 调用统一的更新方法
|
||||
await relationship_manager.update_person_impression(
|
||||
person_id=person_id, timestamp=time.time(), bot_engaged_messages=processed_messages
|
||||
)
|
||||
else:
|
||||
logger.debug(f"[{self.stream_name}] 没有找到 {person_id} 的消息段对应的消息,不更新印象")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] 为 {person_id} 更新印象时发生错误: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
|
||||
def _get_fatigue_reply_multiplier(self) -> float:
|
||||
"""获取疲劳期回复频率调整系数
|
||||
|
||||
@@ -1369,7 +955,6 @@ class NormalChat:
|
||||
except Exception as e:
|
||||
logger.warning(f"[{self.stream_name}] 获取疲劳调整系数时出错: {e}")
|
||||
return 1.0 # 出错时返回正常系数
|
||||
|
||||
async def _check_should_switch_to_focus(self) -> bool:
|
||||
"""
|
||||
检查是否满足切换到focus模式的条件
|
||||
@@ -1417,3 +1002,4 @@ class NormalChat:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(f"[{self.stream_name}] 清理思考消息 {thinking_id} 时出错: {e}")
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ from src.chat.utils.chat_message_builder import build_readable_messages, get_raw
|
||||
from src.config.config import global_config
|
||||
import random
|
||||
import time
|
||||
import asyncio
|
||||
|
||||
logger = get_logger("normal_chat_action_modifier")
|
||||
|
||||
@@ -184,6 +185,7 @@ class NormalChatActionModifier:
always_actions = {}
random_actions = {}
keyword_actions = {}
llm_judge_actions = {}

for action_name, action_info in actions_with_info.items():
# 使用normal_activation_type
@@ -192,8 +194,10 @@
# 现在统一是字符串格式的激活类型值
if activation_type == "always":
always_actions[action_name] = action_info
elif activation_type == "random" or activation_type == "llm_judge":
elif activation_type == "random":
random_actions[action_name] = action_info
elif activation_type == "llm_judge":
llm_judge_actions[action_name] = action_info
elif activation_type == "keyword":
keyword_actions[action_name] = action_info
else:
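As a reference for the hunk above, a minimal standalone sketch of the dispatch after this change, where "llm_judge" actions are bucketed separately from "random" ones so they can be LLM-activated in normal mode (the `normal_activation_type` field name is an assumption based on the comment in the hunk; the real modifier works on its own action_info structures):

```python
from typing import Any, Dict


def bucket_actions_by_activation(actions_with_info: Dict[str, Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    """Group actions by their normal-mode activation type (sketch only)."""
    buckets: Dict[str, Dict[str, Any]] = {"always": {}, "random": {}, "llm_judge": {}, "keyword": {}}
    for action_name, action_info in actions_with_info.items():
        activation_type = action_info.get("normal_activation_type", "always")
        # unknown types get their own bucket instead of being silently dropped
        buckets.setdefault(activation_type, {})[action_name] = action_info
    return buckets
```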
@@ -225,6 +229,24 @@ class NormalChatActionModifier:
|
||||
keywords = action_info.get("activation_keywords", [])
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: KEYWORD类型未匹配关键词({keywords})")
|
||||
|
||||
# 4. 处理LLM_JUDGE类型(并行判定)
|
||||
if llm_judge_actions:
|
||||
# 直接并行处理所有LLM判定actions
|
||||
llm_results = await self._process_llm_judge_actions_parallel(
|
||||
llm_judge_actions,
|
||||
chat_content,
|
||||
)
|
||||
|
||||
# 添加激活的LLM判定actions
|
||||
for action_name, should_activate in llm_results.items():
|
||||
if should_activate:
|
||||
activated_actions[action_name] = llm_judge_actions[action_name]
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: LLM_JUDGE类型判定通过")
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: LLM_JUDGE类型判定未通过")
|
||||
|
||||
|
||||
|
||||
logger.debug(f"{self.log_prefix}Normal模式激活类型过滤完成: {list(activated_actions.keys())}")
|
||||
return activated_actions
|
||||
|
||||
@@ -277,6 +299,93 @@ class NormalChatActionModifier:
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}")
|
||||
return False
|
||||
|
||||
|
||||
async def _process_llm_judge_actions_parallel(
|
||||
self,
|
||||
llm_judge_actions: Dict[str, Any],
|
||||
chat_content: str = "",
|
||||
) -> Dict[str, bool]:
|
||||
"""
|
||||
并行处理LLM判定actions,支持智能缓存
|
||||
|
||||
Args:
|
||||
llm_judge_actions: 需要LLM判定的actions
|
||||
chat_content: 聊天内容
|
||||
|
||||
Returns:
|
||||
Dict[str, bool]: action名称到激活结果的映射
|
||||
"""
|
||||
|
||||
# 生成当前上下文的哈希值
|
||||
current_context_hash = self._generate_context_hash(chat_content)
|
||||
current_time = time.time()
|
||||
|
||||
results = {}
|
||||
tasks_to_run = {}
|
||||
|
||||
# 检查缓存
|
||||
for action_name, action_info in llm_judge_actions.items():
|
||||
cache_key = f"{action_name}_{current_context_hash}"
|
||||
|
||||
# 检查是否有有效的缓存
|
||||
if (
|
||||
cache_key in self._llm_judge_cache
|
||||
and current_time - self._llm_judge_cache[cache_key]["timestamp"] < self._cache_expiry_time
|
||||
):
|
||||
results[action_name] = self._llm_judge_cache[cache_key]["result"]
|
||||
logger.debug(
|
||||
f"{self.log_prefix}使用缓存结果 {action_name}: {'激活' if results[action_name] else '未激活'}"
|
||||
)
|
||||
else:
|
||||
# 需要进行LLM判定
|
||||
tasks_to_run[action_name] = action_info
|
||||
|
||||
# 如果有需要运行的任务,并行执行
|
||||
if tasks_to_run:
|
||||
logger.debug(f"{self.log_prefix}并行执行LLM判定,任务数: {len(tasks_to_run)}")
|
||||
|
||||
# 创建并行任务
|
||||
tasks = []
|
||||
task_names = []
|
||||
|
||||
for action_name, action_info in tasks_to_run.items():
|
||||
task = self._llm_judge_action(
|
||||
action_name,
|
||||
action_info,
|
||||
chat_content,
|
||||
)
|
||||
tasks.append(task)
|
||||
task_names.append(action_name)
|
||||
|
||||
# 并行执行所有任务
|
||||
try:
|
||||
task_results = await asyncio.gather(*tasks, return_exceptions=True)
|
||||
|
||||
# 处理结果并更新缓存
|
||||
for _, (action_name, result) in enumerate(zip(task_names, task_results)):
|
||||
if isinstance(result, Exception):
|
||||
logger.error(f"{self.log_prefix}LLM判定action {action_name} 时出错: {result}")
|
||||
results[action_name] = False
|
||||
else:
|
||||
results[action_name] = result
|
||||
|
||||
# 更新缓存
|
||||
cache_key = f"{action_name}_{current_context_hash}"
|
||||
self._llm_judge_cache[cache_key] = {"result": result, "timestamp": current_time}
|
||||
|
||||
logger.debug(f"{self.log_prefix}并行LLM判定完成,耗时: {time.time() - current_time:.2f}s")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}并行LLM判定失败: {e}")
|
||||
# 如果并行执行失败,为所有任务返回False
|
||||
for action_name in tasks_to_run.keys():
|
||||
results[action_name] = False
|
||||
|
||||
# 清理过期缓存
|
||||
self._cleanup_expired_cache(current_time)
|
||||
|
||||
return results
|
||||
|
||||
def get_available_actions_count(self) -> int:
|
||||
"""获取当前可用动作数量(排除默认的no_action)"""
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.chat.message_receive.message import MessageThinking
|
||||
from src.common.logger import get_logger
|
||||
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
|
||||
from src.chat.utils.utils import process_llm_response
|
||||
from src.plugin_system.apis import generator_api
|
||||
from src.chat.focus_chat.memory_activator import MemoryActivator
|
||||
|
||||
|
||||
logger = get_logger("normal_chat_response")
|
||||
|
||||
|
||||
class NormalChatGenerator:
|
||||
def __init__(self):
|
||||
model_config_1 = global_config.model.replyer_1.copy()
|
||||
model_config_2 = global_config.model.replyer_2.copy()
|
||||
|
||||
prob_first = global_config.chat.replyer_random_probability
|
||||
|
||||
model_config_1["weight"] = prob_first
|
||||
model_config_2["weight"] = 1.0 - prob_first
|
||||
|
||||
self.model_configs = [model_config_1, model_config_2]
|
||||
|
||||
self.model_sum = LLMRequest(model=global_config.model.memory_summary, temperature=0.7, request_type="relation")
|
||||
self.memory_activator = MemoryActivator()
|
||||
|
||||
async def generate_response(
|
||||
self,
|
||||
message: MessageThinking,
|
||||
available_actions=None,
|
||||
):
|
||||
logger.info(
|
||||
f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
|
||||
)
|
||||
person_id = PersonInfoManager.get_person_id(
|
||||
message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
|
||||
)
|
||||
person_info_manager = get_person_info_manager()
|
||||
person_name = await person_info_manager.get_value(person_id, "person_name")
|
||||
relation_info = await person_info_manager.get_value(person_id, "short_impression")
|
||||
reply_to_str = f"{person_name}:{message.processed_plain_text}"
|
||||
|
||||
try:
|
||||
success, reply_set, prompt = await generator_api.generate_reply(
|
||||
chat_stream=message.chat_stream,
|
||||
reply_to=reply_to_str,
|
||||
relation_info=relation_info,
|
||||
available_actions=available_actions,
|
||||
enable_tool=global_config.tool.enable_in_normal_chat,
|
||||
model_configs=self.model_configs,
|
||||
request_type="normal.replyer",
|
||||
return_prompt=True,
|
||||
)
|
||||
|
||||
if not success or not reply_set:
|
||||
logger.info(f"对 {message.processed_plain_text} 的回复生成失败")
|
||||
return None
|
||||
|
||||
content = " ".join([item[1] for item in reply_set if item[0] == "text"])
|
||||
logger.debug(f"对 {message.processed_plain_text} 的回复:{content}")
|
||||
|
||||
if content:
|
||||
logger.info(f"{global_config.bot.nickname}的备选回复是:{content}")
|
||||
content = process_llm_response(content)
|
||||
|
||||
return content
|
||||
|
||||
except Exception:
|
||||
logger.exception("生成回复时出错")
|
||||
return None
|
||||
@@ -49,10 +49,8 @@ def init_prompt():
|
||||
Prompt(
|
||||
"""
|
||||
动作:{action_name}
|
||||
该动作的描述:{action_description}
|
||||
使用该动作的场景:
|
||||
动作描述:{action_description}
|
||||
{action_require}
|
||||
输出要求:
|
||||
{{
|
||||
"action": "{action_name}",{action_parameters}
|
||||
}}
|
||||
@@ -160,8 +158,8 @@ class NormalChatPlanner:
|
||||
|
||||
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
|
||||
logger.info(f"{self.log_prefix}规划器原始响应: {content}")
|
||||
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
|
||||
logger.info(f"{self.log_prefix}规划器模型: {model_name}")
|
||||
if reasoning_content:
|
||||
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
|
||||
|
||||
# 解析JSON响应
|
||||
try:
|
||||
|
||||
@@ -92,14 +92,12 @@ class DefaultReplyer:
|
||||
def __init__(
|
||||
self,
|
||||
chat_stream: ChatStream,
|
||||
enable_tool: bool = False,
|
||||
model_configs: Optional[List[Dict[str, Any]]] = None,
|
||||
request_type: str = "focus.replyer",
|
||||
):
|
||||
self.log_prefix = "replyer"
|
||||
self.request_type = request_type
|
||||
|
||||
self.enable_tool = enable_tool
|
||||
|
||||
if model_configs:
|
||||
self.express_model_configs = model_configs
|
||||
@@ -170,9 +168,10 @@ class DefaultReplyer:
|
||||
self,
|
||||
reply_data: Dict[str, Any] = None,
|
||||
reply_to: str = "",
|
||||
relation_info: str = "",
|
||||
extra_info: str = "",
|
||||
available_actions: List[str] = None,
|
||||
enable_tool: bool = True,
|
||||
enable_timeout: bool = False,
|
||||
) -> Tuple[bool, Optional[str]]:
|
||||
"""
|
||||
回复器 (Replier): 核心逻辑,负责生成回复文本。
|
||||
@@ -186,7 +185,6 @@ class DefaultReplyer:
|
||||
if not reply_data:
|
||||
reply_data = {
|
||||
"reply_to": reply_to,
|
||||
"relation_info": relation_info,
|
||||
"extra_info": extra_info,
|
||||
}
|
||||
for key, value in reply_data.items():
|
||||
@@ -198,6 +196,8 @@ class DefaultReplyer:
|
||||
prompt = await self.build_prompt_reply_context(
|
||||
reply_data=reply_data, # 传递action_data
|
||||
available_actions=available_actions,
|
||||
enable_timeout=enable_timeout,
|
||||
enable_tool=enable_tool,
|
||||
)
|
||||
|
||||
# 4. 调用 LLM 生成回复
|
||||
@@ -311,7 +311,7 @@ class DefaultReplyer:
|
||||
person_id = person_info_manager.get_person_id_by_person_name(sender)
|
||||
if not person_id:
|
||||
logger.warning(f"{self.log_prefix} 未找到用户 {sender} 的ID,跳过信息提取")
|
||||
return None
|
||||
return f"你完全不认识{sender},不理解ta的相关信息。"
|
||||
|
||||
relation_info = await relationship_fetcher.build_relation_info(person_id, text, chat_history)
|
||||
return relation_info
|
||||
@@ -367,13 +367,12 @@ class DefaultReplyer:
|
||||
for running_memory in running_memorys:
|
||||
memory_str += f"- {running_memory['content']}\n"
|
||||
memory_block = memory_str
|
||||
logger.info(f"{self.log_prefix} 添加了 {len(running_memorys)} 个激活的记忆到prompt")
|
||||
else:
|
||||
memory_block = ""
|
||||
|
||||
return memory_block
|
||||
|
||||
async def build_tool_info(self, reply_data=None, chat_history=None):
|
||||
async def build_tool_info(self, reply_data=None, chat_history=None, enable_tool: bool = True):
|
||||
"""构建工具信息块
|
||||
|
||||
Args:
|
||||
@@ -384,6 +383,9 @@ class DefaultReplyer:
|
||||
str: 工具信息字符串
|
||||
"""
|
||||
|
||||
if not enable_tool:
|
||||
return ""
|
||||
|
||||
if not reply_data:
|
||||
return ""
|
||||
|
||||
@@ -460,7 +462,15 @@ class DefaultReplyer:
|
||||
|
||||
return keywords_reaction_prompt
|
||||
|
||||
async def build_prompt_reply_context(self, reply_data=None, available_actions: List[str] = None) -> str:
|
||||
async def _time_and_run_task(self, coro, name: str):
|
||||
"""一个简单的帮助函数,用于计时和运行异步任务,返回任务名、结果和耗时"""
|
||||
start_time = time.time()
|
||||
result = await coro
|
||||
end_time = time.time()
|
||||
duration = end_time - start_time
|
||||
return name, result, duration
|
||||
|
||||
async def build_prompt_reply_context(self, reply_data=None, available_actions: List[str] = None, enable_timeout: bool = False, enable_tool: bool = True) -> str:
|
||||
"""
|
||||
构建回复器上下文
|
||||
|
||||
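For reference, a self-contained usage sketch of the timing pattern introduced above, assuming a free-standing helper instead of the DefaultReplyer method (`fake_build` and the delays are made up for illustration):

```python
import asyncio
import time


async def time_and_run_task(coro, name: str):
    # mirror of the helper above: run the coroutine and report its duration
    start = time.time()
    result = await coro
    return name, result, time.time() - start


async def fake_build(block: str, delay: float) -> str:
    await asyncio.sleep(delay)
    return f"<{block}>"


async def main():
    task_results = await asyncio.gather(
        time_and_run_task(fake_build("expression_habits", 0.20), "build_expression_habits"),
        time_and_run_task(fake_build("relation_info", 0.10), "build_relation_info"),
        time_and_run_task(fake_build("memory_block", 0.15), "build_memory_block"),
    )
    timing_logs = [f"{name}: {duration:.4f}s" for name, _, duration in task_results]
    print("回复生成前信息获取时间: " + "; ".join(timing_logs))


asyncio.run(main())
```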
@@ -526,13 +536,26 @@ class DefaultReplyer:
|
||||
)
|
||||
|
||||
# 并行执行四个构建任务
|
||||
expression_habits_block, relation_info, memory_block, tool_info = await asyncio.gather(
|
||||
self.build_expression_habits(chat_talking_prompt_half, target),
|
||||
self.build_relation_info(reply_data, chat_talking_prompt_half),
|
||||
self.build_memory_block(chat_talking_prompt_half, target),
|
||||
self.build_tool_info(reply_data, chat_talking_prompt_half),
|
||||
task_results = await asyncio.gather(
|
||||
self._time_and_run_task(self.build_expression_habits(chat_talking_prompt_half, target), "build_expression_habits"),
|
||||
self._time_and_run_task(self.build_relation_info(reply_data, chat_talking_prompt_half), "build_relation_info"),
|
||||
self._time_and_run_task(self.build_memory_block(chat_talking_prompt_half, target), "build_memory_block"),
|
||||
self._time_and_run_task(self.build_tool_info(reply_data, chat_talking_prompt_half, enable_tool=enable_tool), "build_tool_info"),
|
||||
)
|
||||
|
||||
# 处理结果
|
||||
timing_logs = []
|
||||
results_dict = {}
|
||||
for name, result, duration in task_results:
|
||||
results_dict[name] = result
|
||||
timing_logs.append(f"{name}: {duration:.4f}s")
|
||||
logger.info(f"回复生成前信息获取时间: {'; '.join(timing_logs)}")
|
||||
|
||||
expression_habits_block = results_dict["build_expression_habits"]
|
||||
relation_info = results_dict["build_relation_info"]
|
||||
memory_block = results_dict["build_memory_block"]
|
||||
tool_info = results_dict["build_tool_info"]
|
||||
|
||||
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
|
||||
|
||||
if tool_info:
|
||||
|
||||
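The hunk above swaps a bare `asyncio.gather` for per-task timing via the new `_time_and_run_task` helper, so the log reports how long each prompt-building step took. Below is a minimal, self-contained sketch of that pattern; the builder coroutines and sleep durations are invented for illustration and are not the project's real builders.

```python
import asyncio
import time


async def _time_and_run_task(coro, name: str):
    """Run a coroutine and return (name, result, duration_in_seconds)."""
    start = time.time()
    result = await coro
    return name, result, time.time() - start


async def build_memory_block() -> str:  # stand-in for a real info builder
    await asyncio.sleep(0.1)
    return "memory"


async def build_relation_info() -> str:  # stand-in for a real info builder
    await asyncio.sleep(0.2)
    return "relation"


async def main() -> dict:
    # Run both builders concurrently; wall time is ~0.2s, not the 0.3s sum.
    task_results = await asyncio.gather(
        _time_and_run_task(build_memory_block(), "build_memory_block"),
        _time_and_run_task(build_relation_info(), "build_relation_info"),
    )
    results = {name: result for name, result, _ in task_results}
    timing = "; ".join(f"{name}: {duration:.4f}s" for name, _, duration in task_results)
    print(f"回复生成前信息获取时间: {timing}")
    return results


asyncio.run(main())
```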
@@ -14,7 +14,6 @@ class ReplyerManager:
        self,
        chat_stream: Optional[ChatStream] = None,
        chat_id: Optional[str] = None,
        enable_tool: bool = False,
        model_configs: Optional[List[Dict[str, Any]]] = None,
        request_type: str = "replyer",
    ) -> Optional[DefaultReplyer]:
@@ -50,7 +49,6 @@ class ReplyerManager:
            # model_configs 只在此时(初始化时)生效
            replyer = DefaultReplyer(
                chat_stream=target_stream,
                enable_tool=enable_tool,
                model_configs=model_configs,  # 可以是None,此时使用默认模型
                request_type=request_type,
            )

@@ -81,7 +81,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:

    if is_at and global_config.normal_chat.at_bot_inevitable_reply:
        reply_probability = 1.0
        logger.info("被@,回复概率设置为100%")
        logger.debug("被@,回复概率设置为100%")
    else:
        if not is_mentioned:
            # 判断是否被回复
@@ -106,7 +106,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
            is_mentioned = True
    if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply:
        reply_probability = 1.0
        logger.info("被提及,回复概率设置为100%")
        logger.debug("被提及,回复概率设置为100%")
    return is_mentioned, reply_probability


@@ -346,7 +346,6 @@ MODULE_COLORS = {
    # 聊天相关模块
    "normal_chat": "\033[38;5;81m",  # 亮蓝绿色
    "normal_chat_response": "\033[38;5;123m",  # 青绿色
    "normal_chat_expressor": "\033[38;5;117m",  # 浅蓝色
    "normal_chat_action_modifier": "\033[38;5;111m",  # 蓝色
    "normal_chat_planner": "\033[38;5;75m",  # 浅蓝色
    "heartflow": "\033[38;5;213m",  # 粉色
@@ -362,7 +361,6 @@ MODULE_COLORS = {
    # 专注聊天模块
    "replyer": "\033[38;5;166m",  # 橙色
    "expressor": "\033[38;5;172m",  # 黄橙色
    "planner_factory": "\033[38;5;178m",  # 黄色
    "processor": "\033[38;5;184m",  # 黄绿色
    "base_processor": "\033[38;5;190m",  # 绿黄色
    "working_memory": "\033[38;5;22m",  # 深绿色
@@ -370,6 +368,7 @@ MODULE_COLORS = {
    # 插件系统
    "plugin_manager": "\033[38;5;208m",  # 红色
    "base_plugin": "\033[38;5;202m",  # 橙红色
    "send_api": "\033[38;5;208m",  # 橙色
    "base_command": "\033[38;5;208m",  # 橙色
    "component_registry": "\033[38;5;214m",  # 橙黄色
    "stream_api": "\033[38;5;220m",  # 黄色
@@ -388,10 +387,8 @@ MODULE_COLORS = {
    "willing": "\033[38;5;147m",  # 浅紫色
    # 工具模块
    "tool_use": "\033[38;5;64m",  # 深绿色
    "tool_executor": "\033[38;5;64m",  # 深绿色
    "base_tool": "\033[38;5;70m",  # 绿色
    "compare_numbers_tool": "\033[38;5;76m",  # 浅绿色
    "change_mood_tool": "\033[38;5;82m",  # 绿色
    "relationship_tool": "\033[38;5;88m",  # 深红色
    # 工具和实用模块
    "prompt": "\033[38;5;99m",  # 紫色
    "prompt_build": "\033[38;5;105m",  # 紫色
@@ -417,6 +414,8 @@ MODULE_COLORS = {
    "confirm": "\033[1;93m",  # 黄色+粗体
    # 模型相关
    "model_utils": "\033[38;5;164m",  # 紫红色

    "relationship_builder": "\033[38;5;117m",  # 浅蓝色
}

RESET_COLOR = "\033[0m"

@@ -84,6 +84,9 @@ class ChatConfig(ConfigBase):
    选择普通模型的概率为 1 - reasoning_normal_model_probability
    """

    thinking_timeout: int = 30
    """麦麦最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢)"""

    talk_frequency: float = 1
    """回复频率阈值"""

@@ -276,8 +279,6 @@ class NormalChatConfig(ConfigBase):
    emoji_chance: float = 0.2
    """发送表情包的基础概率"""

    thinking_timeout: int = 120
    """最长思考时间"""

    willing_mode: str = "classical"
    """意愿模式"""

@@ -25,7 +25,7 @@ class RelationshipBuilderManager:
        """
        if chat_id not in self.builders:
            self.builders[chat_id] = RelationshipBuilder(chat_id)
            logger.info(f"创建聊天 {chat_id} 的关系构建器")
            logger.debug(f"创建聊天 {chat_id} 的关系构建器")

        return self.builders[chat_id]

@@ -51,7 +51,7 @@ class RelationshipBuilderManager:
        """
        if chat_id in self.builders:
            del self.builders[chat_id]
            logger.info(f"移除聊天 {chat_id} 的关系构建器")
            logger.debug(f"移除聊天 {chat_id} 的关系构建器")
            return True
        return False


@@ -106,7 +106,15 @@ class RelationshipFetcher:
            await self._extract_single_info(person_id, info_type, person_name)

        relation_info = self._organize_known_info()
        relation_info = f"你对{person_name}的印象是:{short_impression}\n{relation_info}"
        if short_impression and relation_info:
            relation_info = f"你对{person_name}的印象是:{short_impression}。具体来说:{relation_info}"
        elif short_impression:
            relation_info = f"你对{person_name}的印象是:{short_impression}"
        elif relation_info:
            relation_info = f"你对{person_name}的了解:{relation_info}"
        else:
            relation_info = ""

        return relation_info

    async def _build_fetch_query(self, person_id, target_message, chat_history):

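The new branching above degrades gracefully when either the short impression or the detailed info is missing, instead of always interpolating both. A standalone sketch of the same logic as a pure helper; the function name and the sample inputs are hypothetical.

```python
# Hypothetical helper mirroring the branching above: combine a short impression
# and detailed info into one relation string, tolerating either being empty.
def format_relation_info(person_name: str, short_impression: str, detail: str) -> str:
    if short_impression and detail:
        return f"你对{person_name}的印象是:{short_impression}。具体来说:{detail}"
    if short_impression:
        return f"你对{person_name}的印象是:{short_impression}"
    if detail:
        return f"你对{person_name}的了解:{detail}"
    return ""


print(format_relation_info("小明", "热心但话多", ""))        # 只有印象
print(format_relation_info("小明", "", "喜欢讨论编程"))       # 只有了解
print(format_relation_info("小明", "", ""))                  # 空字符串,调用方可直接拼进prompt
```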
@@ -374,7 +374,7 @@ async def store_action_info(
    )

    if saved_record:
        logger.info(f"[DatabaseAPI] 成功存储动作信息: {action_name} (ID: {record_data['action_id']})")
        logger.debug(f"[DatabaseAPI] 成功存储动作信息: {action_name} (ID: {record_data['action_id']})")
    else:
        logger.error(f"[DatabaseAPI] 存储动作信息失败: {action_name}")


@@ -27,7 +27,6 @@ logger = get_logger("generator_api")
def get_replyer(
    chat_stream: Optional[ChatStream] = None,
    chat_id: Optional[str] = None,
    enable_tool: bool = False,
    model_configs: Optional[List[Dict[str, Any]]] = None,
    request_type: str = "replyer",
) -> Optional[DefaultReplyer]:
@@ -52,7 +51,6 @@ def get_replyer(
            chat_id=chat_id,
            model_configs=model_configs,
            request_type=request_type,
            enable_tool=enable_tool,
        )
    except Exception as e:
        logger.error(f"[GeneratorAPI] 获取回复器时发生意外错误: {e}", exc_info=True)
@@ -70,7 +68,6 @@ async def generate_reply(
    chat_id: str = None,
    action_data: Dict[str, Any] = None,
    reply_to: str = "",
    relation_info: str = "",
    extra_info: str = "",
    available_actions: List[str] = None,
    enable_tool: bool = False,
@@ -79,6 +76,7 @@ async def generate_reply(
    return_prompt: bool = False,
    model_configs: Optional[List[Dict[str, Any]]] = None,
    request_type: str = "",
    enable_timeout: bool = False,
) -> Tuple[bool, List[Tuple[str, Any]]]:
    """生成回复

@@ -95,27 +93,28 @@ async def generate_reply(
    try:
        # 获取回复器
        replyer = get_replyer(
            chat_stream, chat_id, model_configs=model_configs, request_type=request_type, enable_tool=enable_tool
            chat_stream, chat_id, model_configs=model_configs, request_type=request_type
        )
        if not replyer:
            logger.error("[GeneratorAPI] 无法获取回复器")
            return False, []

        logger.info("[GeneratorAPI] 开始生成回复")
        logger.debug("[GeneratorAPI] 开始生成回复")

        # 调用回复器生成回复
        success, content, prompt = await replyer.generate_reply_with_context(
            reply_data=action_data or {},
            reply_to=reply_to,
            relation_info=relation_info,
            extra_info=extra_info,
            available_actions=available_actions,
            enable_timeout=enable_timeout,
            enable_tool=enable_tool,
        )

        reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo)

        if success:
            logger.info(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项")
            logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项")
        else:
            logger.warning("[GeneratorAPI] 回复生成失败")


@@ -66,7 +66,7 @@ async def _send_to_target(
        bool: 是否发送成功
    """
    try:
        logger.info(f"[SendAPI] 发送{message_type}消息到 {stream_id}")
        logger.debug(f"[SendAPI] 发送{message_type}消息到 {stream_id}")

        # 查找目标聊天流
        target_stream = get_chat_manager().get_stream(stream_id)

@@ -77,7 +77,7 @@ class NoReplyAction(BaseAction):

        reason = self.action_data.get("reason", "")
        start_time = time.time()
        last_judge_time = 0  # 上次进行LLM判断的时间
        last_judge_time = start_time  # 上次进行LLM判断的时间
        min_judge_interval = self._min_judge_interval  # 最小判断间隔,从配置获取
        check_interval = 0.2  # 检查新消息的间隔,设为0.2秒提高响应性

@@ -357,7 +357,7 @@ class NoReplyAction(BaseAction):
                judge_history.append((current_time, judge_result, reason))

                if judge_result == "需要回复":
                    logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待")
                    # logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待")

                    full_prompt = f"{global_config.bot.nickname}(你)的想法是:{reason}"
                    await self.store_action_info(

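Initializing `last_judge_time` to `start_time` (rather than `0`) defers the first LLM judgement until `min_judge_interval` has actually elapsed, instead of firing it on the first loop iteration. A self-contained sketch of that rate-limited polling loop, with a fake judge and made-up intervals:

```python
# Hypothetical polling loop: the expensive judge call is limited to at most one
# call per min_judge_interval, and the first call is deferred, not immediate.
import asyncio
import time


async def llm_judge() -> str:  # stand-in for the real LLM call
    await asyncio.sleep(0.05)
    return "不需要回复"


async def wait_loop(total_wait: float = 3.0, min_judge_interval: float = 1.0, check_interval: float = 0.2):
    start_time = time.time()
    last_judge_time = start_time  # 而不是 0,否则第一轮循环就会触发判断
    while time.time() - start_time < total_wait:
        now = time.time()
        if now - last_judge_time >= min_judge_interval:
            last_judge_time = now
            result = await llm_judge()
            print(f"{now - start_time:.1f}s: 判断结果 = {result}")
            if result == "需要回复":
                return
        await asyncio.sleep(check_interval)


asyncio.run(wait_loop())
```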
@@ -8,6 +8,7 @@
import random
import time
from typing import List, Tuple, Type
import asyncio

# 导入新插件系统
from src.plugin_system import BasePlugin, register_plugin, BaseAction, ComponentInfo, ActionActivationType, ChatMode
@@ -55,17 +56,24 @@ class ReplyAction(BaseAction):

    async def execute(self) -> Tuple[bool, str]:
        """执行回复动作"""
        logger.info(f"{self.log_prefix} 决定回复: {self.reasoning}")
        logger.info(f"{self.log_prefix} 决定进行回复")

        start_time = self.action_data.get("loop_start_time", time.time())

        try:
            success, reply_set = await generator_api.generate_reply(
                action_data=self.action_data,
                chat_id=self.chat_id,
                request_type="focus.replyer",
                enable_tool=global_config.tool.enable_in_focus_chat,
            )
        try:
            success, reply_set = await asyncio.wait_for(
                generator_api.generate_reply(
                    action_data=self.action_data,
                    chat_id=self.chat_id,
                    request_type="focus.replyer",
                    enable_tool=global_config.tool.enable_in_focus_chat,
                ),
                timeout=global_config.chat.thinking_timeout,
            )
        except asyncio.TimeoutError:
            logger.warning(f"{self.log_prefix} 回复生成超时 ({global_config.chat.thinking_timeout}s)")
            return False, "timeout"

        # 检查从start_time以来的新消息数量
        # 获取动作触发时间或使用默认值
@@ -77,7 +85,7 @@ class ReplyAction(BaseAction):
        # 根据新消息数量决定是否使用reply_to
        need_reply = new_message_count >= random.randint(2, 5)
        logger.info(
            f"{self.log_prefix} 从{start_time}到{current_time}共有{new_message_count}条新消息,{'使用' if need_reply else '不使用'}reply_to"
            f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,{'使用' if need_reply else '不使用'}引用回复"
        )

        # 构建回复文本

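The timeout check added above wraps reply generation in `asyncio.wait_for`, so a stalled API call is abandoned after `thinking_timeout` seconds instead of blocking the action loop. A self-contained sketch of the same wrapper; the slow coroutine and the 1-second timeout are invented for illustration:

```python
# Minimal sketch of the timeout wrapper: cancel a slow "reply generation"
# coroutine if it exceeds thinking_timeout seconds. Values are made up.
import asyncio


async def fake_generate_reply() -> tuple:
    await asyncio.sleep(5)  # pretend the LLM API is slow
    return True, ["你好"]


async def execute(thinking_timeout: float = 1.0) -> tuple:
    try:
        success, reply_set = await asyncio.wait_for(fake_generate_reply(), timeout=thinking_timeout)
    except asyncio.TimeoutError:
        print(f"回复生成超时 ({thinking_timeout}s)")
        return False, "timeout"
    return success, " ".join(reply_set)


print(asyncio.run(execute()))  # (False, 'timeout')
```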
@@ -6,6 +6,7 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.tools.tool_use import ToolUser
from src.chat.utils.json_utils import process_llm_tool_calls
from typing import List, Dict, Tuple, Optional
from src.chat.message_receive.chat_stream import get_chat_manager

logger = get_logger("tool_executor")

@@ -42,7 +43,9 @@ class ToolExecutor:
            cache_ttl: 缓存生存时间(周期数)
        """
        self.chat_id = chat_id
        self.log_prefix = f"[ToolExecutor:{self.chat_id}] "
        self.chat_stream = get_chat_manager().get_stream(self.chat_id)
        self.log_prefix = f"[{get_chat_manager().get_stream_name(self.chat_id) or self.chat_id}]"

        self.llm_model = LLMRequest(
            model=global_config.model.tool_use,
            request_type="tool_executor",

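The new log prefix prefers a human-readable stream name and falls back to the raw chat id. A tiny standalone sketch of that fallback; the name lookup here is a stand-in for the real chat manager:

```python
from typing import Optional


def get_stream_name(chat_id: str) -> Optional[str]:
    """Stand-in for the chat manager's stream-name lookup."""
    names = {"abc123": "测试群"}  # hypothetical mapping
    return names.get(chat_id)


def make_log_prefix(chat_id: str) -> str:
    # Prefer the readable stream name; fall back to the raw chat id.
    return f"[{get_stream_name(chat_id) or chat_id}]"


print(make_log_prefix("abc123"))  # [测试群]
print(make_log_prefix("zzz999"))  # [zzz999]
```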
@@ -1,5 +1,5 @@
[inner]
version = "3.2.0"
version = "3.3.0"

#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件,请在修改后将version的值进行变更
@@ -102,6 +102,8 @@ exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易
# 专注模式下,麦麦会进行主动的观察和回复,并给出回复,token消耗量较高
# 自动模式下,麦麦会根据消息内容自动切换到专注模式或普通模式

thinking_timeout = 30 # 麦麦一次回复最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢)

[message_receive]
# 以下是消息过滤,可以根据规则过滤特定消息,将不会读取这些消息
ban_words = [
@@ -117,18 +119,12 @@ ban_msgs_regex = [
[normal_chat] #普通聊天
#一般回复参数
emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率
thinking_timeout = 30 # 麦麦最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢)

willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,mxp模式:mxp,自定义模式:custom(需要你自己实现)

response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数

mentioned_bot_inevitable_reply = true # 提及 bot 必然回复
at_bot_inevitable_reply = true # @bot 必然回复(包含提及)

enable_planner = true # 是否启用动作规划器(与focus_chat共享actions)


[focus_chat] #专注聊天
think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
consecutive_replies = 1 # 连续回复能力,值越高,麦麦连续回复的概率越高
@@ -228,7 +224,7 @@ console_log_level = "INFO" # 控制台日志级别,可选: DEBUG, INFO, WARNIN
file_log_level = "DEBUG" # 文件日志级别,可选: DEBUG, INFO, WARNING, ERROR, CRITICAL

# 第三方库日志控制
suppress_libraries = ["faiss","httpx", "urllib3", "asyncio", "websockets", "httpcore", "requests", "peewee", "openai","uvicorn"] # 完全屏蔽的库
suppress_libraries = ["faiss","httpx", "urllib3", "asyncio", "websockets", "httpcore", "requests", "peewee", "openai","uvicorn","jieba"] # 完全屏蔽的库
library_log_levels = { "aiohttp" = "WARNING"} # 设置特定库的日志级别

#下面的模型若使用硅基流动则不需要更改,使用ds官方则改成.env自定义的宏,使用自定义模型则选择定位相似的模型自己填写