Merge branch 'dev' of https://github.com/infinitycat233/MaiBot into dev
This commit is contained in:
@@ -44,7 +44,9 @@
## 🔥 Updates and Installation

**Latest version: v0.8.1** ([changelog](changelogs/changelog.md))

Download the latest release from the [Release](https://github.com/MaiM-with-u/MaiBot/releases/) page.

Download the latest launcher from the [launcher release page](https://github.com/MaiM-with-u/mailauncher/releases/tag/v0.1.0).

**GitHub branch notes:**

@@ -1,12 +1,39 @@
# Changelog

## [0.8.1] - 2025-6-27
## [0.8.2] - 2025-7-5

Optimizations and fixes:

- Fixed a bug where private chats switched to normal mode while in auto mode
- Fixed an ordering issue in general message filtering
- Refactored normal_chat code to use the same relationship building as focus
- Improved timing information and logging
- Added a reply timeout check
- Plugins in normal mode can now be activated by the LLM
- Merged the action activators
- Emoji can now be activated either randomly or by the LLM
- Removed observation and processor, simplifying the focus code path

## [0.8.1] - 2025-7-5

Feature updates:

- normal now supports tools, like focus
- focus now calls lpmm on every turn, like normal
- Removed personality expressions

Optimizations and fixes:

- Fixed emoji pack configuration not taking effect
- Merged the prompt construction for normal and focus

- Disabled console_input_loop in non-TTY environments
- Fixed filtered messages still being stored in the database
- Private chats now force focus mode
- Added parsing of reply_to and at
- Fixed the fixed silence caused by the focus cooldown
- Removed the Doubao drawing plugin; it is now provided in the plugin marketplace
- Fixed the expressor being unable to read the original text
- Fixed normal planner missing a timeout exit

## [0.8.0] - 2025-6-27

@@ -1,7 +1,6 @@
from src.chat.heart_flow.heartflow import heartflow
from src.chat.heart_flow.sub_heartflow import ChatState
from src.common.logger import get_logger
import time

logger = get_logger("api")

@@ -20,40 +19,6 @@ async def forced_change_subheartflow_status(subheartflow_id: str, status: ChatSt
    return False


async def get_subheartflow_cycle_info(subheartflow_id: str, history_len: int) -> dict:
    """获取子心流的循环信息"""
    subheartflow_cycle_info = await heartflow.api_get_subheartflow_cycle_info(subheartflow_id, history_len)
    logger.debug(f"子心流 {subheartflow_id} 循环信息: {subheartflow_cycle_info}")
    if subheartflow_cycle_info:
        return subheartflow_cycle_info
    else:
        logger.warning(f"子心流 {subheartflow_id} 循环信息未找到")
        return None


async def get_normal_chat_replies(subheartflow_id: str, limit: int = 10) -> list:
    """获取子心流的NormalChat回复记录

    Args:
        subheartflow_id: 子心流ID
        limit: 最大返回数量,默认10条

    Returns:
        list: 回复记录列表,如果未找到则返回空列表
    """
    replies = await heartflow.api_get_normal_chat_replies(subheartflow_id, limit)
    logger.debug(f"子心流 {subheartflow_id} NormalChat回复记录: 获取到 {len(replies) if replies else 0} 条")
    if replies:
        # 格式化时间戳为可读时间
        for reply in replies:
            if "time" in reply:
                reply["formatted_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(reply["time"]))
        return replies
    else:
        logger.warning(f"子心流 {subheartflow_id} NormalChat回复记录未找到")
        return []


async def get_all_states():
    """获取所有状态"""
    all_states = await heartflow.api_get_all_states()
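For context, a minimal sketch of how these async API helpers might be called. This is a hypothetical call site, not part of the diff; the import path and the subheartflow id are assumptions.

```python
import asyncio

# Assumed module path for the API shown above.
from src.chat.heart_flow.api import get_normal_chat_replies, get_subheartflow_cycle_info


async def dump_subheartflow(subheartflow_id: str) -> None:
    # Fetch recent cycle info and NormalChat replies for one sub-heartflow.
    cycle_info = await get_subheartflow_cycle_info(subheartflow_id, history_len=5)
    replies = await get_normal_chat_replies(subheartflow_id, limit=3)
    print(cycle_info)
    for reply in replies:
        # "formatted_time" is filled in by get_normal_chat_replies above.
        print(reply.get("formatted_time"), reply)


# asyncio.run(dump_subheartflow("some_subheartflow_id"))  # hypothetical id
```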
@@ -29,7 +29,7 @@ def init_prompt() -> None:
4. 思考有没有特殊的梗,一并总结成语言风格
5. 例子仅供参考,请严格根据群聊内容总结!!!
注意:总结成如下格式的规律,总结的内容要详细,但具有概括性:
当"xxxxxx"时,可以"xxxxxx", xxxxxx不超过20个字,为特定句式或表达
例如:当"AAAAA"时,可以"BBBBB", AAAAA代表某个具体的场景,不超过20个字。BBBBB代表对应的语言风格,特定句式或表达方式,不超过20个字。

例如:
当"对某件事表示十分惊叹,有些意外"时,使用"我嘞个xxxx"
@@ -69,7 +69,7 @@ class ExpressionLearner:
        # TODO: API-Adapter修改标记
        self.express_learn_model: LLMRequest = LLMRequest(
            model=global_config.model.replyer_1,
            temperature=0.2,
            temperature=0.3,
            request_type="expressor.learner",
        )
        self.llm_model = None

@@ -2,24 +2,20 @@
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.common.logger import get_logger
from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail
from src.chat.focus_chat.hfc_utils import CycleDetail
from typing import List
# Import the new utility function

logger = get_logger("observation")
logger = get_logger("loop_info")


# 所有观察的基类
class HFCloopObservation:
class FocusLoopInfo:
    def __init__(self, observe_id):
        self.observe_info = ""
        self.observe_id = observe_id
        self.last_observe_time = datetime.now().timestamp()  # 初始化为当前时间
        self.history_loop: List[CycleDetail] = []

    def get_observe_info(self):
        return self.observe_info

    def add_loop_info(self, loop_info: CycleDetail):
        self.history_loop.append(loop_info)

@@ -50,11 +46,6 @@ class HFCloopObservation:
            action_taken_time_str = (
                datetime.fromtimestamp(action_taken_time).strftime("%H:%M:%S") if action_taken_time > 0 else "未知时间"
            )
            # print(action_type)
            # print(action_reasoning)
            # print(is_taken)
            # print(action_taken_time_str)
            # print("--------------------------------")
            if action_reasoning != cycle_last_reason:
                cycle_last_reason = action_reasoning
                action_reasoning_str = f"你选择这个action的原因是:{action_reasoning}"
@@ -71,9 +62,6 @@ class HFCloopObservation:
                else:
                    action_detailed_str += f"{action_taken_time_str}时,你选择回复(action:{action_type},内容是:'{response_text}'),但是动作失败了。{action_reasoning_str}\n"
            elif action_type == "no_reply":
                # action_detailed_str += (
                #     f"{action_taken_time_str}时,你选择不回复(action:{action_type}),{action_reasoning_str}\n"
                # )
                pass
            else:
                if is_taken:
@@ -88,18 +76,6 @@ class HFCloopObservation:
        else:
            cycle_info_block = "\n"

        # 根据连续文本回复的数量构建提示信息
        if consecutive_text_replies >= 3:  # 如果最近的三个活动都是文本回复
            cycle_info_block = f'你已经连续回复了三条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}",第三近: "{responses_for_prompt[2]}")。你回复的有点多了,请注意'
        elif consecutive_text_replies == 2:  # 如果最近的两个活动是文本回复
            cycle_info_block = f'你已经连续回复了两条消息(最近: "{responses_for_prompt[0]}",第二近: "{responses_for_prompt[1]}"),请注意'

        # 包装提示块,增加可读性,即使没有连续回复也给个标记
        # if cycle_info_block:
        #     cycle_info_block = f"\n你最近的回复\n{cycle_info_block}\n"
        # else:
        #     cycle_info_block = "\n"

        # 获取history_loop中最新添加的
        if self.history_loop:
            last_loop = self.history_loop[0]
@@ -113,16 +89,3 @@ class HFCloopObservation:
            cycle_info_block += f"距离你上一次阅读消息并思考和规划,已经过去了{time_diff}秒\n"
        else:
            cycle_info_block += "你还没看过消息\n"

        self.observe_info = cycle_info_block

    def to_dict(self) -> dict:
        """将观察对象转换为可序列化的字典"""
        # 只序列化基本信息,避免循环引用
        return {
            "observe_info": self.observe_info,
            "observe_id": self.observe_id,
            "last_observe_time": self.last_observe_time,
            # 不序列化history_loop,避免循环引用
            "history_loop_count": len(self.history_loop),
        }
@@ -1,135 +0,0 @@
import time
import os
from typing import Optional, Dict, Any
from src.common.logger import get_logger
import json

logger = get_logger("hfc")  # Logger Name Changed

log_dir = "log/log_cycle_debug/"


class CycleDetail:
    """循环信息记录类"""

    def __init__(self, cycle_id: int):
        self.cycle_id = cycle_id
        self.prefix = ""
        self.thinking_id = ""
        self.start_time = time.time()
        self.end_time: Optional[float] = None
        self.timers: Dict[str, float] = {}

        # 新字段
        self.loop_observation_info: Dict[str, Any] = {}
        self.loop_processor_info: Dict[str, Any] = {}  # 前处理器信息
        self.loop_plan_info: Dict[str, Any] = {}
        self.loop_action_info: Dict[str, Any] = {}

    def to_dict(self) -> Dict[str, Any]:
        """将循环信息转换为字典格式"""

        def convert_to_serializable(obj, depth=0, seen=None):
            if seen is None:
                seen = set()

            # 防止递归过深
            if depth > 5:  # 降低递归深度限制
                return str(obj)

            # 防止循环引用
            obj_id = id(obj)
            if obj_id in seen:
                return str(obj)
            seen.add(obj_id)

            try:
                if hasattr(obj, "to_dict"):
                    # 对于有to_dict方法的对象,直接调用其to_dict方法
                    return obj.to_dict()
                elif isinstance(obj, dict):
                    # 对于字典,只保留基本类型和可序列化的值
                    return {
                        k: convert_to_serializable(v, depth + 1, seen)
                        for k, v in obj.items()
                        if isinstance(k, (str, int, float, bool))
                    }
                elif isinstance(obj, (list, tuple)):
                    # 对于列表和元组,只保留可序列化的元素
                    return [
                        convert_to_serializable(item, depth + 1, seen)
                        for item in obj
                        if not isinstance(item, (dict, list, tuple))
                        or isinstance(item, (str, int, float, bool, type(None)))
                    ]
                elif isinstance(obj, (str, int, float, bool, type(None))):
                    return obj
                else:
                    return str(obj)
            finally:
                seen.remove(obj_id)

        return {
            "cycle_id": self.cycle_id,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "timers": self.timers,
            "thinking_id": self.thinking_id,
            "loop_observation_info": convert_to_serializable(self.loop_observation_info),
            "loop_processor_info": convert_to_serializable(self.loop_processor_info),
            "loop_plan_info": convert_to_serializable(self.loop_plan_info),
            "loop_action_info": convert_to_serializable(self.loop_action_info),
        }

    def complete_cycle(self):
        """完成循环,记录结束时间"""
        self.end_time = time.time()

        # 处理 prefix,只保留中英文字符和基本标点
        if not self.prefix:
            self.prefix = "group"
        else:
            # 只保留中文、英文字母、数字和基本标点
            allowed_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_")
            self.prefix = (
                "".join(char for char in self.prefix if "\u4e00" <= char <= "\u9fff" or char in allowed_chars)
                or "group"
            )

        # current_time_minute = time.strftime("%Y%m%d_%H%M", time.localtime())

        # try:
        #     self.log_cycle_to_file(
        #         log_dir + self.prefix + f"/{current_time_minute}_cycle_" + str(self.cycle_id) + ".json"
        #     )
        # except Exception as e:
        #     logger.warning(f"写入文件日志,可能是群名称包含非法字符: {e}")

    def log_cycle_to_file(self, file_path: str):
        """将循环信息写入文件"""
        # 如果目录不存在,则创建目录
        dir_name = os.path.dirname(file_path)
        # 去除特殊字符,保留字母、数字、下划线、中划线和中文
        dir_name = "".join(
            char for char in dir_name if char.isalnum() or char in ["_", "-", "/"] or "\u4e00" <= char <= "\u9fff"
        )
        # print("dir_name:", dir_name)
        if dir_name and not os.path.exists(dir_name):
            os.makedirs(dir_name, exist_ok=True)
        # 写入文件

        file_path = os.path.join(dir_name, os.path.basename(file_path))
        # print("file_path:", file_path)
        with open(file_path, "a", encoding="utf-8") as f:
            f.write(json.dumps(self.to_dict(), ensure_ascii=False) + "\n")

    def set_thinking_id(self, thinking_id: str):
        """设置思考消息ID"""
        self.thinking_id = thinking_id

    def set_loop_info(self, loop_info: Dict[str, Any]):
        """设置循环信息"""
        self.loop_observation_info = loop_info["loop_observation_info"]
        self.loop_processor_info = loop_info["loop_processor_info"]
        self.loop_plan_info = loop_info["loop_plan_info"]
        self.loop_action_info = loop_info["loop_action_info"]
@@ -9,66 +9,23 @@ from rich.traceback import install
from src.chat.utils.prompt_builder import global_prompt_manager
from src.common.logger import get_logger
from src.chat.utils.timer_calculator import Timer
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info_processors.chattinginfo_processor import ChattingInfoProcessor
from src.chat.focus_chat.info_processors.working_memory_processor import WorkingMemoryProcessor
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.heart_flow.observation.actions_observation import ActionObservation

from src.chat.focus_chat.memory_activator import MemoryActivator
from src.chat.focus_chat.info_processors.base_processor import BaseProcessor
from src.chat.focus_chat.planners.planner_factory import PlannerFactory
from src.chat.focus_chat.planners.modify_actions import ActionModifier
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.focus_chat.focus_loop_info import FocusLoopInfo
from src.chat.planner_actions.planner import ActionPlanner
from src.chat.planner_actions.action_modifier import ActionModifier
from src.chat.planner_actions.action_manager import ActionManager
from src.config.config import global_config
from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger
from src.chat.focus_chat.hfc_version_manager import get_hfc_version
from src.person_info.relationship_builder_manager import relationship_builder_manager
from src.chat.focus_chat.hfc_utils import CycleDetail


install(extra_lines=3)

# 注释:原来的动作修改超时常量已移除,因为改为顺序执行

# 定义观察器映射:键是观察器名称,值是 (观察器类, 初始化参数)
OBSERVATION_CLASSES = {
    "ChattingObservation": (ChattingObservation, "chat_id"),
    "WorkingMemoryObservation": (WorkingMemoryObservation, "observe_id"),
    "HFCloopObservation": (HFCloopObservation, "observe_id"),
}

# 定义处理器映射:键是处理器名称,值是 (处理器类, 可选的配置键名)
PROCESSOR_CLASSES = {
    "ChattingInfoProcessor": (ChattingInfoProcessor, None),
    "WorkingMemoryProcessor": (WorkingMemoryProcessor, "working_memory_processor"),
}

logger = get_logger("hfc")  # Logger Name Changed


async def _handle_cycle_delay(action_taken_this_cycle: bool, cycle_start_time: float, log_prefix: str):
    """处理循环延迟"""
    cycle_duration = time.monotonic() - cycle_start_time

    try:
        sleep_duration = 0.0
        if not action_taken_this_cycle and cycle_duration < 1:
            sleep_duration = 1 - cycle_duration
        elif cycle_duration < 0.2:
            sleep_duration = 0.2

        if sleep_duration > 0:
            await asyncio.sleep(sleep_duration)

    except asyncio.CancelledError:
        logger.info(f"{log_prefix} Sleep interrupted, loop likely cancelling.")
        raise


class HeartFChatting:
    """
    管理一个连续的Focus Chat循环
@@ -80,7 +37,6 @@ class HeartFChatting:
        self,
        chat_id: str,
        on_stop_focus_chat: Optional[Callable[[], Awaitable[None]]] = None,
        performance_version: str = None,
    ):
        """
        HeartFChatting 初始化函数
@@ -95,8 +51,6 @@ class HeartFChatting:
        self.chat_stream = get_chat_manager().get_stream(self.stream_id)
        self.log_prefix = f"[{get_chat_manager().get_stream_name(self.stream_id) or self.stream_id}]"

        self.memory_activator = MemoryActivator()

        self.relationship_builder = relationship_builder_manager.get_or_create_builder(self.stream_id)

        # 新增:消息计数器和疲惫阈值
@@ -106,25 +60,11 @@ class HeartFChatting:
        self._message_threshold = max(10, int(30 * global_config.chat.exit_focus_threshold))
        self._fatigue_triggered = False  # 是否已触发疲惫退出

        # 初始化观察器
        self.observations: List[Observation] = []
        self._register_observations()

        # 根据配置文件和默认规则确定启用的处理器
        self.enabled_processor_names = ["ChattingInfoProcessor"]
        if global_config.focus_chat.working_memory_processor:
            self.enabled_processor_names.append("WorkingMemoryProcessor")

        self.processors: List[BaseProcessor] = []
        self._register_default_processors()
        self.loop_info: FocusLoopInfo = FocusLoopInfo(observe_id=self.stream_id)

        self.action_manager = ActionManager()
        self.action_planner = PlannerFactory.create_planner(
            log_prefix=self.log_prefix, action_manager=self.action_manager
        )
        self.action_modifier = ActionModifier(action_manager=self.action_manager)
        self.action_observation = ActionObservation(observe_id=self.stream_id)
        self.action_observation.set_action_manager(self.action_manager)
        self.action_planner = ActionPlanner(chat_id=self.stream_id, action_manager=self.action_manager)
        self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id)

        self._processing_lock = asyncio.Lock()
@@ -141,75 +81,20 @@ class HeartFChatting:
        # 存储回调函数
        self.on_stop_focus_chat = on_stop_focus_chat

        self.reply_timeout_count = 0
        self.plan_timeout_count = 0

        # 初始化性能记录器
        # 如果没有指定版本号,则使用全局版本管理器的版本号
        actual_version = performance_version or get_hfc_version()
        self.performance_logger = HFCPerformanceLogger(chat_id, actual_version)
        self.performance_logger = HFCPerformanceLogger(chat_id)

        logger.info(
            f"{self.log_prefix} HeartFChatting 初始化完成,消息疲惫阈值: {self._message_threshold}条(基于exit_focus_threshold={global_config.chat.exit_focus_threshold}计算,仅在auto模式下生效)"
        )

    def _register_observations(self):
        """注册所有观察器"""
        self.observations = []  # 清空已有的

        for name, (observation_class, param_name) in OBSERVATION_CLASSES.items():
            try:
                # 检查是否需要跳过WorkingMemoryObservation
                if name == "WorkingMemoryObservation":
                    # 如果工作记忆处理器被禁用,则跳过WorkingMemoryObservation
                    if not global_config.focus_chat.working_memory_processor:
                        logger.debug(f"{self.log_prefix} 工作记忆处理器已禁用,跳过注册观察器 {name}")
                        continue

                # 根据参数名使用正确的参数
                kwargs = {param_name: self.stream_id}
                observation = observation_class(**kwargs)
                self.observations.append(observation)
                logger.debug(f"{self.log_prefix} 注册观察器 {name}")
            except Exception as e:
                logger.error(f"{self.log_prefix} 观察器 {name} 构造失败: {e}")

        if self.observations:
            logger.info(f"{self.log_prefix} 已注册观察器: {[o.__class__.__name__ for o in self.observations]}")
        else:
            logger.warning(f"{self.log_prefix} 没有注册任何观察器")

    def _register_default_processors(self):
        """根据 self.enabled_processor_names 注册信息处理器"""
        self.processors = []  # 清空已有的

        for name in self.enabled_processor_names:  # 'name' is "ChattingInfoProcessor", etc.
            processor_info = PROCESSOR_CLASSES.get(name)  # processor_info is (ProcessorClass, config_key)
            if processor_info:
                processor_actual_class = processor_info[0]  # 获取实际的类定义
                # 根据处理器类名判断构造参数
                if name == "ChattingInfoProcessor":
                    self.processors.append(processor_actual_class())
                elif name == "WorkingMemoryProcessor":
                    self.processors.append(processor_actual_class(subheartflow_id=self.stream_id))
                else:
                    try:
                        self.processors.append(processor_actual_class())  # 尝试无参构造
                        logger.debug(f"{self.log_prefix} 注册处理器 {name} (尝试无参构造).")
                    except TypeError:
                        logger.error(
                            f"{self.log_prefix} 处理器 {name} 构造失败。它可能需要参数(如 subheartflow_id)但未在注册逻辑中明确处理。"
                        )
            else:
                logger.warning(
                    f"{self.log_prefix} 在 PROCESSOR_CLASSES 中未找到名为 '{name}' 的处理器定义,将跳过注册。"
                )

        if self.processors:
            logger.info(f"{self.log_prefix} 已注册处理器: {[p.__class__.__name__ for p in self.processors]}")
        else:
            logger.warning(f"{self.log_prefix} 没有注册任何处理器。这可能是由于配置错误或所有处理器都被禁用了。")

    async def start(self):
        """检查是否需要启动主循环,如果未激活则启动。"""
        logger.debug(f"{self.log_prefix} 开始启动 HeartFChatting")

        # 如果循环已经激活,直接返回
        if self._loop_active:
@@ -230,8 +115,6 @@ class HeartFChatting:
            try:
                # 等待旧任务确实被取消
                await asyncio.wait_for(self._loop_task, timeout=5.0)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                pass  # 忽略取消或超时错误
            except Exception as e:
                logger.warning(f"{self.log_prefix} 等待旧任务取消时出错: {e}")
            self._loop_task = None  # 清理旧任务引用
@@ -284,7 +167,6 @@ class HeartFChatting:
            # 初始化周期状态
            cycle_timers = {}
            loop_cycle_start_time = time.monotonic()

            # 执行规划和处理阶段
            try:
@@ -307,6 +189,13 @@ class HeartFChatting:
                if loop_info["loop_action_info"]["command"] == "stop_focus_chat":
                    logger.info(f"{self.log_prefix} 麦麦决定停止专注聊天")

                    # 如果是私聊,则不停止,而是重置疲劳度并继续
                    if not self.chat_stream.group_info:
                        logger.info(f"{self.log_prefix} 私聊模式下收到停止请求,不退出。")
                        continue  # 继续下一次循环,而不是退出

                    # 如果是群聊,则执行原来的停止逻辑
                    # 如果设置了回调函数,则调用它
                    if self.on_stop_focus_chat:
                        try:
@@ -324,14 +213,11 @@ class HeartFChatting:
                logger.error(f"{self.log_prefix} 处理上下文时出错: {e}")
                # 为当前循环设置错误状态,防止后续重复报错
                error_loop_info = {
                    "loop_observation_info": {},
                    "loop_processor_info": {},
                    "loop_plan_info": {
                        "action_result": {
                            "action_type": "error",
                            "action_data": {},
                        },
                        "observed_messages": "",
                    },
                    "loop_action_info": {
                        "action_taken": False,
@@ -349,22 +235,10 @@ class HeartFChatting:
            self._current_cycle_detail.set_loop_info(loop_info)

            # 从observations列表中获取HFCloopObservation
            hfcloop_observation = next(
                (obs for obs in self.observations if isinstance(obs, HFCloopObservation)), None
            )
            if hfcloop_observation:
                hfcloop_observation.add_loop_info(self._current_cycle_detail)
            else:
                logger.warning(f"{self.log_prefix} 未找到HFCloopObservation实例")
            self.loop_info.add_loop_info(self._current_cycle_detail)

            self._current_cycle_detail.timers = cycle_timers

            # 防止循环过快消耗资源
            await _handle_cycle_delay(
                loop_info["loop_action_info"]["action_taken"], loop_cycle_start_time, self.log_prefix
            )

            # 完成当前循环并保存历史
            self._current_cycle_detail.complete_cycle()
            self._cycle_history.append(self._current_cycle_detail)
@@ -375,24 +249,11 @@ class HeartFChatting:
                formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
                timer_strings.append(f"{name}: {formatted_time}")

            # 新增:输出每个处理器的耗时
            processor_time_costs = self._current_cycle_detail.loop_processor_info.get(
                "processor_time_costs", {}
            )
            processor_time_strings = []
            for pname, ptime in processor_time_costs.items():
                formatted_ptime = f"{ptime * 1000:.2f}毫秒" if ptime < 1 else f"{ptime:.2f}秒"
                processor_time_strings.append(f"{pname}: {formatted_ptime}")
            processor_time_log = (
                ("\n前处理器耗时: " + "; ".join(processor_time_strings)) if processor_time_strings else ""
            )

            logger.info(
                f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考,"
                f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, "
                f"动作: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}"
                f"选择动作: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}"
                + (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "")
                + processor_time_log
            )

            # 记录性能数据
@@ -403,7 +264,6 @@ class HeartFChatting:
                "action_type": action_result.get("action_type", "unknown"),
                "total_time": self._current_cycle_detail.end_time - self._current_cycle_detail.start_time,
                "step_times": cycle_timers.copy(),
                "processor_time_costs": processor_time_costs,  # 处理器时间
                "reasoning": action_result.get("reasoning", ""),
                "success": self._current_cycle_detail.loop_action_info.get("action_taken", False),
            }
@@ -423,15 +283,12 @@ class HeartFChatting:
            # 如果_current_cycle_detail存在但未完成,为其设置错误状态
            if self._current_cycle_detail and not hasattr(self._current_cycle_detail, "end_time"):
                error_loop_info = {
                    "loop_observation_info": {},
                    "loop_processor_info": {},
                    "loop_plan_info": {
                        "action_result": {
                            "action_type": "error",
                            "action_data": {},
                            "reasoning": f"循环处理失败: {e}",
                        },
                        "observed_messages": "",
                    },
                    "loop_action_info": {
                        "action_taken": False,
@@ -477,85 +334,10 @@ class HeartFChatting:
            if acquired and self._processing_lock.locked():
                self._processing_lock.release()

    async def _process_processors(self, observations: List[Observation]) -> tuple[List[InfoBase], Dict[str, float]]:
        # 记录并行任务开始时间
        parallel_start_time = time.time()
        logger.debug(f"{self.log_prefix} 开始信息处理器并行任务")

        processor_tasks = []
        task_to_name_map = {}
        processor_time_costs = {}  # 新增: 记录每个处理器耗时

        for processor in self.processors:
            processor_name = processor.__class__.log_prefix

            async def run_with_timeout(proc=processor):
                return await asyncio.wait_for(proc.process_info(observations=observations), 30)

            task = asyncio.create_task(run_with_timeout())

            processor_tasks.append(task)
            task_to_name_map[task] = processor_name
            logger.debug(f"{self.log_prefix} 启动处理器任务: {processor_name}")

        pending_tasks = set(processor_tasks)
        all_plan_info: List[InfoBase] = []

        while pending_tasks:
            done, pending_tasks = await asyncio.wait(pending_tasks, return_when=asyncio.FIRST_COMPLETED)

            for task in done:
                processor_name = task_to_name_map[task]
                task_completed_time = time.time()
                duration_since_parallel_start = task_completed_time - parallel_start_time

                try:
                    result_list = await task
                    logger.info(f"{self.log_prefix} 处理器 {processor_name} 已完成!")
                    if result_list is not None:
                        all_plan_info.extend(result_list)
                    else:
                        logger.warning(f"{self.log_prefix} 处理器 {processor_name} 返回了 None")
                    # 记录耗时
                    processor_time_costs[processor_name] = duration_since_parallel_start
                except asyncio.TimeoutError:
                    logger.info(f"{self.log_prefix} 处理器 {processor_name} 超时(>30s),已跳过")
                    processor_time_costs[processor_name] = 30
                except Exception as e:
                    logger.error(
                        f"{self.log_prefix} 处理器 {processor_name} 执行失败,耗时 (自并行开始): {duration_since_parallel_start:.2f}秒. 错误: {e}",
                        exc_info=True,
                    )
                    traceback.print_exc()
                    processor_time_costs[processor_name] = duration_since_parallel_start

            if pending_tasks:
                current_progress_time = time.time()
                elapsed_for_log = current_progress_time - parallel_start_time
                pending_names_for_log = [task_to_name_map[t] for t in pending_tasks]
                logger.info(
                    f"{self.log_prefix} 信息处理已进行 {elapsed_for_log:.2f}秒,待完成任务: {', '.join(pending_names_for_log)}"
                )

        # 所有任务完成后的最终日志
        parallel_end_time = time.time()
        total_duration = parallel_end_time - parallel_start_time
        logger.info(f"{self.log_prefix} 所有处理器任务全部完成,总耗时: {total_duration:.2f}秒")
        # logger.debug(f"{self.log_prefix} 所有信息处理器处理后的信息: {all_plan_info}")

        return all_plan_info, processor_time_costs

    async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
        try:
            loop_start_time = time.time()
            with Timer("观察", cycle_timers):
                # 执行所有观察器的观察
                for observation in self.observations:
                    await observation.observe()

                loop_observation_info = {
                    "observations": self.observations,
                }
                await self.loop_info.observe()

            await self.relationship_builder.build_relation()
@@ -565,39 +347,18 @@ class HeartFChatting:
            try:
                # 调用完整的动作修改流程
                await self.action_modifier.modify_actions(
                    observations=self.observations,
                    loop_info=self.loop_info,
                    mode="focus",
                )

                await self.action_observation.observe()
                self.observations.append(self.action_observation)
                logger.debug(f"{self.log_prefix} 动作修改完成")
            except Exception as e:
                logger.error(f"{self.log_prefix} 动作修改失败: {e}")
                # 继续执行,不中断流程

            # 第二步:信息处理器
            with Timer("信息处理器", cycle_timers):
                try:
                    all_plan_info, processor_time_costs = await self._process_processors(self.observations)
                except Exception as e:
                    logger.error(f"{self.log_prefix} 信息处理器失败: {e}")
                    # 设置默认值以继续执行
                    all_plan_info = []
                    processor_time_costs = {}

            loop_processor_info = {
                "all_plan_info": all_plan_info,
                "processor_time_costs": processor_time_costs,
            }

            logger.debug(f"{self.log_prefix} 并行阶段完成,准备进入规划器,plan_info数量: {len(all_plan_info)}")

            with Timer("规划器", cycle_timers):
                plan_result = await self.action_planner.plan(all_plan_info, self.observations, loop_start_time)
                plan_result = await self.action_planner.plan()

            loop_plan_info = {
                "action_result": plan_result.get("action_result", {}),
                "observed_messages": plan_result.get("observed_messages", ""),
            }

            action_type, action_data, reasoning = (
@@ -606,6 +367,8 @@ class HeartFChatting:
                plan_result.get("action_result", {}).get("reasoning", "未提供理由"),
            )

            action_data["loop_start_time"] = loop_start_time

            if action_type == "reply":
                action_str = "回复"
            elif action_type == "no_reply":
@@ -613,7 +376,7 @@ class HeartFChatting:
            else:
                action_str = action_type

            logger.debug(f"{self.log_prefix} 麦麦想要:'{action_str}'")
            logger.debug(f"{self.log_prefix} 麦麦想要:'{action_str}',理由是:{reasoning}")

            # 动作执行计时
            with Timer("动作执行", cycle_timers):
@@ -629,8 +392,6 @@ class HeartFChatting:
            }

            loop_info = {
                "loop_observation_info": loop_observation_info,
                "loop_processor_info": loop_processor_info,
                "loop_plan_info": loop_plan_info,
                "loop_action_info": loop_action_info,
            }
@@ -641,11 +402,8 @@ class HeartFChatting:
            logger.error(f"{self.log_prefix} FOCUS聊天处理失败: {e}")
            logger.error(traceback.format_exc())
            return {
                "loop_observation_info": {},
                "loop_processor_info": {},
                "loop_plan_info": {
                    "action_result": {"action_type": "error", "action_data": {}, "reasoning": f"处理失败: {e}"},
                    "observed_messages": "",
                },
                "loop_action_info": {"action_taken": False, "reply_text": "", "command": "", "taken_time": time.time()},
            }
@@ -690,7 +448,7 @@ class HeartFChatting:
            return False, "", ""

        if not action_handler:
            logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}, 原因: {reasoning}")
            logger.warning(f"{self.log_prefix} 未能创建动作处理器: {action}")
            return False, "", ""

        # 处理动作并获取结果
@@ -730,8 +488,15 @@ class HeartFChatting:
                logger.info(
                    f"{self.log_prefix} [非auto模式] 已发送 {self._message_count} 条消息,达到疲惫阈值 {current_threshold},但非auto模式不会自动退出"
                )

            logger.debug(f"{self.log_prefix} 麦麦执行了'{action}', 返回结果'{success}', '{reply_text}', '{command}'")
        else:
            if reply_text == "timeout":
                self.reply_timeout_count += 1
                if self.reply_timeout_count > 5:
                    logger.warning(
                        f"[{self.log_prefix} ] 连续回复超时次数过多,{global_config.chat.thinking_timeout}秒 内大模型没有返回有效内容,请检查你的api是否速度过慢或配置错误。建议不要使用推理模型,推理模型生成速度过慢。或者尝试拉高thinking_timeout参数,这可能导致回复时间过长。"
                    )
                logger.warning(f"{self.log_prefix} 回复生成超时{global_config.chat.thinking_timeout}s,已跳过")
                return False, "", ""

        return success, reply_text, command
@@ -11,11 +11,11 @@ class HFCPerformanceLogger:
    """HFC性能记录管理器"""

    # 版本号常量,可在启动时修改
    INTERNAL_VERSION = "v1.0.0"
    INTERNAL_VERSION = "v7.0.0"

    def __init__(self, chat_id: str, version: str = None):
    def __init__(self, chat_id: str):
        self.chat_id = chat_id
        self.version = version or self.INTERNAL_VERSION
        self.version = self.INTERNAL_VERSION
        self.log_dir = Path("log/hfc_loop")
        self.session_start_time = datetime.now()
@@ -41,7 +41,6 @@ class HFCPerformanceLogger:
            "action_type": cycle_data.get("action_type", "unknown"),
            "total_time": cycle_data.get("total_time", 0),
            "step_times": cycle_data.get("step_times", {}),
            "processor_time_costs": cycle_data.get("processor_time_costs", {}),  # 前处理器时间
            "reasoning": cycle_data.get("reasoning", ""),
            "success": cycle_data.get("success", False),
        }
@@ -5,9 +5,104 @@ from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.message import UserInfo
from src.common.logger import get_logger
import json
from typing import Dict, Any

logger = get_logger(__name__)

log_dir = "log/log_cycle_debug/"


class CycleDetail:
    """循环信息记录类"""

    def __init__(self, cycle_id: int):
        self.cycle_id = cycle_id
        self.prefix = ""
        self.thinking_id = ""
        self.start_time = time.time()
        self.end_time: Optional[float] = None
        self.timers: Dict[str, float] = {}

        self.loop_plan_info: Dict[str, Any] = {}
        self.loop_action_info: Dict[str, Any] = {}

    def to_dict(self) -> Dict[str, Any]:
        """将循环信息转换为字典格式"""

        def convert_to_serializable(obj, depth=0, seen=None):
            if seen is None:
                seen = set()

            # 防止递归过深
            if depth > 5:  # 降低递归深度限制
                return str(obj)

            # 防止循环引用
            obj_id = id(obj)
            if obj_id in seen:
                return str(obj)
            seen.add(obj_id)

            try:
                if hasattr(obj, "to_dict"):
                    # 对于有to_dict方法的对象,直接调用其to_dict方法
                    return obj.to_dict()
                elif isinstance(obj, dict):
                    # 对于字典,只保留基本类型和可序列化的值
                    return {
                        k: convert_to_serializable(v, depth + 1, seen)
                        for k, v in obj.items()
                        if isinstance(k, (str, int, float, bool))
                    }
                elif isinstance(obj, (list, tuple)):
                    # 对于列表和元组,只保留可序列化的元素
                    return [
                        convert_to_serializable(item, depth + 1, seen)
                        for item in obj
                        if not isinstance(item, (dict, list, tuple))
                        or isinstance(item, (str, int, float, bool, type(None)))
                    ]
                elif isinstance(obj, (str, int, float, bool, type(None))):
                    return obj
                else:
                    return str(obj)
            finally:
                seen.remove(obj_id)

        return {
            "cycle_id": self.cycle_id,
            "start_time": self.start_time,
            "end_time": self.end_time,
            "timers": self.timers,
            "thinking_id": self.thinking_id,
            "loop_plan_info": convert_to_serializable(self.loop_plan_info),
            "loop_action_info": convert_to_serializable(self.loop_action_info),
        }

    def complete_cycle(self):
        """完成循环,记录结束时间"""
        self.end_time = time.time()

        # 处理 prefix,只保留中英文字符和基本标点
        if not self.prefix:
            self.prefix = "group"
        else:
            # 只保留中文、英文字母、数字和基本标点
            allowed_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_")
            self.prefix = (
                "".join(char for char in self.prefix if "\u4e00" <= char <= "\u9fff" or char in allowed_chars)
                or "group"
            )

    def set_thinking_id(self, thinking_id: str):
        """设置思考消息ID"""
        self.thinking_id = thinking_id

    def set_loop_info(self, loop_info: Dict[str, Any]):
        """设置循环信息"""
        self.loop_plan_info = loop_info["loop_plan_info"]
        self.loop_action_info = loop_info["loop_action_info"]
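A small usage sketch of the CycleDetail record defined above, showing that nested values go through convert_to_serializable and are therefore JSON-safe. The import path mirrors this commit (src.chat.focus_chat.hfc_utils); the concrete values are illustrative.

```python
import json

from src.chat.focus_chat.hfc_utils import CycleDetail  # added by this commit

cycle = CycleDetail(cycle_id=1)
cycle.set_thinking_id("thinking_123")  # hypothetical id
cycle.set_loop_info(
    {
        "loop_plan_info": {"action_result": {"action_type": "reply", "action_data": {}}},
        "loop_action_info": {"action_taken": True, "reply_text": "hi", "command": ""},
    }
)
cycle.complete_cycle()

# to_dict() converts nested dicts/lists recursively, so the result can be dumped directly.
print(json.dumps(cycle.to_dict(), ensure_ascii=False, indent=2))
```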


async def create_empty_anchor_message(
    platform: str, group_info: dict, chat_stream: ChatStream
@@ -1,185 +0,0 @@
"""
HFC性能记录版本号管理器

用于管理HFC性能记录的内部版本号,支持:
1. 默认版本号设置
2. 启动时版本号配置
3. 版本号验证和格式化
"""

import os
import re
from datetime import datetime
from typing import Optional
from src.common.logger import get_logger

logger = get_logger("hfc_version")


class HFCVersionManager:
    """HFC版本号管理器"""

    # 默认版本号
    DEFAULT_VERSION = "v5.0.0"

    # 当前运行时版本号
    _current_version: Optional[str] = None

    @classmethod
    def set_version(cls, version: str) -> bool:
        """
        设置当前运行时版本号

        参数:
            version: 版本号字符串,格式如 v1.0.0 或 1.0.0

        返回:
            bool: 设置是否成功
        """
        try:
            validated_version = cls._validate_version(version)
            if validated_version:
                cls._current_version = validated_version
                logger.info(f"HFC性能记录版本已设置为: {validated_version}")
                return True
            else:
                logger.warning(f"无效的版本号格式: {version}")
                return False
        except Exception as e:
            logger.error(f"设置版本号失败: {e}")
            return False

    @classmethod
    def get_version(cls) -> str:
        """
        获取当前版本号

        返回:
            str: 当前版本号
        """
        if cls._current_version:
            return cls._current_version

        # 尝试从环境变量获取
        env_version = os.getenv("HFC_PERFORMANCE_VERSION")
        if env_version:
            if cls.set_version(env_version):
                return cls._current_version

        # 返回默认版本号
        return cls.DEFAULT_VERSION

    @classmethod
    def auto_generate_version(cls, base_version: str = None) -> str:
        """
        自动生成版本号(基于时间戳)

        参数:
            base_version: 基础版本号,如果不提供则使用默认版本

        返回:
            str: 生成的版本号
        """
        if not base_version:
            base_version = cls.DEFAULT_VERSION

        # 提取基础版本号的主要部分
        base_match = re.match(r"v?(\d+\.\d+)", base_version)
        if base_match:
            base_part = base_match.group(1)
        else:
            base_part = "1.0"

        # 添加时间戳
        timestamp = datetime.now().strftime("%Y%m%d_%H%M")
        generated_version = f"v{base_part}.{timestamp}"

        cls.set_version(generated_version)
        logger.info(f"自动生成版本号: {generated_version}")

        return generated_version

    @classmethod
    def _validate_version(cls, version: str) -> Optional[str]:
        """
        验证版本号格式

        参数:
            version: 待验证的版本号

        返回:
            Optional[str]: 验证后的版本号,失败返回None
        """
        if not version or not isinstance(version, str):
            return None

        version = version.strip()

        # 支持的格式:
        # v1.0.0, 1.0.0, v1.0, 1.0, v1.0.0.20241222_1530 等
        patterns = [
            r"^v?(\d+\.\d+\.\d+)$",  # v1.0.0 或 1.0.0
            r"^v?(\d+\.\d+)$",  # v1.0 或 1.0
            r"^v?(\d+\.\d+\.\d+\.\w+)$",  # v1.0.0.build 或 1.0.0.build
            r"^v?(\d+\.\d+\.\w+)$",  # v1.0.build 或 1.0.build
        ]

        for pattern in patterns:
            match = re.match(pattern, version)
            if match:
                # 确保版本号以v开头
                if not version.startswith("v"):
                    version = "v" + version
                return version

        return None

    @classmethod
    def reset_version(cls):
        """重置版本号为默认值"""
        cls._current_version = None
        logger.info("HFC版本号已重置为默认值")

    @classmethod
    def get_version_info(cls) -> dict:
        """
        获取版本信息

        返回:
            dict: 版本相关信息
        """
        current = cls.get_version()
        return {
            "current_version": current,
            "default_version": cls.DEFAULT_VERSION,
            "is_custom": current != cls.DEFAULT_VERSION,
            "env_version": os.getenv("HFC_PERFORMANCE_VERSION"),
            "timestamp": datetime.now().isoformat(),
        }


# 全局函数,方便使用
def set_hfc_version(version: str) -> bool:
    """设置HFC性能记录版本号"""
    return HFCVersionManager.set_version(version)


def get_hfc_version() -> str:
    """获取当前HFC性能记录版本号"""
    return HFCVersionManager.get_version()


def auto_generate_hfc_version(base_version: str = None) -> str:
    """自动生成HFC版本号"""
    return HFCVersionManager.auto_generate_version(base_version)


def reset_hfc_version():
    """重置HFC版本号"""
    HFCVersionManager.reset_version()


# 在模块加载时显示当前版本信息
if __name__ != "__main__":
    current_version = HFCVersionManager.get_version()
    logger.debug(f"HFC性能记录模块已加载,当前版本: {current_version}")
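For reference, a minimal sketch of how the module-level helpers above were meant to be used. Note that the whole hfc_version_manager module is removed by this commit, so this call site is purely illustrative.

```python
from src.chat.focus_chat.hfc_version_manager import (  # module deleted by this commit
    auto_generate_hfc_version,
    get_hfc_version,
    set_hfc_version,
)

# Pin a version explicitly; "1.2.0" is normalized to "v1.2.0", bad input returns False.
set_hfc_version("1.2.0")
print(get_hfc_version())  # -> "v1.2.0"

# Or derive a timestamped version from a base version.
print(auto_generate_hfc_version("v1.2"))  # -> e.g. "v1.2.20250705_1200"
```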
@@ -1,83 +0,0 @@
from typing import Dict, Optional, Any, List
from dataclasses import dataclass
from .info_base import InfoBase


@dataclass
class ActionInfo(InfoBase):
    """动作信息类

    用于管理和记录动作的变更信息,包括需要添加或移除的动作。
    继承自 InfoBase 类,使用字典存储具体数据。

    Attributes:
        type (str): 信息类型标识符,固定为 "action"

    Data Fields:
        add_actions (List[str]): 需要添加的动作列表
        remove_actions (List[str]): 需要移除的动作列表
        reason (str): 变更原因说明
    """

    type: str = "action"

    def get_type(self) -> str:
        """获取信息类型"""
        return self.type

    def get_data(self) -> Dict[str, Any]:
        """获取信息数据"""
        return self.data

    def set_action_changes(self, action_changes: Dict[str, List[str]]) -> None:
        """设置动作变更信息

        Args:
            action_changes (Dict[str, List[str]]): 包含要增加和删除的动作列表
                {
                    "add": ["action1", "action2"],
                    "remove": ["action3"]
                }
        """
        self.data["add_actions"] = action_changes.get("add", [])
        self.data["remove_actions"] = action_changes.get("remove", [])

    def set_reason(self, reason: str) -> None:
        """设置变更原因

        Args:
            reason (str): 动作变更的原因说明
        """
        self.data["reason"] = reason

    def get_add_actions(self) -> List[str]:
        """获取需要添加的动作列表

        Returns:
            List[str]: 需要添加的动作列表
        """
        return self.data.get("add_actions", [])

    def get_remove_actions(self) -> List[str]:
        """获取需要移除的动作列表

        Returns:
            List[str]: 需要移除的动作列表
        """
        return self.data.get("remove_actions", [])

    def get_reason(self) -> Optional[str]:
        """获取变更原因

        Returns:
            Optional[str]: 动作变更的原因说明,如果未设置则返回 None
        """
        return self.data.get("reason")

    def has_changes(self) -> bool:
        """检查是否有动作变更

        Returns:
            bool: 如果有任何动作需要添加或移除则返回True
        """
        return bool(self.get_add_actions() or self.get_remove_actions())
@@ -1,97 +0,0 @@
from typing import Dict, Optional
from dataclasses import dataclass
from .info_base import InfoBase


@dataclass
class ChatInfo(InfoBase):
    """聊天信息类

    用于记录和管理聊天相关的信息,包括聊天ID、名称和类型等。
    继承自 InfoBase 类,使用字典存储具体数据。

    Attributes:
        type (str): 信息类型标识符,固定为 "chat"

    Data Fields:
        chat_id (str): 聊天的唯一标识符
        chat_name (str): 聊天的名称
        chat_type (str): 聊天的类型
    """

    type: str = "chat"

    def set_chat_id(self, chat_id: str) -> None:
        """设置聊天ID

        Args:
            chat_id (str): 聊天的唯一标识符
        """
        self.data["chat_id"] = chat_id

    def set_chat_name(self, chat_name: str) -> None:
        """设置聊天名称

        Args:
            chat_name (str): 聊天的名称
        """
        self.data["chat_name"] = chat_name

    def set_chat_type(self, chat_type: str) -> None:
        """设置聊天类型

        Args:
            chat_type (str): 聊天的类型
        """
        self.data["chat_type"] = chat_type

    def get_chat_id(self) -> Optional[str]:
        """获取聊天ID

        Returns:
            Optional[str]: 聊天的唯一标识符,如果未设置则返回 None
        """
        return self.get_info("chat_id")

    def get_chat_name(self) -> Optional[str]:
        """获取聊天名称

        Returns:
            Optional[str]: 聊天的名称,如果未设置则返回 None
        """
        return self.get_info("chat_name")

    def get_chat_type(self) -> Optional[str]:
        """获取聊天类型

        Returns:
            Optional[str]: 聊天的类型,如果未设置则返回 None
        """
        return self.get_info("chat_type")

    def get_type(self) -> str:
        """获取信息类型

        Returns:
            str: 当前信息对象的类型标识符
        """
        return self.type

    def get_data(self) -> Dict[str, str]:
        """获取所有信息数据

        Returns:
            Dict[str, str]: 包含所有信息数据的字典
        """
        return self.data

    def get_info(self, key: str) -> Optional[str]:
        """获取特定属性的信息

        Args:
            key: 要获取的属性键名

        Returns:
            Optional[str]: 属性值,如果键不存在则返回 None
        """
        return self.data.get(key)
@@ -1,157 +0,0 @@
from typing import Dict, Optional, Any
from dataclasses import dataclass
from .info_base import InfoBase


@dataclass
class CycleInfo(InfoBase):
    """循环信息类

    用于记录和管理心跳循环的相关信息,包括循环ID、时间信息、动作信息等。
    继承自 InfoBase 类,使用字典存储具体数据。

    Attributes:
        type (str): 信息类型标识符,固定为 "cycle"

    Data Fields:
        cycle_id (str): 当前循环的唯一标识符
        start_time (str): 循环开始的时间
        end_time (str): 循环结束的时间
        action (str): 在循环中采取的动作
        action_data (Dict[str, Any]): 动作相关的详细数据
        reason (str): 触发循环的原因
        observe_info (str): 当前的回复信息
    """

    type: str = "cycle"

    def get_type(self) -> str:
        """获取信息类型"""
        return self.type

    def get_data(self) -> Dict[str, str]:
        """获取信息数据"""
        return self.data

    def get_info(self, key: str) -> Optional[str]:
        """获取特定属性的信息

        Args:
            key: 要获取的属性键名

        Returns:
            属性值,如果键不存在则返回 None
        """
        return self.data.get(key)

    def set_cycle_id(self, cycle_id: str) -> None:
        """设置循环ID

        Args:
            cycle_id (str): 循环的唯一标识符
        """
        self.data["cycle_id"] = cycle_id

    def set_start_time(self, start_time: str) -> None:
        """设置开始时间

        Args:
            start_time (str): 循环开始的时间,建议使用标准时间格式
        """
        self.data["start_time"] = start_time

    def set_end_time(self, end_time: str) -> None:
        """设置结束时间

        Args:
            end_time (str): 循环结束的时间,建议使用标准时间格式
        """
        self.data["end_time"] = end_time

    def set_action(self, action: str) -> None:
        """设置采取的动作

        Args:
            action (str): 在循环中执行的动作名称
        """
        self.data["action"] = action

    def set_action_data(self, action_data: Dict[str, Any]) -> None:
        """设置动作数据

        Args:
            action_data (Dict[str, Any]): 动作相关的详细数据,将被转换为字符串存储
        """
        self.data["action_data"] = str(action_data)

    def set_reason(self, reason: str) -> None:
        """设置原因

        Args:
            reason (str): 触发循环的原因说明
        """
        self.data["reason"] = reason

    def set_observe_info(self, observe_info: str) -> None:
        """设置回复信息

        Args:
            observe_info (str): 当前的回复信息
        """
        self.data["observe_info"] = observe_info

    def get_cycle_id(self) -> Optional[str]:
        """获取循环ID

        Returns:
            Optional[str]: 循环的唯一标识符,如果未设置则返回 None
        """
        return self.get_info("cycle_id")

    def get_start_time(self) -> Optional[str]:
        """获取开始时间

        Returns:
            Optional[str]: 循环开始的时间,如果未设置则返回 None
        """
        return self.get_info("start_time")

    def get_end_time(self) -> Optional[str]:
        """获取结束时间

        Returns:
            Optional[str]: 循环结束的时间,如果未设置则返回 None
        """
        return self.get_info("end_time")

    def get_action(self) -> Optional[str]:
        """获取采取的动作

        Returns:
            Optional[str]: 在循环中执行的动作名称,如果未设置则返回 None
        """
        return self.get_info("action")

    def get_action_data(self) -> Optional[str]:
        """获取动作数据

        Returns:
            Optional[str]: 动作相关的详细数据(字符串形式),如果未设置则返回 None
        """
        return self.get_info("action_data")

    def get_reason(self) -> Optional[str]:
        """获取原因

        Returns:
            Optional[str]: 触发循环的原因说明,如果未设置则返回 None
        """
        return self.get_info("reason")

    def get_observe_info(self) -> Optional[str]:
        """获取回复信息

        Returns:
            Optional[str]: 当前的回复信息,如果未设置则返回 None
        """
        return self.get_info("observe_info")
@@ -1,69 +0,0 @@
from typing import Dict, Optional, Any, List
from dataclasses import dataclass, field


@dataclass
class InfoBase:
    """信息基类

    这是一个基础信息类,用于存储和管理各种类型的信息数据。
    所有具体的信息类都应该继承自这个基类。

    Attributes:
        type (str): 信息类型标识符,默认为 "base"
        data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典,
            支持存储字符串、字典、列表等嵌套数据结构
    """

    type: str = "base"
    data: Dict[str, Any] = field(default_factory=dict)
    processed_info: str = ""

    def get_type(self) -> str:
        """获取信息类型

        Returns:
            str: 当前信息对象的类型标识符
        """
        return self.type

    def get_data(self) -> Dict[str, Any]:
        """获取所有信息数据

        Returns:
            Dict[str, Any]: 包含所有信息数据的字典
        """
        return self.data

    def get_info(self, key: str) -> Optional[Any]:
        """获取特定属性的信息

        Args:
            key: 要获取的属性键名

        Returns:
            Optional[Any]: 属性值,如果键不存在则返回 None
        """
        return self.data.get(key)

    def get_info_list(self, key: str) -> List[Any]:
        """获取特定属性的信息列表

        Args:
            key: 要获取的属性键名

        Returns:
            List[Any]: 属性值列表,如果键不存在则返回空列表
        """
        value = self.data.get(key)
        if isinstance(value, list):
            return value
        return []

    def get_processed_info(self) -> str:
        """获取处理后的信息

        Returns:
            str: 处理后的信息字符串
        """
        return self.processed_info
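A brief sketch of how InfoBase was subclassed. The file is removed by this commit; the class name below is hypothetical and only mirrors the pattern used by the concrete info classes that follow.

```python
from dataclasses import dataclass

from src.chat.focus_chat.info.info_base import InfoBase  # removed by this commit


@dataclass
class ExampleInfo(InfoBase):
    # Hypothetical subclass: concrete info classes set a fixed `type`
    # and store their fields in the shared `data` dict.
    type: str = "example"

    def set_note(self, note: str) -> None:
        self.data["note"] = note


info = ExampleInfo()
info.set_note("hello")
print(info.get_type())          # -> "example"
print(info.get_info("note"))    # -> "hello"
print(info.get_info_list("x"))  # -> []
```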
@@ -1,165 +0,0 @@
from typing import Dict, Optional
from dataclasses import dataclass
from .info_base import InfoBase


@dataclass
class ObsInfo(InfoBase):
    """OBS信息类

    用于记录和管理OBS相关的信息,包括说话消息、截断后的说话消息和聊天类型。
    继承自 InfoBase 类,使用字典存储具体数据。

    Attributes:
        type (str): 信息类型标识符,固定为 "obs"

    Data Fields:
        talking_message (str): 说话消息内容
        talking_message_str_truncate (str): 截断后的说话消息内容
        talking_message_str_short (str): 简短版本的说话消息内容(使用最新一半消息)
        talking_message_str_truncate_short (str): 截断简短版本的说话消息内容(使用最新一半消息)
        chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他)
    """

    type: str = "obs"

    def set_talking_message(self, message: str) -> None:
        """设置说话消息

        Args:
            message (str): 说话消息内容
        """
        self.data["talking_message"] = message

    def set_talking_message_str_truncate(self, message: str) -> None:
        """设置截断后的说话消息

        Args:
            message (str): 截断后的说话消息内容
        """
        self.data["talking_message_str_truncate"] = message

    def set_talking_message_str_short(self, message: str) -> None:
        """设置简短版本的说话消息

        Args:
            message (str): 简短版本的说话消息内容
        """
        self.data["talking_message_str_short"] = message

    def set_talking_message_str_truncate_short(self, message: str) -> None:
        """设置截断简短版本的说话消息

        Args:
            message (str): 截断简短版本的说话消息内容
        """
        self.data["talking_message_str_truncate_short"] = message

    def set_previous_chat_info(self, message: str) -> None:
        """设置之前聊天信息

        Args:
            message (str): 之前聊天信息内容
        """
        self.data["previous_chat_info"] = message

    def set_chat_type(self, chat_type: str) -> None:
        """设置聊天类型

        Args:
            chat_type (str): 聊天类型,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他)
        """
        if chat_type not in ["private", "group", "other"]:
            chat_type = "other"
        self.data["chat_type"] = chat_type

    def set_chat_target(self, chat_target: str) -> None:
        """设置聊天目标

        Args:
            chat_target (str): 聊天目标,可以是 "private"(私聊)、"group"(群聊)或 "other"(其他)
        """
        self.data["chat_target"] = chat_target

    def set_chat_id(self, chat_id: str) -> None:
        """设置聊天ID

        Args:
            chat_id (str): 聊天ID
        """
        self.data["chat_id"] = chat_id

    def get_chat_id(self) -> Optional[str]:
        """获取聊天ID

        Returns:
            Optional[str]: 聊天ID,如果未设置则返回 None
        """
        return self.get_info("chat_id")

    def get_talking_message(self) -> Optional[str]:
        """获取说话消息

        Returns:
            Optional[str]: 说话消息内容,如果未设置则返回 None
        """
        return self.get_info("talking_message")

    def get_talking_message_str_truncate(self) -> Optional[str]:
        """获取截断后的说话消息

        Returns:
            Optional[str]: 截断后的说话消息内容,如果未设置则返回 None
        """
        return self.get_info("talking_message_str_truncate")

    def get_talking_message_str_short(self) -> Optional[str]:
        """获取简短版本的说话消息

        Returns:
            Optional[str]: 简短版本的说话消息内容,如果未设置则返回 None
        """
        return self.get_info("talking_message_str_short")

    def get_talking_message_str_truncate_short(self) -> Optional[str]:
        """获取截断简短版本的说话消息

        Returns:
            Optional[str]: 截断简短版本的说话消息内容,如果未设置则返回 None
        """
        return self.get_info("talking_message_str_truncate_short")

    def get_chat_type(self) -> str:
        """获取聊天类型

        Returns:
            str: 聊天类型,默认为 "other"
        """
        return self.get_info("chat_type") or "other"

    def get_type(self) -> str:
        """获取信息类型

        Returns:
            str: 当前信息对象的类型标识符
        """
        return self.type

    def get_data(self) -> Dict[str, str]:
        """获取所有信息数据

        Returns:
            Dict[str, str]: 包含所有信息数据的字典
        """
        return self.data

    def get_info(self, key: str) -> Optional[str]:
        """获取特定属性的信息

        Args:
            key: 要获取的属性键名

        Returns:
            Optional[str]: 属性值,如果键不存在则返回 None
        """
        return self.data.get(key)
@@ -1,86 +0,0 @@
|
||||
from typing import Dict, Optional, List
|
||||
from dataclasses import dataclass
|
||||
from .info_base import InfoBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class WorkingMemoryInfo(InfoBase):
|
||||
type: str = "workingmemory"
|
||||
|
||||
processed_info: str = ""
|
||||
|
||||
def set_talking_message(self, message: str) -> None:
|
||||
"""设置说话消息
|
||||
|
||||
Args:
|
||||
message (str): 说话消息内容
|
||||
"""
|
||||
self.data["talking_message"] = message
|
||||
|
||||
def set_working_memory(self, working_memory: List[str]) -> None:
|
||||
"""设置工作记忆列表
|
||||
|
||||
Args:
|
||||
working_memory (List[str]): 工作记忆内容列表
|
||||
"""
|
||||
self.data["working_memory"] = working_memory
|
||||
|
||||
def add_working_memory(self, working_memory: str) -> None:
|
||||
"""添加一条工作记忆
|
||||
|
||||
Args:
|
||||
working_memory (str): 工作记忆内容,格式为"记忆要点:xxx"
|
||||
"""
|
||||
working_memory_list = self.data.get("working_memory", [])
|
||||
working_memory_list.append(working_memory)
|
||||
self.data["working_memory"] = working_memory_list
|
||||
|
||||
def get_working_memory(self) -> List[str]:
|
||||
"""获取所有工作记忆
|
||||
|
||||
Returns:
|
||||
List[str]: 工作记忆内容列表,每条记忆格式为"记忆要点:xxx"
|
||||
"""
|
||||
return self.data.get("working_memory", [])
|
||||
|
||||
def get_type(self) -> str:
|
||||
"""获取信息类型
|
||||
|
||||
Returns:
|
||||
str: 当前信息对象的类型标识符
|
||||
"""
|
||||
return self.type
|
||||
|
||||
def get_data(self) -> Dict[str, List[str]]:
|
||||
"""获取所有信息数据
|
||||
|
||||
Returns:
|
||||
Dict[str, List[str]]: 包含所有信息数据的字典
|
||||
"""
|
||||
return self.data
|
||||
|
||||
def get_info(self, key: str) -> Optional[List[str]]:
|
||||
"""获取特定属性的信息
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
Optional[List[str]]: 属性值,如果键不存在则返回 None
|
||||
"""
|
||||
return self.data.get(key)
|
||||
|
||||
def get_processed_info(self) -> str:
|
||||
"""获取处理后的信息
|
||||
|
||||
Returns:
|
||||
str: 处理后的信息数据,所有记忆要点按行拼接
|
||||
"""
|
||||
all_memory = self.get_working_memory()
|
||||
memory_str = ""
|
||||
for memory in all_memory:
|
||||
memory_str += f"{memory}\n"
|
||||
|
||||
self.processed_info = memory_str
|
||||
|
||||
return self.processed_info
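# --- Editor's hedged illustration, not part of the diff ---
# The removed WorkingMemoryInfo above accumulates "记忆要点:..." entries in
# self.data["working_memory"] and joins them line by line in get_processed_info().
wm = WorkingMemoryInfo()
wm.add_working_memory("记忆要点:用户喜欢猫")
wm.add_working_memory("记忆要点:正在讨论新游戏")
print(wm.get_processed_info())  # prints the two entries, one per line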
@@ -1,51 +0,0 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import List, Any
|
||||
from src.chat.focus_chat.info.info_base import InfoBase
|
||||
from src.chat.heart_flow.observation.observation import Observation
|
||||
from src.common.logger import get_logger
|
||||
|
||||
logger = get_logger("base_processor")
|
||||
|
||||
|
||||
class BaseProcessor(ABC):
|
||||
"""信息处理器基类
|
||||
|
||||
所有具体的信息处理器都应该继承这个基类,并实现process_info方法。
|
||||
支持处理InfoBase和Observation类型的输入。
|
||||
"""
|
||||
|
||||
log_prefix = "Base信息处理器"
|
||||
|
||||
@abstractmethod
|
||||
def __init__(self):
|
||||
"""初始化处理器"""
|
||||
|
||||
@abstractmethod
|
||||
async def process_info(
|
||||
self,
|
||||
observations: List[Observation] = None,
|
||||
**kwargs: Any,
|
||||
) -> List[InfoBase]:
|
||||
"""处理信息对象的抽象方法
|
||||
|
||||
Args:
|
||||
infos: InfoBase对象列表
|
||||
observations: 可选的Observation对象列表
|
||||
**kwargs: 其他可选参数
|
||||
|
||||
Returns:
|
||||
List[InfoBase]: 处理后的InfoBase实例列表
|
||||
"""
|
||||
pass
|
||||
|
||||
def _create_processed_item(self, info_type: str, info_data: Any) -> dict:
|
||||
"""创建处理后的信息项
|
||||
|
||||
Args:
|
||||
info_type: 信息类型
|
||||
info_data: 信息数据
|
||||
|
||||
Returns:
|
||||
dict: 处理后的信息项
|
||||
"""
|
||||
return {"type": info_type, "id": f"info_{info_type}", "content": info_data, "ttl": 3}
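# --- Editor's hedged sketch, not part of the diff ---
# Minimal concrete subclass of the removed BaseProcessor above, just to show the
# contract it fixed: an async process_info(observations) returning List[InfoBase].
# The NoopProcessor name is illustrative only; List/Any/Observation/InfoBase are
# already imported at the top of this (removed) module.
class NoopProcessor(BaseProcessor):
    log_prefix = "Noop信息处理器"

    def __init__(self):
        super().__init__()

    async def process_info(self, observations: List[Observation] = None, **kwargs: Any) -> List[InfoBase]:
        # A real processor would convert each Observation into InfoBase items here.
        return []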
@@ -1,142 +0,0 @@
|
||||
from typing import List, Any
|
||||
from src.chat.focus_chat.info.obs_info import ObsInfo
|
||||
from src.chat.heart_flow.observation.observation import Observation
|
||||
from src.chat.focus_chat.info.info_base import InfoBase
|
||||
from .base_processor import BaseProcessor
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
from datetime import datetime
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
|
||||
logger = get_logger("processor")
|
||||
|
||||
|
||||
class ChattingInfoProcessor(BaseProcessor):
|
||||
"""观察处理器
|
||||
|
||||
用于处理Observation对象,将其转换为ObsInfo对象。
|
||||
"""
|
||||
|
||||
log_prefix = "聊天信息处理"
|
||||
|
||||
def __init__(self):
|
||||
"""初始化观察处理器"""
|
||||
super().__init__()
|
||||
# TODO: API-Adapter修改标记
|
||||
self.model_summary = LLMRequest(
|
||||
model=global_config.model.utils_small,
|
||||
temperature=0.7,
|
||||
request_type="focus.observation.chat",
|
||||
)
|
||||
|
||||
async def process_info(
|
||||
self,
|
||||
observations: List[Observation] = None,
|
||||
**kwargs: Any,
|
||||
) -> List[InfoBase]:
|
||||
"""处理Observation对象
|
||||
|
||||
Args:
|
||||
infos: InfoBase对象列表
|
||||
observations: 可选的Observation对象列表
|
||||
**kwargs: 其他可选参数
|
||||
|
||||
Returns:
|
||||
List[InfoBase]: 处理后的ObsInfo实例列表
|
||||
"""
|
||||
# print(f"observations: {observations}")
|
||||
processed_infos = []
|
||||
|
||||
# 处理Observation对象
|
||||
if observations:
|
||||
for obs in observations:
|
||||
# print(f"obs: {obs}")
|
||||
if isinstance(obs, ChattingObservation):
|
||||
obs_info = ObsInfo()
|
||||
|
||||
# 设置聊天ID
|
||||
if hasattr(obs, "chat_id"):
|
||||
obs_info.set_chat_id(obs.chat_id)
|
||||
|
||||
# 设置说话消息
|
||||
if hasattr(obs, "talking_message_str"):
|
||||
# print(f"设置说话消息:obs.talking_message_str: {obs.talking_message_str}")
|
||||
obs_info.set_talking_message(obs.talking_message_str)
|
||||
|
||||
# 设置截断后的说话消息
|
||||
if hasattr(obs, "talking_message_str_truncate"):
|
||||
# print(f"设置截断后的说话消息:obs.talking_message_str_truncate: {obs.talking_message_str_truncate}")
|
||||
obs_info.set_talking_message_str_truncate(obs.talking_message_str_truncate)
|
||||
|
||||
# 设置简短版本的说话消息
|
||||
if hasattr(obs, "talking_message_str_short"):
|
||||
obs_info.set_talking_message_str_short(obs.talking_message_str_short)
|
||||
|
||||
# 设置截断简短版本的说话消息
|
||||
if hasattr(obs, "talking_message_str_truncate_short"):
|
||||
obs_info.set_talking_message_str_truncate_short(obs.talking_message_str_truncate_short)
|
||||
|
||||
if hasattr(obs, "mid_memory_info"):
|
||||
# print(f"设置之前聊天信息:obs.mid_memory_info: {obs.mid_memory_info}")
|
||||
obs_info.set_previous_chat_info(obs.mid_memory_info)
|
||||
|
||||
# 设置聊天类型
|
||||
is_group_chat = obs.is_group_chat
|
||||
if is_group_chat:
|
||||
chat_type = "group"
|
||||
else:
|
||||
chat_type = "private"
|
||||
if hasattr(obs, "chat_target_info") and obs.chat_target_info:
|
||||
obs_info.set_chat_target(obs.chat_target_info.get("person_name", "某人"))
|
||||
obs_info.set_chat_type(chat_type)
|
||||
|
||||
# logger.debug(f"聊天信息处理器处理后的信息: {obs_info}")
|
||||
|
||||
processed_infos.append(obs_info)
|
||||
|
||||
return processed_infos
|
||||
|
||||
async def chat_compress(self, obs: ChattingObservation):
|
||||
log_msg = ""
|
||||
if obs.compressor_prompt:
|
||||
summary = ""
|
||||
try:
|
||||
summary_result, _ = await self.model_summary.generate_response_async(obs.compressor_prompt)
|
||||
summary = "没有主题的闲聊"
|
||||
if summary_result:
|
||||
summary = summary_result
|
||||
except Exception as e:
|
||||
log_msg = f"总结主题失败 for chat {obs.chat_id}: {e}"
|
||||
logger.error(log_msg)
|
||||
else:
|
||||
log_msg = f"chat_compress 完成 for chat {obs.chat_id}, summary: {summary}"
|
||||
logger.info(log_msg)
|
||||
|
||||
mid_memory = {
|
||||
"id": str(int(datetime.now().timestamp())),
|
||||
"theme": summary,
|
||||
"messages": obs.oldest_messages, # 存储原始消息对象
|
||||
"readable_messages": obs.oldest_messages_str,
|
||||
# "timestamps": oldest_timestamps,
|
||||
"chat_id": obs.chat_id,
|
||||
"created_at": datetime.now().timestamp(),
|
||||
}
|
||||
|
||||
obs.mid_memories.append(mid_memory)
|
||||
if len(obs.mid_memories) > obs.max_mid_memory_len:
|
||||
obs.mid_memories.pop(0) # 移除最旧的
|
||||
|
||||
mid_memory_str = "之前聊天的内容概述是:\n"
|
||||
for mid_memory_item in obs.mid_memories: # 重命名循环变量以示区分
|
||||
time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
|
||||
mid_memory_str += (
|
||||
f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n"
|
||||
)
|
||||
obs.mid_memory_info = mid_memory_str
|
||||
|
||||
obs.compressor_prompt = ""
|
||||
obs.oldest_messages = []
|
||||
obs.oldest_messages_str = ""
|
||||
|
||||
return log_msg
|
||||
@@ -1,28 +0,0 @@
|
||||
from abc import ABC, abstractmethod
from typing import List, Dict, Any
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.focus_chat.info.info_base import InfoBase


class BasePlanner(ABC):
    """规划器基类"""

    def __init__(self, log_prefix: str, action_manager: ActionManager):
        self.log_prefix = log_prefix
        self.action_manager = action_manager

    @abstractmethod
    async def plan(
        self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]], loop_start_time: float
    ) -> Dict[str, Any]:
        """
        规划下一步行动

        Args:
            all_plan_info: 所有计划信息
            running_memorys: 回忆信息
            loop_start_time: 循环开始时间
        Returns:
            Dict[str, Any]: 规划结果
        """
        pass
@@ -1,45 +0,0 @@
|
||||
from typing import Dict, Type
|
||||
from src.chat.focus_chat.planners.base_planner import BasePlanner
|
||||
from src.chat.focus_chat.planners.planner_simple import ActionPlanner as SimpleActionPlanner
|
||||
from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
from src.common.logger import get_logger
|
||||
|
||||
logger = get_logger("planner_factory")
|
||||
|
||||
|
||||
class PlannerFactory:
|
||||
"""规划器工厂类,用于创建不同类型的规划器实例"""
|
||||
|
||||
# 注册所有可用的规划器类型
|
||||
_planner_types: Dict[str, Type[BasePlanner]] = {
|
||||
"simple": SimpleActionPlanner,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def register_planner(cls, name: str, planner_class: Type[BasePlanner]) -> None:
|
||||
"""
|
||||
注册新的规划器类型
|
||||
|
||||
Args:
|
||||
name: 规划器类型名称
|
||||
planner_class: 规划器类
|
||||
"""
|
||||
cls._planner_types[name] = planner_class
|
||||
logger.info(f"注册新的规划器类型: {name}")
|
||||
|
||||
@classmethod
|
||||
def create_planner(cls, log_prefix: str, action_manager: ActionManager) -> BasePlanner:
|
||||
"""
|
||||
创建规划器实例
|
||||
|
||||
Args:
|
||||
log_prefix: 日志前缀
|
||||
action_manager: 动作管理器实例
|
||||
|
||||
Returns:
|
||||
BasePlanner: 规划器实例
|
||||
"""
|
||||
|
||||
planner_class = cls._planner_types["simple"]
|
||||
logger.info(f"{log_prefix} 使用simple规划器")
|
||||
return planner_class(log_prefix=log_prefix, action_manager=action_manager)
|
||||
@@ -1,173 +0,0 @@
|
||||
import asyncio
|
||||
import traceback
|
||||
from typing import Optional, Coroutine, Callable, Any, List
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager
|
||||
from src.config.config import global_config
|
||||
|
||||
logger = get_logger("background_tasks")
|
||||
|
||||
|
||||
# 新增私聊激活检查间隔
|
||||
PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS = 5 # 与兴趣评估类似,设为5秒
|
||||
|
||||
CLEANUP_INTERVAL_SECONDS = 1200
|
||||
|
||||
|
||||
async def _run_periodic_loop(
|
||||
task_name: str, interval: int, task_func: Callable[..., Coroutine[Any, Any, None]], **kwargs
|
||||
):
|
||||
"""周期性任务主循环"""
|
||||
while True:
|
||||
start_time = asyncio.get_event_loop().time()
|
||||
# logger.debug(f"开始执行后台任务: {task_name}")
|
||||
|
||||
try:
|
||||
await task_func(**kwargs) # 执行实际任务
|
||||
except asyncio.CancelledError:
|
||||
logger.info(f"任务 {task_name} 已取消")
|
||||
break
|
||||
except Exception as e:
|
||||
logger.error(f"任务 {task_name} 执行出错: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
|
||||
# 计算并执行间隔等待
|
||||
elapsed = asyncio.get_event_loop().time() - start_time
|
||||
sleep_time = max(0, interval - elapsed)
|
||||
# if sleep_time < 0.1: # 任务超时处理, DEBUG 时可能干扰断点
|
||||
# logger.warning(f"任务 {task_name} 超时执行 ({elapsed:.2f}s > {interval}s)")
|
||||
await asyncio.sleep(sleep_time)
|
||||
|
||||
logger.debug(f"任务循环结束: {task_name}") # 调整日志信息
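# --- Editor's hedged usage sketch, not part of the diff ---
# How the removed _run_periodic_loop helper above was driven: wrap it in a task,
# let it repeat, cancel it to stop. The heartbeat coroutine and the 5-second
# interval are made up for illustration.
async def _heartbeat() -> None:
    logger.info("heartbeat tick")

async def _demo_periodic() -> None:
    task = asyncio.create_task(
        _run_periodic_loop(task_name="Heartbeat", interval=5, task_func=_heartbeat)
    )
    await asyncio.sleep(12)  # roughly two ticks
    task.cancel()            # cancellation ends the periodic task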
|
||||
|
||||
|
||||
class BackgroundTaskManager:
|
||||
"""管理 Heartflow 的后台周期性任务。"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
subheartflow_manager: SubHeartflowManager,
|
||||
):
|
||||
self.subheartflow_manager = subheartflow_manager
|
||||
|
||||
# Task references
|
||||
self._cleanup_task: Optional[asyncio.Task] = None
|
||||
self._hf_judge_state_update_task: Optional[asyncio.Task] = None
|
||||
self._private_chat_activation_task: Optional[asyncio.Task] = None # 新增私聊激活任务引用
|
||||
self._tasks: List[Optional[asyncio.Task]] = [] # Keep track of all tasks
|
||||
|
||||
async def start_tasks(self):
|
||||
"""启动所有后台任务
|
||||
|
||||
功能说明:
|
||||
- 启动核心后台任务: 状态更新、清理、日志记录、兴趣评估和随机停用
|
||||
- 每个任务启动前检查是否已在运行
|
||||
- 将任务引用保存到任务列表
|
||||
"""
|
||||
|
||||
task_configs = []
|
||||
|
||||
# 根据 chat_mode 条件添加其他任务
|
||||
if not (global_config.chat.chat_mode == "normal"):
|
||||
task_configs.extend(
|
||||
[
|
||||
(
|
||||
self._run_cleanup_cycle,
|
||||
"info",
|
||||
f"清理任务已启动 间隔:{CLEANUP_INTERVAL_SECONDS}s",
|
||||
"_cleanup_task",
|
||||
),
|
||||
# 新增私聊激活任务配置
|
||||
(
|
||||
# Use lambda to pass the interval to the runner function
|
||||
lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
|
||||
"debug",
|
||||
f"私聊激活检查任务已启动 间隔:{PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
|
||||
"_private_chat_activation_task",
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
# 统一启动所有任务
|
||||
for task_func, log_level, log_msg, task_attr_name in task_configs:
|
||||
# 检查任务变量是否存在且未完成
|
||||
current_task_var = getattr(self, task_attr_name)
|
||||
if current_task_var is None or current_task_var.done():
|
||||
new_task = asyncio.create_task(task_func())
|
||||
setattr(self, task_attr_name, new_task) # 更新任务变量
|
||||
if new_task not in self._tasks: # 避免重复添加
|
||||
self._tasks.append(new_task)
|
||||
|
||||
# 根据配置记录不同级别的日志
|
||||
getattr(logger, log_level)(log_msg)
|
||||
else:
|
||||
logger.warning(f"{task_attr_name}任务已在运行")
|
||||
|
||||
async def stop_tasks(self):
|
||||
"""停止所有后台任务。
|
||||
|
||||
该方法会:
|
||||
1. 遍历所有后台任务并取消未完成的任务
|
||||
2. 等待所有取消操作完成
|
||||
3. 清空任务列表
|
||||
"""
|
||||
logger.info("正在停止所有后台任务...")
|
||||
cancelled_count = 0
|
||||
|
||||
# 第一步:取消所有运行中的任务
|
||||
for task in self._tasks:
|
||||
if task and not task.done():
|
||||
task.cancel() # 发送取消请求
|
||||
cancelled_count += 1
|
||||
|
||||
# 第二步:处理取消结果
|
||||
if cancelled_count > 0:
|
||||
logger.debug(f"正在等待{cancelled_count}个任务完成取消...")
|
||||
# 使用gather等待所有取消操作完成,忽略异常
|
||||
await asyncio.gather(*[t for t in self._tasks if t and t.cancelled()], return_exceptions=True)
|
||||
logger.info(f"成功取消{cancelled_count}个后台任务")
|
||||
else:
|
||||
logger.info("没有需要取消的后台任务")
|
||||
|
||||
# 第三步:清空任务列表
|
||||
self._tasks = [] # 重置任务列表
|
||||
|
||||
# 状态转换处理
|
||||
|
||||
async def _perform_cleanup_work(self):
|
||||
"""执行子心流清理任务
|
||||
1. 获取需要清理的不活跃子心流列表
|
||||
2. 逐个停止这些子心流
|
||||
3. 记录清理结果
|
||||
"""
|
||||
# 获取需要清理的子心流列表(包含ID和原因)
|
||||
flows_to_stop = self.subheartflow_manager.get_inactive_subheartflows()
|
||||
|
||||
if not flows_to_stop:
|
||||
return # 没有需要清理的子心流直接返回
|
||||
|
||||
logger.info(f"准备删除 {len(flows_to_stop)} 个不活跃(1h)子心流")
|
||||
stopped_count = 0
|
||||
|
||||
# 逐个停止子心流
|
||||
for flow_id in flows_to_stop:
|
||||
success = await self.subheartflow_manager.delete_subflow(flow_id)
|
||||
if success:
|
||||
stopped_count += 1
|
||||
logger.debug(f"[清理任务] 已停止子心流 {flow_id}")
|
||||
|
||||
# 记录最终清理结果
|
||||
logger.info(f"[清理任务] 清理完成, 共停止 {stopped_count}/{len(flows_to_stop)} 个子心流")
|
||||
|
||||
async def _run_cleanup_cycle(self):
|
||||
await _run_periodic_loop(
|
||||
task_name="Subflow Cleanup", interval=CLEANUP_INTERVAL_SECONDS, task_func=self._perform_cleanup_work
|
||||
)
|
||||
|
||||
# 新增私聊激活任务运行器
|
||||
async def _run_private_chat_activation_cycle(self, interval: int):
|
||||
await _run_periodic_loop(
|
||||
task_name="Private Chat Activation Check",
|
||||
interval=interval,
|
||||
task_func=self.subheartflow_manager.sbhf_absent_private_into_focus,
|
||||
)
|
||||
@@ -1,84 +1,56 @@
|
||||
from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState
|
||||
from src.common.logger import get_logger
|
||||
from typing import Any, Optional, List
|
||||
from src.chat.heart_flow.subheartflow_manager import SubHeartflowManager
|
||||
from src.chat.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager
|
||||
from typing import Any, Optional
|
||||
from typing import Dict
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
|
||||
logger = get_logger("heartflow")
|
||||
|
||||
|
||||
class Heartflow:
|
||||
"""主心流协调器,负责初始化并协调各个子系统:
|
||||
- 状态管理 (MaiState)
|
||||
- 子心流管理 (SubHeartflow)
|
||||
- 后台任务 (BackgroundTaskManager)
|
||||
"""
|
||||
"""主心流协调器,负责初始化并协调聊天"""
|
||||
|
||||
def __init__(self):
|
||||
# 子心流管理 (在初始化时传入 current_state)
|
||||
self.subheartflow_manager: SubHeartflowManager = SubHeartflowManager()
|
||||
|
||||
# 后台任务管理器 (整合所有定时任务)
|
||||
self.background_task_manager: BackgroundTaskManager = BackgroundTaskManager(
|
||||
subheartflow_manager=self.subheartflow_manager,
|
||||
)
|
||||
self.subheartflows: Dict[Any, "SubHeartflow"] = {}
|
||||
|
||||
async def get_or_create_subheartflow(self, subheartflow_id: Any) -> Optional["SubHeartflow"]:
|
||||
"""获取或创建一个新的SubHeartflow实例 - 委托给 SubHeartflowManager"""
|
||||
# 不再需要传入 self.current_state
|
||||
return await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id)
|
||||
"""获取或创建一个新的SubHeartflow实例"""
|
||||
if subheartflow_id in self.subheartflows:
|
||||
subflow = self.subheartflows.get(subheartflow_id)
|
||||
if subflow:
|
||||
return subflow
|
||||
|
||||
try:
|
||||
new_subflow = SubHeartflow(
|
||||
subheartflow_id,
|
||||
)
|
||||
|
||||
await new_subflow.initialize()
|
||||
|
||||
# 注册子心流
|
||||
self.subheartflows[subheartflow_id] = new_subflow
|
||||
heartflow_name = get_chat_manager().get_stream_name(subheartflow_id) or subheartflow_id
|
||||
logger.info(f"[{heartflow_name}] 开始接收消息")
|
||||
|
||||
return new_subflow
|
||||
except Exception as e:
|
||||
logger.error(f"创建子心流 {subheartflow_id} 失败: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
async def force_change_subheartflow_status(self, subheartflow_id: str, status: ChatState) -> None:
|
||||
"""强制改变子心流的状态"""
|
||||
# 这里的 message 是可选的,可能是一个消息对象,也可能是其他类型的数据
|
||||
return await self.subheartflow_manager.force_change_state(subheartflow_id, status)
|
||||
return await self.force_change_state(subheartflow_id, status)
|
||||
|
||||
async def api_get_all_states(self):
|
||||
"""获取所有状态"""
|
||||
return await self.interest_logger.api_get_all_states()
|
||||
|
||||
async def api_get_subheartflow_cycle_info(self, subheartflow_id: str, history_len: int) -> Optional[dict]:
|
||||
"""获取子心流的循环信息"""
|
||||
subheartflow = await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id)
|
||||
if not subheartflow:
|
||||
logger.warning(f"尝试获取不存在的子心流 {subheartflow_id} 的周期信息")
|
||||
return None
|
||||
heartfc_instance = subheartflow.heart_fc_instance
|
||||
if not heartfc_instance:
|
||||
logger.warning(f"子心流 {subheartflow_id} 没有心流实例,无法获取周期信息")
|
||||
return None
|
||||
|
||||
return heartfc_instance.get_cycle_history(last_n=history_len)
|
||||
|
||||
async def api_get_normal_chat_replies(self, subheartflow_id: str, limit: int = 10) -> Optional[List[dict]]:
|
||||
"""获取子心流的NormalChat回复记录
|
||||
|
||||
Args:
|
||||
subheartflow_id: 子心流ID
|
||||
limit: 最大返回数量,默认10条
|
||||
|
||||
Returns:
|
||||
Optional[List[dict]]: 回复记录列表,如果子心流不存在则返回None
|
||||
"""
|
||||
subheartflow = await self.subheartflow_manager.get_or_create_subheartflow(subheartflow_id)
|
||||
if not subheartflow:
|
||||
logger.warning(f"尝试获取不存在的子心流 {subheartflow_id} 的NormalChat回复记录")
|
||||
return None
|
||||
|
||||
return subheartflow.get_normal_chat_recent_replies(limit)
|
||||
|
||||
async def heartflow_start_working(self):
|
||||
"""启动后台任务"""
|
||||
await self.background_task_manager.start_tasks()
|
||||
logger.info("[Heartflow] 后台任务已启动")
|
||||
|
||||
# 根本不会用到这个函数吧,那样麦麦直接死了
|
||||
async def stop_working(self):
|
||||
"""停止所有任务和子心流"""
|
||||
logger.info("[Heartflow] 正在停止任务和子心流...")
|
||||
await self.background_task_manager.stop_tasks()
|
||||
await self.subheartflow_manager.deactivate_all_subflows()
|
||||
logger.info("[Heartflow] 所有任务和子心流已停止")
|
||||
async def force_change_state(self, subflow_id: Any, target_state: ChatState) -> bool:
|
||||
"""强制改变指定子心流的状态"""
|
||||
subflow = self.subheartflows.get(subflow_id)
|
||||
if not subflow:
|
||||
logger.warning(f"[强制状态转换]尝试转换不存在的子心流{subflow_id} 到 {target_state.value}")
|
||||
return False
|
||||
await subflow.change_chat_state(target_state)
|
||||
logger.info(f"[强制状态转换]子心流 {subflow_id} 已转换到 {target_state.value}")
|
||||
return True
|
||||
|
||||
|
||||
heartflow = Heartflow()
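# --- Editor's hedged usage sketch, not part of the diff ---
# How the simplified Heartflow singleton above is expected to be driven from the
# API layer: fetch or create the subheartflow for a chat, then force its state.
# The chat id value is made up.
async def _demo_heartflow() -> None:
    subflow = await heartflow.get_or_create_subheartflow("123456")
    if subflow is not None:
        switched = await heartflow.force_change_subheartflow_status("123456", ChatState.FOCUSED)
        logger.info(f"强制切换到FOCUSED: {switched}")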
|
||||
|
||||
@@ -10,29 +10,14 @@ from src.common.logger import get_logger
|
||||
import re
|
||||
import math
|
||||
import traceback
|
||||
from typing import Optional, Tuple
|
||||
from typing import Tuple
|
||||
|
||||
from src.person_info.relationship_manager import get_relationship_manager
|
||||
|
||||
# from ..message_receive.message_buffer import message_buffer
|
||||
|
||||
logger = get_logger("chat")
|
||||
|
||||
|
||||
async def _handle_error(error: Exception, context: str, message: Optional[MessageRecv] = None) -> None:
|
||||
"""统一的错误处理函数
|
||||
|
||||
Args:
|
||||
error: 捕获到的异常
|
||||
context: 错误发生的上下文描述
|
||||
message: 可选的消息对象,用于记录相关消息内容
|
||||
"""
|
||||
logger.error(f"{context}: {error}")
|
||||
logger.error(traceback.format_exc())
|
||||
if message and hasattr(message, "raw_message"):
|
||||
logger.error(f"相关消息原始内容: {message.raw_message}")
|
||||
|
||||
|
||||
async def _process_relationship(message: MessageRecv) -> None:
|
||||
"""处理用户关系逻辑
|
||||
|
||||
@@ -149,4 +134,5 @@ class HeartFCMessageReceiver:
|
||||
await _process_relationship(message)
|
||||
|
||||
except Exception as e:
|
||||
await _handle_error(e, "消息处理失败", message)
|
||||
logger.error(f"消息处理失败: {e}")
|
||||
print(traceback.format_exc())
|
||||
@@ -1,46 +0,0 @@
|
||||
# 定义了来自外部世界的信息
|
||||
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
|
||||
from datetime import datetime
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
|
||||
# 特殊的观察,专门用于观察动作
|
||||
# 所有观察的基类
|
||||
class ActionObservation:
|
||||
def __init__(self, observe_id):
|
||||
self.observe_info = ""
|
||||
self.observe_id = observe_id
|
||||
self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间
|
||||
self.action_manager: ActionManager = None
|
||||
|
||||
self.all_actions = {}
|
||||
self.all_using_actions = {}
|
||||
|
||||
def get_observe_info(self):
|
||||
return self.observe_info
|
||||
|
||||
def set_action_manager(self, action_manager: ActionManager):
|
||||
self.action_manager = action_manager
|
||||
self.all_actions = self.action_manager.get_registered_actions()
|
||||
|
||||
async def observe(self):
|
||||
action_info_block = ""
|
||||
self.all_using_actions = self.action_manager.get_using_actions()
|
||||
for action_name, action_info in self.all_using_actions.items():
|
||||
action_info_block += f"\n{action_name}: {action_info.get('description', '')}"
|
||||
action_info_block += "\n注意,除了上面动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
|
||||
|
||||
self.observe_info = action_info_block
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""将观察对象转换为可序列化的字典"""
|
||||
return {
|
||||
"observe_info": self.observe_info,
|
||||
"observe_id": self.observe_id,
|
||||
"last_observe_time": self.last_observe_time,
|
||||
"all_actions": self.all_actions,
|
||||
"all_using_actions": self.all_using_actions,
|
||||
}
|
||||
@@ -1,197 +0,0 @@
|
||||
from datetime import datetime
|
||||
from src.config.config import global_config
|
||||
from src.chat.utils.chat_message_builder import (
|
||||
get_raw_msg_before_timestamp_with_chat,
|
||||
build_readable_messages,
|
||||
get_raw_msg_by_timestamp_with_chat,
|
||||
num_new_messages_since,
|
||||
get_person_id_list,
|
||||
)
|
||||
from src.chat.utils.prompt_builder import global_prompt_manager, Prompt
|
||||
from src.chat.heart_flow.observation.observation import Observation
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
# 定义提示模板
|
||||
Prompt(
|
||||
"""这是qq群聊的聊天记录,请总结以下聊天记录的主题:
|
||||
{chat_logs}
|
||||
请概括这段聊天记录的主题和主要内容
|
||||
主题:简短的概括,包括时间,人物和事件,不要超过20个字
|
||||
内容:具体的信息内容,包括人物、事件和信息,不要超过200个字,不要分点。
|
||||
|
||||
请用json格式返回,格式如下:
|
||||
{{
|
||||
"theme": "主题,例如 2025-06-14 10:00:00 群聊 麦麦 和 网友 讨论了 游戏 的话题",
|
||||
"content": "内容,可以是对聊天记录的概括,也可以是聊天记录的详细内容"
|
||||
}}
|
||||
""",
|
||||
"chat_summary_group_prompt", # Template for group chat
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""这是你和{chat_target}的私聊记录,请总结以下聊天记录的主题:
|
||||
{chat_logs}
|
||||
请用一句话概括,包括事件,时间,和主要信息,不要分点。
|
||||
主题:简短的介绍,不要超过10个字
|
||||
内容:包括人物、事件和主要信息,不要分点。
|
||||
|
||||
请用json格式返回,格式如下:
|
||||
{{
|
||||
"theme": "主题",
|
||||
"content": "内容"
|
||||
}}""",
|
||||
"chat_summary_private_prompt", # Template for private chat
|
||||
)
|
||||
|
||||
|
||||
class ChattingObservation(Observation):
|
||||
def __init__(self, chat_id):
|
||||
super().__init__(chat_id)
|
||||
self.chat_id = chat_id
|
||||
self.platform = "qq"
|
||||
|
||||
self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_id)
|
||||
|
||||
self.talking_message = []
|
||||
self.talking_message_str = ""
|
||||
self.talking_message_str_truncate = ""
|
||||
self.talking_message_str_short = ""
|
||||
self.talking_message_str_truncate_short = ""
|
||||
self.name = global_config.bot.nickname
|
||||
self.nick_name = global_config.bot.alias_names
|
||||
self.max_now_obs_len = global_config.chat.max_context_size
|
||||
self.overlap_len = global_config.focus_chat.compressed_length
|
||||
self.person_list = []
|
||||
self.compressor_prompt = ""
|
||||
self.oldest_messages = []
|
||||
self.oldest_messages_str = ""
|
||||
|
||||
self.last_observe_time = datetime.now().timestamp()
|
||||
initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10)
|
||||
initial_messages_short = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 5)
|
||||
self.last_observe_time = initial_messages[-1]["time"] if initial_messages else self.last_observe_time
|
||||
self.talking_message = initial_messages
|
||||
self.talking_message_short = initial_messages_short
|
||||
self.talking_message_str = build_readable_messages(self.talking_message, show_actions=True)
|
||||
self.talking_message_str_truncate = build_readable_messages(
|
||||
self.talking_message, show_actions=True, truncate=True
|
||||
)
|
||||
self.talking_message_str_short = build_readable_messages(self.talking_message_short, show_actions=True)
|
||||
self.talking_message_str_truncate_short = build_readable_messages(
|
||||
self.talking_message_short, show_actions=True, truncate=True
|
||||
)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""将观察对象转换为可序列化的字典"""
|
||||
return {
|
||||
"chat_id": self.chat_id,
|
||||
"platform": self.platform,
|
||||
"is_group_chat": self.is_group_chat,
|
||||
"chat_target_info": self.chat_target_info,
|
||||
"talking_message_str": self.talking_message_str,
|
||||
"talking_message_str_truncate": self.talking_message_str_truncate,
|
||||
"talking_message_str_short": self.talking_message_str_short,
|
||||
"talking_message_str_truncate_short": self.talking_message_str_truncate_short,
|
||||
"name": self.name,
|
||||
"nick_name": self.nick_name,
|
||||
"last_observe_time": self.last_observe_time,
|
||||
}
|
||||
|
||||
def get_observe_info(self, ids=None):
|
||||
return self.talking_message_str
|
||||
|
||||
async def observe(self):
|
||||
# 自上一次观察的新消息
|
||||
new_messages_list = get_raw_msg_by_timestamp_with_chat(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_observe_time,
|
||||
timestamp_end=datetime.now().timestamp(),
|
||||
limit=self.max_now_obs_len,
|
||||
limit_mode="latest",
|
||||
)
|
||||
|
||||
# print(f"new_messages_list: {new_messages_list}")
|
||||
|
||||
last_obs_time_mark = self.last_observe_time
|
||||
if new_messages_list:
|
||||
self.last_observe_time = new_messages_list[-1]["time"]
|
||||
self.talking_message.extend(new_messages_list)
|
||||
|
||||
if len(self.talking_message) > self.max_now_obs_len:
|
||||
# 计算需要移除的消息数量,保留最新的 max_now_obs_len 条
|
||||
messages_to_remove_count = len(self.talking_message) - self.max_now_obs_len
|
||||
oldest_messages = self.talking_message[:messages_to_remove_count]
|
||||
self.talking_message = self.talking_message[messages_to_remove_count:]
|
||||
|
||||
# 构建压缩提示
|
||||
oldest_messages_str = build_readable_messages(
|
||||
messages=oldest_messages, timestamp_mode="normal_no_YMD", read_mark=0, show_actions=True
|
||||
)
|
||||
|
||||
# 根据聊天类型选择提示模板
|
||||
if self.is_group_chat:
|
||||
prompt_template_name = "chat_summary_group_prompt"
|
||||
prompt = await global_prompt_manager.format_prompt(prompt_template_name, chat_logs=oldest_messages_str)
|
||||
else:
|
||||
prompt_template_name = "chat_summary_private_prompt"
|
||||
chat_target_name = "对方"
|
||||
if self.chat_target_info:
|
||||
chat_target_name = (
|
||||
self.chat_target_info.get("person_name")
|
||||
or self.chat_target_info.get("user_nickname")
|
||||
or chat_target_name
|
||||
)
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
prompt_template_name,
|
||||
chat_target=chat_target_name,
|
||||
chat_logs=oldest_messages_str,
|
||||
)
|
||||
|
||||
self.compressor_prompt = prompt
|
||||
|
||||
# 构建当前消息
|
||||
self.talking_message_str = build_readable_messages(
|
||||
messages=self.talking_message,
|
||||
timestamp_mode="lite",
|
||||
read_mark=last_obs_time_mark,
|
||||
show_actions=True,
|
||||
)
|
||||
self.talking_message_str_truncate = build_readable_messages(
|
||||
messages=self.talking_message,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=last_obs_time_mark,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
# 构建简短版本 - 使用最新一半的消息
|
||||
half_count = len(self.talking_message) // 2
|
||||
recent_messages = self.talking_message[-half_count:] if half_count > 0 else self.talking_message
|
||||
|
||||
self.talking_message_str_short = build_readable_messages(
|
||||
messages=recent_messages,
|
||||
timestamp_mode="lite",
|
||||
read_mark=last_obs_time_mark,
|
||||
show_actions=True,
|
||||
)
|
||||
self.talking_message_str_truncate_short = build_readable_messages(
|
||||
messages=recent_messages,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=last_obs_time_mark,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
self.person_list = await get_person_id_list(self.talking_message)
|
||||
|
||||
# logger.debug(
|
||||
# f"Chat {self.chat_id} - 现在聊天内容:{self.talking_message_str}"
|
||||
# )
|
||||
|
||||
async def has_new_messages_since(self, timestamp: float) -> bool:
|
||||
"""检查指定时间戳之后是否有新消息"""
|
||||
count = num_new_messages_since(chat_id=self.chat_id, timestamp_start=timestamp)
|
||||
return count > 0
|
||||
@@ -1,25 +0,0 @@
|
||||
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.common.logger import get_logger

logger = get_logger("observation")


# 所有观察的基类
class Observation:
    def __init__(self, observe_id):
        self.observe_info = ""
        self.observe_id = observe_id
        self.last_observe_time = datetime.now().timestamp()  # 初始化为当前时间

    def to_dict(self) -> dict:
        """将观察对象转换为可序列化的字典"""
        return {
            "observe_info": self.observe_info,
            "observe_id": self.observe_id,
            "last_observe_time": self.last_observe_time,
        }

    async def observe(self):
        pass
@@ -1,34 +0,0 @@
|
||||
# 定义了来自外部世界的信息
|
||||
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
|
||||
from datetime import datetime
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.focus_chat.working_memory.working_memory import WorkingMemory
|
||||
from src.chat.focus_chat.working_memory.memory_item import MemoryItem
|
||||
from typing import List
|
||||
# Import the new utility function
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
|
||||
# 所有观察的基类
|
||||
class WorkingMemoryObservation:
|
||||
def __init__(self, observe_id):
|
||||
self.observe_info = ""
|
||||
self.observe_id = observe_id
|
||||
self.last_observe_time = datetime.now().timestamp()
|
||||
|
||||
self.working_memory = WorkingMemory(chat_id=observe_id)
|
||||
|
||||
self.retrieved_working_memory = []
|
||||
|
||||
def get_observe_info(self):
|
||||
return self.working_memory
|
||||
|
||||
def add_retrieved_working_memory(self, retrieved_working_memory: List[MemoryItem]):
|
||||
self.retrieved_working_memory.append(retrieved_working_memory)
|
||||
|
||||
def get_retrieved_working_memory(self):
|
||||
return self.retrieved_working_memory
|
||||
|
||||
async def observe(self):
|
||||
pass
|
||||
@@ -1,5 +1,3 @@
|
||||
from .observation.observation import Observation
|
||||
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
import asyncio
|
||||
import time
|
||||
from typing import Optional, List, Dict, Tuple
|
||||
@@ -10,7 +8,7 @@ from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.focus_chat.heartFC_chat import HeartFChatting
|
||||
from src.chat.normal_chat.normal_chat import NormalChat
|
||||
from src.chat.heart_flow.chat_state_info import ChatState, ChatStateInfo
|
||||
from .utils_chat import get_chat_type_and_target_info
|
||||
from src.chat.utils.utils import get_chat_type_and_target_info
|
||||
from src.config.config import global_config
|
||||
from rich.traceback import install
|
||||
|
||||
@@ -46,10 +44,6 @@ class SubHeartflow:
|
||||
# 兴趣消息集合
|
||||
self.interest_dict: Dict[str, tuple[MessageRecv, float, bool]] = {}
|
||||
|
||||
# 活动状态管理
|
||||
self.should_stop = False # 停止标志
|
||||
self.task: Optional[asyncio.Task] = None # 后台任务
|
||||
|
||||
# focus模式退出冷却时间管理
|
||||
self.last_focus_exit_time: float = 0 # 上次退出focus模式的时间
|
||||
|
||||
@@ -126,6 +120,7 @@ class SubHeartflow:
|
||||
chat_stream=chat_stream,
|
||||
interest_dict=self.interest_dict,
|
||||
on_switch_to_focus_callback=self._handle_switch_to_focus_request,
|
||||
get_cooldown_progress_callback=self.get_cooldown_progress,
|
||||
)
|
||||
|
||||
logger.info(f"{log_prefix} 开始普通聊天,随便水群...")
|
||||
@@ -137,27 +132,31 @@ class SubHeartflow:
|
||||
self.normal_chat_instance = None # 启动/初始化失败,清理实例
|
||||
return False
|
||||
|
||||
async def _handle_switch_to_focus_request(self) -> None:
|
||||
async def _handle_switch_to_focus_request(self) -> bool:
|
||||
"""
|
||||
处理来自NormalChat的切换到focus模式的请求
|
||||
|
||||
Returns:
|
||||
bool: 切换成功返回True,失败返回False
|
||||
"""
|
||||
logger.info(f"{self.log_prefix} 收到NormalChat请求切换到focus模式")
|
||||
|
||||
# 检查是否在focus冷却期内
|
||||
if self.is_in_focus_cooldown():
|
||||
logger.info(f"{self.log_prefix} 正在focus冷却期内,忽略切换到focus模式的请求")
|
||||
return
|
||||
return False
|
||||
|
||||
# 切换到focus模式
|
||||
current_state = self.chat_state.chat_status
|
||||
if current_state == ChatState.NORMAL:
|
||||
await self.change_chat_state(ChatState.FOCUSED)
|
||||
logger.info(f"{self.log_prefix} 已根据NormalChat请求从NORMAL切换到FOCUSED状态")
|
||||
return True
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 当前状态为{current_state.value},无法切换到FOCUSED状态")
|
||||
return False
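# --- Editor's hedged sketch, not part of the diff ---
# The callback now reports success through its bool return value, so the
# NormalChat side (wired in as on_switch_to_focus_callback above, not shown in
# this hunk) is expected to check it roughly like this; the helper name is
# illustrative only.
async def _maybe_enter_focus(on_switch_to_focus_callback) -> bool:
    switched = await on_switch_to_focus_callback()
    if not switched:
        return False  # focus cooldown active or state not NORMAL: keep normal chat
    return True       # FOCUSED took over, NormalChat should stop replying here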
|
||||
|
||||
async def _handle_stop_focus_chat_request(self) -> None:
|
||||
"""
|
||||
@@ -208,10 +207,6 @@ class SubHeartflow:
|
||||
await asyncio.wait_for(self.heart_fc_instance.start(), timeout=15.0)
|
||||
logger.info(f"{log_prefix} HeartFChatting 循环已启动。")
|
||||
return True
|
||||
except asyncio.TimeoutError:
|
||||
logger.error(f"{log_prefix} 启动现有 HeartFChatting 循环超时")
|
||||
# 超时时清理实例,准备重新创建
|
||||
self.heart_fc_instance = None
|
||||
except Exception as e:
|
||||
logger.error(f"{log_prefix} 尝试启动现有 HeartFChatting 循环时出错: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
@@ -228,7 +223,6 @@ class SubHeartflow:
|
||||
logger.debug(f"{log_prefix} 创建新的 HeartFChatting 实例")
|
||||
self.heart_fc_instance = HeartFChatting(
|
||||
chat_id=self.subheartflow_id,
|
||||
# observations=self.observations,
|
||||
on_stop_focus_chat=self._handle_stop_focus_chat_request,
|
||||
)
|
||||
|
||||
@@ -238,10 +232,6 @@ class SubHeartflow:
|
||||
logger.debug(f"{log_prefix} 麦麦已成功进入专注聊天模式 (新实例已启动)。")
|
||||
return True
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
logger.error(f"{log_prefix} 创建或启动新 HeartFChatting 实例超时")
|
||||
self.heart_fc_instance = None # 超时时清理实例
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"{log_prefix} 创建或启动 HeartFChatting 实例时出错: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
@@ -252,8 +242,6 @@ class SubHeartflow:
|
||||
logger.error(f"{self.log_prefix} _start_heart_fc_chat 执行时出错: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
return False
|
||||
finally:
|
||||
logger.debug(f"{self.log_prefix} _start_heart_fc_chat 完成")
|
||||
|
||||
async def change_chat_state(self, new_state: ChatState) -> None:
|
||||
"""
|
||||
@@ -309,43 +297,6 @@ class SubHeartflow:
|
||||
f"{log_prefix} 尝试将状态从 {current_state.value} 变为 {new_state.value},但未成功或未执行更改。"
|
||||
)
|
||||
|
||||
def add_observation(self, observation: Observation):
|
||||
for existing_obs in self.observations:
|
||||
if existing_obs.observe_id == observation.observe_id:
|
||||
return
|
||||
self.observations.append(observation)
|
||||
|
||||
def remove_observation(self, observation: Observation):
|
||||
if observation in self.observations:
|
||||
self.observations.remove(observation)
|
||||
|
||||
def get_all_observations(self) -> list[Observation]:
|
||||
return self.observations
|
||||
|
||||
def _get_primary_observation(self) -> Optional[ChattingObservation]:
|
||||
if self.observations and isinstance(self.observations[0], ChattingObservation):
|
||||
return self.observations[0]
|
||||
logger.warning(f"SubHeartflow {self.subheartflow_id} 没有找到有效的 ChattingObservation")
|
||||
return None
|
||||
|
||||
def get_normal_chat_last_speak_time(self) -> float:
|
||||
if self.normal_chat_instance:
|
||||
return self.normal_chat_instance.last_speak_time
|
||||
return 0
|
||||
|
||||
def get_normal_chat_recent_replies(self, limit: int = 10) -> List[dict]:
|
||||
"""获取NormalChat实例的最近回复记录
|
||||
|
||||
Args:
|
||||
limit: 最大返回数量,默认10条
|
||||
|
||||
Returns:
|
||||
List[dict]: 最近的回复记录列表,如果没有NormalChat实例则返回空列表
|
||||
"""
|
||||
if self.normal_chat_instance:
|
||||
return self.normal_chat_instance.get_recent_replies(limit)
|
||||
return []
|
||||
|
||||
def add_message_to_normal_chat_cache(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
|
||||
self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
|
||||
# 如果字典长度超过10,删除最旧的消息
|
||||
@@ -353,66 +304,6 @@ class SubHeartflow:
|
||||
oldest_key = next(iter(self.interest_dict))
|
||||
self.interest_dict.pop(oldest_key)
|
||||
|
||||
def get_normal_chat_action_manager(self):
|
||||
"""获取NormalChat的ActionManager实例
|
||||
|
||||
Returns:
|
||||
ActionManager: NormalChat的ActionManager实例,如果不存在则返回None
|
||||
"""
|
||||
if self.normal_chat_instance:
|
||||
return self.normal_chat_instance.get_action_manager()
|
||||
return None
|
||||
|
||||
def set_normal_chat_planner_enabled(self, enabled: bool):
|
||||
"""设置NormalChat的planner是否启用
|
||||
|
||||
Args:
|
||||
enabled: 是否启用planner
|
||||
"""
|
||||
if self.normal_chat_instance:
|
||||
self.normal_chat_instance.set_planner_enabled(enabled)
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} NormalChat实例不存在,无法设置planner状态")
|
||||
|
||||
async def get_full_state(self) -> dict:
|
||||
"""获取子心流的完整状态,包括兴趣、思维和聊天状态。"""
|
||||
return {
|
||||
"interest_state": "interest_state",
|
||||
"chat_state": self.chat_state.chat_status.value,
|
||||
"chat_state_changed_time": self.chat_state_changed_time,
|
||||
}
|
||||
|
||||
async def shutdown(self):
|
||||
"""安全地关闭子心流及其管理的任务"""
|
||||
if self.should_stop:
|
||||
logger.info(f"{self.log_prefix} 子心流已在关闭过程中。")
|
||||
return
|
||||
|
||||
logger.info(f"{self.log_prefix} 开始关闭子心流...")
|
||||
self.should_stop = True # 标记为停止,让后台任务退出
|
||||
|
||||
# 使用新的停止方法
|
||||
await self._stop_normal_chat()
|
||||
await self._stop_heart_fc_chat()
|
||||
|
||||
# 取消可能存在的旧后台任务 (self.task)
|
||||
if self.task and not self.task.done():
|
||||
logger.debug(f"{self.log_prefix} 取消子心流主任务 (Shutdown)...")
|
||||
self.task.cancel()
|
||||
try:
|
||||
await asyncio.wait_for(self.task, timeout=1.0) # 给点时间响应取消
|
||||
except asyncio.CancelledError:
|
||||
logger.debug(f"{self.log_prefix} 子心流主任务已取消 (Shutdown)。")
|
||||
except asyncio.TimeoutError:
|
||||
logger.warning(f"{self.log_prefix} 等待子心流主任务取消超时 (Shutdown)。")
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 等待子心流主任务取消时发生错误 (Shutdown): {e}")
|
||||
|
||||
self.task = None # 清理任务引用
|
||||
self.chat_state.chat_status = ChatState.ABSENT # 状态重置为不参与
|
||||
|
||||
logger.info(f"{self.log_prefix} 子心流关闭完成。")
|
||||
|
||||
def is_in_focus_cooldown(self) -> bool:
|
||||
"""检查是否在focus模式的冷却期内
|
||||
|
||||
@@ -439,3 +330,26 @@ class SubHeartflow:
|
||||
)
|
||||
|
||||
return is_cooling
|
||||
|
||||
def get_cooldown_progress(self) -> float:
|
||||
"""获取冷却进度,返回0-1之间的值
|
||||
|
||||
Returns:
|
||||
float: 0表示刚开始冷却,1表示冷却完成
|
||||
"""
|
||||
if self.last_focus_exit_time == 0:
|
||||
return 1.0 # 没有冷却,返回1表示完全恢复
|
||||
|
||||
# 基础冷却时间10分钟,受auto_focus_threshold调控
|
||||
base_cooldown = 10 * 60 # 10分钟转换为秒
|
||||
cooldown_duration = base_cooldown / global_config.chat.auto_focus_threshold
|
||||
|
||||
current_time = time.time()
|
||||
elapsed_since_exit = current_time - self.last_focus_exit_time
|
||||
|
||||
if elapsed_since_exit >= cooldown_duration:
|
||||
return 1.0 # 冷却完成
|
||||
|
||||
# 计算进度:0表示刚开始冷却,1表示冷却完成
|
||||
progress = elapsed_since_exit / cooldown_duration
|
||||
return progress
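# --- Editor's hedged illustration, not part of the diff ---
# The cooldown math above, restated standalone: base cooldown is 10 minutes,
# shortened by a larger auto_focus_threshold; progress runs from 0 (just left
# focus) to 1 (cooldown over). `time` is already imported in this module and the
# threshold value 1.5 is made up.
def _cooldown_progress(last_focus_exit_time: float, auto_focus_threshold: float) -> float:
    if last_focus_exit_time == 0:
        return 1.0  # never exited focus, fully recovered
    cooldown_duration = (10 * 60) / auto_focus_threshold
    elapsed = time.time() - last_focus_exit_time
    return min(1.0, elapsed / cooldown_duration)

# With auto_focus_threshold = 1.5 the cooldown lasts 400 s,
# so 200 s after exiting focus the progress is 0.5.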
@@ -1,337 +0,0 @@
|
||||
import asyncio
|
||||
import time
|
||||
from typing import Dict, Any, Optional, List
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState
|
||||
|
||||
|
||||
# 初始化日志记录器
|
||||
|
||||
logger = get_logger("subheartflow_manager")
|
||||
|
||||
# 子心流管理相关常量
|
||||
INACTIVE_THRESHOLD_SECONDS = 3600 # 子心流不活跃超时时间(秒)
|
||||
NORMAL_CHAT_TIMEOUT_SECONDS = 30 * 60 # 30分钟
|
||||
|
||||
|
||||
async def _try_set_subflow_absent_internal(subflow: "SubHeartflow", log_prefix: str) -> bool:
|
||||
"""
|
||||
尝试将给定的子心流对象状态设置为 ABSENT (内部方法,不处理锁)。
|
||||
|
||||
Args:
|
||||
subflow: 子心流对象。
|
||||
log_prefix: 用于日志记录的前缀 (例如 "[子心流管理]" 或 "[停用]")。
|
||||
|
||||
Returns:
|
||||
bool: 如果状态成功变为 ABSENT 或原本就是 ABSENT,返回 True;否则返回 False。
|
||||
"""
|
||||
flow_id = subflow.subheartflow_id
|
||||
stream_name = get_chat_manager().get_stream_name(flow_id) or flow_id
|
||||
|
||||
if subflow.chat_state.chat_status != ChatState.ABSENT:
|
||||
logger.debug(f"{log_prefix} 设置 {stream_name} 状态为 ABSENT")
|
||||
try:
|
||||
await subflow.change_chat_state(ChatState.ABSENT)
|
||||
# 再次检查以确认状态已更改 (change_chat_state 内部应确保)
|
||||
if subflow.chat_state.chat_status == ChatState.ABSENT:
|
||||
return True
|
||||
else:
|
||||
logger.warning(
|
||||
f"{log_prefix} 调用 change_chat_state 后,{stream_name} 状态仍为 {subflow.chat_state.chat_status.value}"
|
||||
)
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"{log_prefix} 设置 {stream_name} 状态为 ABSENT 时失败: {e}", exc_info=True)
|
||||
return False
|
||||
else:
|
||||
logger.debug(f"{log_prefix} {stream_name} 已是 ABSENT 状态")
|
||||
return True # 已经是目标状态,视为成功
|
||||
|
||||
|
||||
class SubHeartflowManager:
|
||||
"""管理所有活跃的 SubHeartflow 实例。"""
|
||||
|
||||
def __init__(self):
|
||||
self.subheartflows: Dict[Any, "SubHeartflow"] = {}
|
||||
self._lock = asyncio.Lock() # 用于保护 self.subheartflows 的访问
|
||||
|
||||
async def force_change_state(self, subflow_id: Any, target_state: ChatState) -> bool:
|
||||
"""强制改变指定子心流的状态"""
|
||||
async with self._lock:
|
||||
subflow = self.subheartflows.get(subflow_id)
|
||||
if not subflow:
|
||||
logger.warning(f"[强制状态转换]尝试转换不存在的子心流{subflow_id} 到 {target_state.value}")
|
||||
return False
|
||||
await subflow.change_chat_state(target_state)
|
||||
logger.info(f"[强制状态转换]子心流 {subflow_id} 已转换到 {target_state.value}")
|
||||
return True
|
||||
|
||||
def get_all_subheartflows(self) -> List["SubHeartflow"]:
|
||||
"""获取所有当前管理的 SubHeartflow 实例列表 (快照)。"""
|
||||
return list(self.subheartflows.values())
|
||||
|
||||
async def get_or_create_subheartflow(self, subheartflow_id: Any) -> Optional["SubHeartflow"]:
|
||||
"""获取或创建指定ID的子心流实例
|
||||
|
||||
Args:
|
||||
subheartflow_id: 子心流唯一标识符
|
||||
mai_states 参数已被移除,使用 self.mai_state_info
|
||||
|
||||
Returns:
|
||||
成功返回SubHeartflow实例,失败返回None
|
||||
"""
|
||||
async with self._lock:
|
||||
# 检查是否已存在该子心流
|
||||
if subheartflow_id in self.subheartflows:
|
||||
subflow = self.subheartflows[subheartflow_id]
|
||||
if subflow.should_stop:
|
||||
logger.warning(f"尝试获取已停止的子心流 {subheartflow_id},正在重新激活")
|
||||
subflow.should_stop = False # 重置停止标志
|
||||
return subflow
|
||||
|
||||
try:
|
||||
new_subflow = SubHeartflow(
|
||||
subheartflow_id,
|
||||
)
|
||||
|
||||
# 然后再进行异步初始化,此时 SubHeartflow 内部若需启动 HeartFChatting,就能拿到 observation
|
||||
await new_subflow.initialize()
|
||||
|
||||
# 注册子心流
|
||||
self.subheartflows[subheartflow_id] = new_subflow
|
||||
heartflow_name = get_chat_manager().get_stream_name(subheartflow_id) or subheartflow_id
|
||||
logger.info(f"[{heartflow_name}] 开始接收消息")
|
||||
|
||||
return new_subflow
|
||||
except Exception as e:
|
||||
logger.error(f"创建子心流 {subheartflow_id} 失败: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
async def sleep_subheartflow(self, subheartflow_id: Any, reason: str) -> bool:
|
||||
"""停止指定的子心流并将其状态设置为 ABSENT"""
|
||||
log_prefix = "[子心流管理]"
|
||||
async with self._lock: # 加锁以安全访问字典
|
||||
subheartflow = self.subheartflows.get(subheartflow_id)
|
||||
|
||||
stream_name = get_chat_manager().get_stream_name(subheartflow_id) or subheartflow_id
|
||||
logger.info(f"{log_prefix} 正在停止 {stream_name}, 原因: {reason}")
|
||||
|
||||
# 调用内部方法处理状态变更
|
||||
success = await _try_set_subflow_absent_internal(subheartflow, log_prefix)
|
||||
|
||||
return success
|
||||
# 锁在此处自动释放
|
||||
|
||||
def get_inactive_subheartflows(self, max_age_seconds=INACTIVE_THRESHOLD_SECONDS):
|
||||
"""识别并返回需要清理的不活跃(处于ABSENT状态超过一小时)子心流(id, 原因)"""
|
||||
_current_time = time.time()
|
||||
flows_to_stop = []
|
||||
|
||||
for subheartflow_id, subheartflow in list(self.subheartflows.items()):
|
||||
state = subheartflow.chat_state.chat_status
|
||||
if state != ChatState.ABSENT:
|
||||
continue
|
||||
subheartflow.update_last_chat_state_time()
|
||||
_absent_last_time = subheartflow.chat_state_last_time
|
||||
flows_to_stop.append(subheartflow_id)
|
||||
|
||||
return flows_to_stop
|
||||
|
||||
async def deactivate_all_subflows(self):
|
||||
"""将所有子心流的状态更改为 ABSENT (例如主状态变为OFFLINE时调用)"""
|
||||
log_prefix = "[停用]"
|
||||
changed_count = 0
|
||||
processed_count = 0
|
||||
|
||||
async with self._lock: # 获取锁以安全迭代
|
||||
# 使用 list() 创建一个当前值的快照,防止在迭代时修改字典
|
||||
flows_to_update = list(self.subheartflows.values())
|
||||
processed_count = len(flows_to_update)
|
||||
if not flows_to_update:
|
||||
logger.debug(f"{log_prefix} 无活跃子心流,无需操作")
|
||||
return
|
||||
|
||||
for subflow in flows_to_update:
|
||||
# 记录原始状态,以便统计实际改变的数量
|
||||
original_state_was_absent = subflow.chat_state.chat_status == ChatState.ABSENT
|
||||
|
||||
success = await _try_set_subflow_absent_internal(subflow, log_prefix)
|
||||
|
||||
# 如果成功设置为 ABSENT 且原始状态不是 ABSENT,则计数
|
||||
if success and not original_state_was_absent:
|
||||
if subflow.chat_state.chat_status == ChatState.ABSENT:
|
||||
changed_count += 1
|
||||
else:
|
||||
# 这种情况理论上不应发生,如果内部方法返回 True 的话
|
||||
stream_name = (
|
||||
get_chat_manager().get_stream_name(subflow.subheartflow_id) or subflow.subheartflow_id
|
||||
)
|
||||
logger.warning(f"{log_prefix} 内部方法声称成功但 {stream_name} 状态未变为 ABSENT。")
|
||||
# 锁在此处自动释放
|
||||
|
||||
logger.info(
|
||||
f"{log_prefix} 完成,共处理 {processed_count} 个子心流,成功将 {changed_count} 个非 ABSENT 子心流的状态更改为 ABSENT。"
|
||||
)
|
||||
|
||||
# async def sbhf_normal_into_focus(self):
|
||||
# """评估子心流兴趣度,满足条件则提升到FOCUSED状态(基于start_hfc_probability)"""
|
||||
# try:
|
||||
# for sub_hf in list(self.subheartflows.values()):
|
||||
# flow_id = sub_hf.subheartflow_id
|
||||
# stream_name = get_chat_manager().get_stream_name(flow_id) or flow_id
|
||||
|
||||
# # 跳过已经是FOCUSED状态的子心流
|
||||
# if sub_hf.chat_state.chat_status == ChatState.FOCUSED:
|
||||
# continue
|
||||
|
||||
# if sub_hf.interest_chatting.start_hfc_probability == 0:
|
||||
# continue
|
||||
# else:
|
||||
# logger.debug(
|
||||
# f"{stream_name},现在状态: {sub_hf.chat_state.chat_status.value},进入专注概率: {sub_hf.interest_chatting.start_hfc_probability}"
|
||||
# )
|
||||
|
||||
# if random.random() >= sub_hf.interest_chatting.start_hfc_probability:
|
||||
# continue
|
||||
|
||||
# # 获取最新状态并执行提升
|
||||
# current_subflow = self.subheartflows.get(flow_id)
|
||||
# if not current_subflow:
|
||||
# continue
|
||||
|
||||
# logger.info(
|
||||
# f"{stream_name} 触发 认真水群 (概率={current_subflow.interest_chatting.start_hfc_probability:.2f})"
|
||||
# )
|
||||
|
||||
# # 执行状态提升
|
||||
# await current_subflow.change_chat_state(ChatState.FOCUSED)
|
||||
|
||||
# except Exception as e:
|
||||
# logger.error(f"启动HFC 兴趣评估失败: {e}", exc_info=True)
|
||||
|
||||
async def sbhf_focus_into_normal(self, subflow_id: Any):
|
||||
"""
|
||||
接收来自 HeartFChatting 的请求,将特定子心流的状态转换为 NORMAL。
|
||||
通常在连续多次 "no_reply" 后被调用。
|
||||
对于私聊和群聊,都转换为 NORMAL。
|
||||
|
||||
Args:
|
||||
subflow_id: 需要转换状态的子心流 ID。
|
||||
"""
|
||||
async with self._lock:
|
||||
subflow = self.subheartflows.get(subflow_id)
|
||||
if not subflow:
|
||||
logger.warning(f"[状态转换请求] 尝试转换不存在的子心流 {subflow_id} 到 NORMAL")
|
||||
return
|
||||
|
||||
stream_name = get_chat_manager().get_stream_name(subflow_id) or subflow_id
|
||||
current_state = subflow.chat_state.chat_status
|
||||
|
||||
if current_state == ChatState.FOCUSED:
|
||||
target_state = ChatState.NORMAL
|
||||
log_reason = "转为NORMAL"
|
||||
|
||||
logger.info(
|
||||
f"[状态转换请求] 接收到请求,将 {stream_name} (当前: {current_state.value}) 尝试转换为 {target_state.value} ({log_reason})"
|
||||
)
|
||||
try:
|
||||
# 从HFC到CHAT时,清空兴趣字典
|
||||
subflow.interest_dict.clear()
|
||||
await subflow.change_chat_state(target_state)
|
||||
final_state = subflow.chat_state.chat_status
|
||||
if final_state == target_state:
|
||||
logger.debug(f"[状态转换请求] {stream_name} 状态已成功转换为 {final_state.value}")
|
||||
else:
|
||||
logger.warning(
|
||||
f"[状态转换请求] 尝试将 {stream_name} 转换为 {target_state.value} 后,状态实际为 {final_state.value}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[状态转换请求] 转换 {stream_name} 到 {target_state.value} 时出错: {e}", exc_info=True
|
||||
)
|
||||
elif current_state == ChatState.ABSENT:
|
||||
logger.debug(f"[状态转换请求] {stream_name} 处于 ABSENT 状态,尝试转为 NORMAL")
|
||||
await subflow.change_chat_state(ChatState.NORMAL)
|
||||
else:
|
||||
logger.debug(f"[状态转换请求] {stream_name} 当前状态为 {current_state.value},无需转换")
|
||||
|
||||
async def delete_subflow(self, subheartflow_id: Any):
|
||||
"""删除指定的子心流。"""
|
||||
async with self._lock:
|
||||
subflow = self.subheartflows.pop(subheartflow_id, None)
|
||||
if subflow:
|
||||
logger.info(f"正在删除 SubHeartflow: {subheartflow_id}...")
|
||||
try:
|
||||
# 调用 shutdown 方法确保资源释放
|
||||
await subflow.shutdown()
|
||||
logger.info(f"SubHeartflow {subheartflow_id} 已成功删除。")
|
||||
except Exception as e:
|
||||
logger.error(f"删除 SubHeartflow {subheartflow_id} 时出错: {e}", exc_info=True)
|
||||
else:
|
||||
logger.warning(f"尝试删除不存在的 SubHeartflow: {subheartflow_id}")
|
||||
|
||||
# --- 新增:处理私聊从 ABSENT 直接到 FOCUSED 的逻辑 --- #
|
||||
async def sbhf_absent_private_into_focus(self):
|
||||
"""检查 ABSENT 状态的私聊子心流是否有新活动,若有则直接转换为 FOCUSED。"""
|
||||
log_prefix_task = "[私聊激活检查]"
|
||||
transitioned_count = 0
|
||||
checked_count = 0
|
||||
|
||||
async with self._lock:
|
||||
# --- 筛选出所有 ABSENT 状态的私聊子心流 --- #
|
||||
eligible_subflows = [
|
||||
hf
|
||||
for hf in self.subheartflows.values()
|
||||
if hf.chat_state.chat_status == ChatState.ABSENT and not hf.is_group_chat
|
||||
]
|
||||
checked_count = len(eligible_subflows)
|
||||
|
||||
if not eligible_subflows:
|
||||
# logger.debug(f"{log_prefix_task} 没有 ABSENT 状态的私聊子心流可以评估。")
|
||||
return
|
||||
|
||||
# --- 遍历评估每个符合条件的私聊 --- #
|
||||
for sub_hf in eligible_subflows:
|
||||
flow_id = sub_hf.subheartflow_id
|
||||
stream_name = get_chat_manager().get_stream_name(flow_id) or flow_id
|
||||
log_prefix = f"[{stream_name}]({log_prefix_task})"
|
||||
|
||||
try:
|
||||
# --- 检查是否有新活动 --- #
|
||||
observation = sub_hf._get_primary_observation() # 获取主要观察者
|
||||
is_active = False
|
||||
if observation:
|
||||
# 检查自上次状态变为 ABSENT 后是否有新消息
|
||||
# 使用 chat_state_changed_time 可能更精确
|
||||
# 加一点点缓冲时间(例如 1 秒)以防时间戳完全相等
|
||||
timestamp_to_check = sub_hf.chat_state_changed_time - 1
|
||||
has_new = await observation.has_new_messages_since(timestamp_to_check)
|
||||
if has_new:
|
||||
is_active = True
|
||||
logger.debug(f"{log_prefix} 检测到新消息,标记为活跃。")
|
||||
else:
|
||||
logger.warning(f"{log_prefix} 无法获取主要观察者来检查活动状态。")
|
||||
|
||||
# --- 如果活跃,则尝试转换 --- #
|
||||
if is_active:
|
||||
await sub_hf.change_chat_state(ChatState.FOCUSED)
|
||||
# 确认转换成功
|
||||
if sub_hf.chat_state.chat_status == ChatState.FOCUSED:
|
||||
transitioned_count += 1
|
||||
logger.info(f"{log_prefix} 成功进入 FOCUSED 状态。")
|
||||
else:
|
||||
logger.warning(
|
||||
f"{log_prefix} 尝试进入 FOCUSED 状态失败。当前状态: {sub_hf.chat_state.chat_status.value}"
|
||||
)
|
||||
# else: # 不活跃,无需操作
|
||||
# logger.debug(f"{log_prefix} 未检测到新活动,保持 ABSENT。")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{log_prefix} 检查私聊活动或转换状态时出错: {e}", exc_info=True)
|
||||
|
||||
# --- 循环结束后记录总结日志 --- #
|
||||
if transitioned_count > 0:
|
||||
logger.debug(
|
||||
f"{log_prefix_task} 完成,共检查 {checked_count} 个私聊,{transitioned_count} 个转换为 FOCUSED。"
|
||||
)
|
||||
@@ -1,73 +0,0 @@
|
||||
from typing import Optional, Tuple, Dict
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
|
||||
|
||||
logger = get_logger("heartflow_utils")
|
||||
|
||||
|
||||
def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]:
|
||||
"""
|
||||
获取聊天类型(是否群聊)和私聊对象信息。
|
||||
|
||||
Args:
|
||||
chat_id: 聊天流ID
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Optional[Dict]]:
|
||||
- bool: 是否为群聊 (True 是群聊, False 是私聊或未知)
|
||||
- Optional[Dict]: 如果是私聊,包含对方信息的字典;否则为 None。
|
||||
字典包含: platform, user_id, user_nickname, person_id, person_name
|
||||
"""
|
||||
is_group_chat = False # Default to private/unknown
|
||||
chat_target_info = None
|
||||
|
||||
try:
|
||||
chat_stream = get_chat_manager().get_stream(chat_id)
|
||||
|
||||
if chat_stream:
|
||||
if chat_stream.group_info:
|
||||
is_group_chat = True
|
||||
chat_target_info = None # Explicitly None for group chat
|
||||
elif chat_stream.user_info: # It's a private chat
|
||||
is_group_chat = False
|
||||
user_info = chat_stream.user_info
|
||||
platform = chat_stream.platform
|
||||
user_id = user_info.user_id
|
||||
|
||||
# Initialize target_info with basic info
|
||||
target_info = {
|
||||
"platform": platform,
|
||||
"user_id": user_id,
|
||||
"user_nickname": user_info.user_nickname,
|
||||
"person_id": None,
|
||||
"person_name": None,
|
||||
}
|
||||
|
||||
# Try to fetch person info
|
||||
try:
|
||||
# get_person_id is synchronous, call it directly (no to_thread needed here)
|
||||
person_id = PersonInfoManager.get_person_id(platform, user_id)
|
||||
person_name = None
|
||||
if person_id:
|
||||
# person_name is read via the synchronous get_value_sync helper
|
||||
person_info_manager = get_person_info_manager()
|
||||
person_name = person_info_manager.get_value_sync(person_id, "person_name")
|
||||
|
||||
target_info["person_id"] = person_id
|
||||
target_info["person_name"] = person_name
|
||||
except Exception as person_e:
|
||||
logger.warning(
|
||||
f"获取 person_id 或 person_name 时出错 for {platform}:{user_id} in utils: {person_e}"
|
||||
)
|
||||
|
||||
chat_target_info = target_info
|
||||
else:
|
||||
logger.warning(f"无法获取 chat_stream for {chat_id} in utils")
|
||||
# Keep defaults: is_group_chat=False, chat_target_info=None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"获取聊天类型和目标信息时出错 for {chat_id}: {e}", exc_info=True)
|
||||
# Keep defaults on error
|
||||
|
||||
return is_group_chat, chat_target_info
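# --- Editor's hedged usage sketch, not part of the diff ---
# The removed helper above returns (is_group_chat, chat_target_info); its
# replacement is imported from src.chat.utils.utils elsewhere in this commit.
# The chat id below is made up.
def _demo_chat_type(chat_id: str = "stream_42") -> None:
    is_group, target = get_chat_type_and_target_info(chat_id)
    if is_group:
        print("group chat, no single chat target")
    elif target:
        print(f"private chat with {target.get('person_name') or target.get('user_nickname')}")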
|
||||
@@ -5,60 +5,67 @@ from src.chat.knowledge.mem_active_manager import MemoryActiveManager
|
||||
from src.chat.knowledge.qa_manager import QAManager
|
||||
from src.chat.knowledge.kg_manager import KGManager
|
||||
from src.chat.knowledge.global_logger import logger
|
||||
from src.config.config import global_config as bot_global_config
|
||||
# try:
|
||||
# import quick_algo
|
||||
# except ImportError:
|
||||
# print("quick_algo not found, please install it first")
|
||||
|
||||
logger.info("正在初始化Mai-LPMM\n")
|
||||
logger.info("创建LLM客户端")
|
||||
llm_client_list = dict()
|
||||
for key in global_config["llm_providers"]:
|
||||
# 检查LPMM知识库是否启用
|
||||
if bot_global_config.lpmm_knowledge.enable:
|
||||
logger.info("正在初始化Mai-LPMM\n")
|
||||
logger.info("创建LLM客户端")
|
||||
llm_client_list = dict()
|
||||
for key in global_config["llm_providers"]:
|
||||
llm_client_list[key] = LLMClient(
|
||||
global_config["llm_providers"][key]["base_url"],
|
||||
global_config["llm_providers"][key]["api_key"],
|
||||
)
|
||||
|
||||
# 初始化Embedding库
|
||||
embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]])
|
||||
logger.info("正在从文件加载Embedding库")
|
||||
try:
|
||||
# 初始化Embedding库
|
||||
embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]])
|
||||
logger.info("正在从文件加载Embedding库")
|
||||
try:
|
||||
embed_manager.load_from_file()
|
||||
except Exception as e:
|
||||
except Exception as e:
|
||||
logger.warning("此消息不会影响正常使用:从文件加载Embedding库时,{}".format(e))
|
||||
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
|
||||
logger.info("Embedding库加载完成")
|
||||
# 初始化KG
|
||||
kg_manager = KGManager()
|
||||
logger.info("正在从文件加载KG")
|
||||
try:
|
||||
logger.info("Embedding库加载完成")
|
||||
# 初始化KG
|
||||
kg_manager = KGManager()
|
||||
logger.info("正在从文件加载KG")
|
||||
try:
|
||||
kg_manager.load_from_file()
|
||||
except Exception as e:
|
||||
except Exception as e:
|
||||
logger.warning("此消息不会影响正常使用:从文件加载KG时,{}".format(e))
|
||||
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
|
||||
logger.info("KG加载完成")
|
||||
logger.info("KG加载完成")
|
||||
|
||||
logger.info(f"KG节点数量:{len(kg_manager.graph.get_node_list())}")
|
||||
logger.info(f"KG边数量:{len(kg_manager.graph.get_edge_list())}")
|
||||
logger.info(f"KG节点数量:{len(kg_manager.graph.get_node_list())}")
|
||||
logger.info(f"KG边数量:{len(kg_manager.graph.get_edge_list())}")
|
||||
|
||||
|
||||
# 数据比对:Embedding库与KG的段落hash集合
|
||||
for pg_hash in kg_manager.stored_paragraph_hashes:
|
||||
# 数据比对:Embedding库与KG的段落hash集合
|
||||
for pg_hash in kg_manager.stored_paragraph_hashes:
|
||||
key = PG_NAMESPACE + "-" + pg_hash
|
||||
if key not in embed_manager.stored_pg_hashes:
|
||||
logger.warning(f"KG中存在Embedding库中不存在的段落:{key}")
|
||||
|
||||
# 问答系统(用于知识库)
|
||||
qa_manager = QAManager(
|
||||
# 问答系统(用于知识库)
|
||||
qa_manager = QAManager(
|
||||
embed_manager,
|
||||
kg_manager,
|
||||
llm_client_list[global_config["embedding"]["provider"]],
|
||||
llm_client_list[global_config["qa"]["llm"]["provider"]],
|
||||
llm_client_list[global_config["qa"]["llm"]["provider"]],
|
||||
)
|
||||
)
|
||||
|
||||
# 记忆激活(用于记忆库)
|
||||
inspire_manager = MemoryActiveManager(
|
||||
# 记忆激活(用于记忆库)
|
||||
inspire_manager = MemoryActiveManager(
|
||||
embed_manager,
|
||||
llm_client_list[global_config["embedding"]["provider"]],
|
||||
)
|
||||
)
|
||||
else:
|
||||
logger.info("LPMM知识库已禁用,跳过初始化")
|
||||
# 创建空的占位符对象,避免导入错误
|
||||
qa_manager = None
|
||||
inspire_manager = None
|
||||
|
||||
@@ -784,12 +784,12 @@ class Hippocampus:
|
||||
|
||||
# 计算激活节点数与总节点数的比值
|
||||
total_activation = sum(activate_map.values())
|
||||
logger.debug(f"总激活值: {total_activation:.2f}")
|
||||
# logger.debug(f"总激活值: {total_activation:.2f}")
|
||||
total_nodes = len(self.memory_graph.G.nodes())
|
||||
# activated_nodes = len(activate_map)
|
||||
activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
|
||||
activation_ratio = activation_ratio * 60
|
||||
logger.info(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
|
||||
logger.debug(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
|
||||
|
||||
return activation_ratio
|
||||
|
||||
|
||||
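# 激活率公式示意(对应上面 Hippocampus 的计算:总激活值 / 总节点数,再乘以 60 放大;数值为假设):
total_activation = 12.5
total_nodes = 500
activation_ratio = (total_activation / total_nodes if total_nodes > 0 else 0) * 60
print(activation_ratio)  # 1.5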
@@ -69,23 +69,19 @@ def init_prompt():
|
||||
class MemoryActivator:
|
||||
def __init__(self):
|
||||
# TODO: API-Adapter修改标记
|
||||
self.summary_model = LLMRequest(
|
||||
model=global_config.model.memory_summary,
|
||||
temperature=0.7,
|
||||
|
||||
self.key_words_model = LLMRequest(
|
||||
model=global_config.model.utils_small,
|
||||
temperature=0.5,
|
||||
request_type="memory_activator",
|
||||
)
|
||||
|
||||
self.running_memory = []
|
||||
self.cached_keywords = set() # 用于缓存历史关键词
|
||||
|
||||
async def activate_memory_with_chat_history(self, target_message, chat_history_prompt) -> List[Dict]:
|
||||
"""
|
||||
激活记忆
|
||||
|
||||
Args:
|
||||
target_message: 目标消息
chat_history_prompt: 聊天历史记录的prompt
|
||||
|
||||
Returns:
|
||||
List[Dict]: 激活的记忆列表
|
||||
"""
|
||||
# 如果记忆系统被禁用,直接返回空列表
|
||||
if not global_config.memory.enable_memory:
|
||||
@@ -103,7 +99,7 @@ class MemoryActivator:
|
||||
|
||||
# logger.debug(f"prompt: {prompt}")
|
||||
|
||||
response, (reasoning_content, model_name) = await self.summary_model.generate_response_async(prompt)
|
||||
response, (reasoning_content, model_name) = await self.key_words_model.generate_response_async(prompt)
|
||||
|
||||
keywords = list(get_keywords_from_json(response))
|
||||
|
||||
@@ -117,14 +113,13 @@ class MemoryActivator:
|
||||
|
||||
# 添加新的关键词到缓存
|
||||
self.cached_keywords.update(keywords)
|
||||
logger.info(f"当前激活的记忆关键词: {self.cached_keywords}")
|
||||
|
||||
# 调用记忆系统获取相关记忆
|
||||
related_memory = await hippocampus_manager.get_memory_from_topic(
|
||||
valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
|
||||
)
|
||||
|
||||
logger.info(f"获取到的记忆: {related_memory}")
|
||||
logger.info(f"当前记忆关键词: {self.cached_keywords} 。获取到的记忆: {related_memory}")
|
||||
|
||||
# 激活时,所有已有记忆的duration+1,达到3则移除
|
||||
for m in self.running_memory[:]:
|
||||
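# running_memory 淘汰逻辑示意(独立的最小演示;循环体在上方被截断,以下按注释"duration+1,达到3则移除"实现,数据为假设):
running_memory = [
    {"keyword": "猫", "duration": 2},
    {"keyword": "天气", "duration": 1},
]
for m in running_memory[:]:  # 遍历副本,允许边遍历边删除
    m["duration"] += 1
    if m["duration"] >= 3:
        running_memory.remove(m)
print(running_memory)  # [{'keyword': '天气', 'duration': 2}]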
@@ -1,6 +1,6 @@
|
||||
from src.chat.emoji_system.emoji_manager import get_emoji_manager
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.message_receive.message_sender import message_manager
|
||||
from src.chat.message_receive.normal_message_sender import message_manager
|
||||
from src.chat.message_receive.storage import MessageStorage
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ from src.chat.message_receive.message import MessageRecv
|
||||
from src.experimental.only_message_process import MessageProcessor
|
||||
from src.chat.message_receive.storage import MessageStorage
|
||||
from src.experimental.PFC.pfc_manager import PFCManager
|
||||
from src.chat.focus_chat.heartflow_message_processor import HeartFCMessageReceiver
|
||||
from src.chat.heart_flow.heartflow_message_processor import HeartFCMessageReceiver
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.config.config import global_config
|
||||
from src.plugin_system.core.component_registry import component_registry # 导入新插件系统
|
||||
@@ -190,15 +190,15 @@ class ChatBot:
|
||||
|
||||
message.update_chat_stream(chat)
|
||||
|
||||
# 处理消息内容,生成纯文本
|
||||
await message.process()
|
||||
|
||||
# 过滤检查
|
||||
if _check_ban_words(message.processed_plain_text, chat, user_info) or _check_ban_regex(
|
||||
message.raw_message, chat, user_info
|
||||
):
|
||||
return
|
||||
|
||||
# 处理消息内容,生成纯文本
|
||||
await message.process()
|
||||
|
||||
# 命令处理 - 使用新插件系统检查并处理命令
|
||||
is_command, cmd_result, continue_process = await self._process_commands_with_new_system(message)
|
||||
|
||||
|
||||
@@ -108,7 +108,7 @@ class MessageRecv(Message):
|
||||
self.detailed_plain_text = message_dict.get("detailed_plain_text", "")
|
||||
self.is_emoji = False
|
||||
self.is_picid = False
|
||||
self.is_mentioned = 0.0
|
||||
self.is_mentioned = None
|
||||
self.priority_mode = "interest"
|
||||
self.priority_info = None
|
||||
|
||||
@@ -152,14 +152,10 @@ class MessageRecv(Message):
|
||||
elif segment.type == "mention_bot":
|
||||
self.is_mentioned = float(segment.data)
|
||||
return ""
|
||||
elif segment.type == "set_priority_mode":
|
||||
# 处理设置优先级模式的消息段
|
||||
if isinstance(segment.data, str):
|
||||
self.priority_mode = segment.data
|
||||
return ""
|
||||
elif segment.type == "priority_info":
|
||||
if isinstance(segment.data, dict):
|
||||
# 处理优先级信息
|
||||
self.priority_mode = "priority"
|
||||
self.priority_info = segment.data
|
||||
"""
|
||||
{
|
||||
|
||||
@@ -9,7 +9,6 @@ from src.common.message.api import get_global_api
|
||||
from .message import MessageSending, MessageThinking, MessageSet
|
||||
|
||||
from src.chat.message_receive.storage import MessageStorage
|
||||
from ...config.config import global_config
|
||||
from ..utils.utils import truncate_message, calculate_typing_time, count_messages_between
|
||||
|
||||
from src.common.logger import get_logger
|
||||
@@ -192,20 +191,6 @@ class MessageManager:
|
||||
container = await self.get_container(chat_stream.stream_id)
|
||||
container.add_message(message)
|
||||
|
||||
def check_if_sending_message_exist(self, chat_id, thinking_id):
|
||||
"""检查指定聊天流的容器中是否存在具有特定 thinking_id 的 MessageSending 消息 或 emoji 消息"""
|
||||
# 这个方法现在是非异步的,因为它只读取数据
|
||||
container = self.containers.get(chat_id) # 直接 get,因为读取不需要锁
|
||||
if container and container.has_messages():
|
||||
for message in container.get_all_messages():
|
||||
if isinstance(message, MessageSending):
|
||||
msg_id = getattr(message.message_info, "message_id", None)
|
||||
# 检查 message_id 是否匹配 thinking_id 或以 "me" 开头 (emoji)
|
||||
if msg_id == thinking_id or (msg_id and msg_id.startswith("me")):
|
||||
# logger.debug(f"检查到存在相同thinking_id或emoji的消息: {msg_id} for {thinking_id}")
|
||||
return True
|
||||
return False
|
||||
|
||||
async def _handle_sending_message(self, container: MessageContainer, message: MessageSending):
|
||||
"""处理单个 MessageSending 消息 (包含 set_reply 逻辑)"""
|
||||
try:
|
||||
@@ -216,12 +201,7 @@ class MessageManager:
|
||||
thinking_messages_count, thinking_messages_length = count_messages_between(
|
||||
start_time=thinking_start_time, end_time=now_time, stream_id=message.chat_stream.stream_id
|
||||
)
|
||||
# print(f"message.reply:{message.reply}")
|
||||
|
||||
# --- 条件应用 set_reply 逻辑 ---
|
||||
# logger.debug(
|
||||
# f"[message.apply_set_reply_logic:{message.apply_set_reply_logic},message.is_head:{message.is_head},thinking_messages_count:{thinking_messages_count},thinking_messages_length:{thinking_messages_length},message.is_private_message():{message.is_private_message()}]"
|
||||
# )
|
||||
if (
|
||||
message.is_head
|
||||
and (thinking_messages_count > 3 or thinking_messages_length > 200)
|
||||
@@ -277,14 +257,6 @@ class MessageManager:
|
||||
flush=True,
|
||||
)
|
||||
|
||||
# 检查是否超时
|
||||
if thinking_time > global_config.normal_chat.thinking_timeout:
|
||||
logger.warning(
|
||||
f"[{chat_id}] 消息思考超时 ({thinking_time:.1f}秒),移除消息 {message_earliest.message_info.message_id}"
|
||||
)
|
||||
container.remove_message(message_earliest)
|
||||
print() # 超时后换行,避免覆盖下一条日志
|
||||
|
||||
elif isinstance(message_earliest, MessageSending):
|
||||
# --- 处理发送消息 ---
|
||||
await self._handle_sending_message(container, message_earliest)
|
||||
@@ -301,12 +273,6 @@ class MessageManager:
|
||||
logger.info(f"[{chat_id}] 处理超时发送消息: {msg.message_info.message_id}")
|
||||
await self._handle_sending_message(container, msg) # 复用处理逻辑
|
||||
|
||||
# 清理空容器 (可选)
|
||||
# async with self._container_lock:
|
||||
# if not container.has_messages() and chat_id in self.containers:
|
||||
# logger.debug(f"[{chat_id}] 容器已空,准备移除。")
|
||||
# del self.containers[chat_id]
|
||||
|
||||
async def _start_processor_loop(self):
|
||||
"""消息处理器主循环"""
|
||||
while self._running:
|
||||
@@ -4,7 +4,7 @@ from typing import Union
|
||||
# from ...common.database.database import db # db is now Peewee's SqliteDatabase instance
|
||||
from .message import MessageSending, MessageRecv
|
||||
from .chat_stream import ChatStream
|
||||
from ...common.database.database_model import Messages, RecalledMessages # Import Peewee models
|
||||
from ...common.database.database_model import Messages, RecalledMessages, Images # Import Peewee models
|
||||
from src.common.logger import get_logger
|
||||
|
||||
logger = get_logger("message_storage")
|
||||
@@ -25,6 +25,7 @@ class MessageStorage:
|
||||
# print(processed_plain_text)
|
||||
|
||||
if processed_plain_text:
|
||||
processed_plain_text = MessageStorage.replace_image_descriptions(processed_plain_text)
|
||||
filtered_processed_plain_text = re.sub(pattern, "", processed_plain_text, flags=re.DOTALL)
|
||||
else:
|
||||
filtered_processed_plain_text = ""
|
||||
@@ -136,3 +137,29 @@ class MessageStorage:
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"更新消息ID失败: {e}")
|
||||
|
||||
@staticmethod
|
||||
def replace_image_descriptions(text: str) -> str:
|
||||
"""将[图片:描述]替换为[picid:image_id]"""
|
||||
# 先检查文本中是否有图片标记
|
||||
pattern = r"\[图片:([^\]]+)\]"
|
||||
matches = re.findall(pattern, text)
|
||||
|
||||
if not matches:
|
||||
logger.debug("文本中没有图片标记,直接返回原文本")
|
||||
return text
|
||||
|
||||
def replace_match(match):
|
||||
description = match.group(1).strip()
|
||||
try:
|
||||
image_record = (
|
||||
Images.select().where(Images.description == description).order_by(Images.timestamp.desc()).first()
|
||||
)
|
||||
if image_record:
|
||||
return f"[picid:{image_record.image_id}]"
|
||||
else:
|
||||
return match.group(0) # 保持原样
|
||||
except Exception:
|
||||
return match.group(0)
|
||||
|
||||
return re.sub(r"\[图片:([^\]]+)\]", replace_match, text)
|
||||
|
||||
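# [图片:描述] -> [picid:image_id] 改写示意(不查数据库的简化版,image_id 映射为假设值):
import re

fake_index = {"一只猫": "img_0001"}  # 假设的 描述 -> image_id 映射

def demo_replace(text: str) -> str:
    def repl(match):
        image_id = fake_index.get(match.group(1).strip())
        return f"[picid:{image_id}]" if image_id else match.group(0)
    return re.sub(r"\[图片:([^\]]+)\]", repl, text)

print(demo_replace("看这张[图片:一只猫]"))  # 看这张[picid:img_0001]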
File diff suppressed because it is too large
@@ -1,294 +0,0 @@
|
||||
from typing import List, Any, Dict
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
|
||||
from src.config.config import global_config
|
||||
import random
|
||||
import time
|
||||
|
||||
logger = get_logger("normal_chat_action_modifier")
|
||||
|
||||
|
||||
class NormalChatActionModifier:
|
||||
"""Normal Chat动作修改器
|
||||
|
||||
负责根据Normal Chat的上下文和状态动态调整可用的动作集合
|
||||
实现与Focus Chat类似的动作激活策略,但将LLM_JUDGE转换为概率激活以提升性能
|
||||
"""
|
||||
|
||||
def __init__(self, action_manager: ActionManager, stream_id: str, stream_name: str):
|
||||
"""初始化动作修改器"""
|
||||
self.action_manager = action_manager
|
||||
self.stream_id = stream_id
|
||||
self.stream_name = stream_name
|
||||
self.log_prefix = f"[{stream_name}]动作修改器"
|
||||
|
||||
# 缓存所有注册的动作
|
||||
self.all_actions = self.action_manager.get_registered_actions()
|
||||
|
||||
async def modify_actions_for_normal_chat(
|
||||
self,
|
||||
chat_stream,
|
||||
recent_replies: List[dict],
|
||||
message_content: str,
|
||||
**kwargs: Any,
|
||||
):
|
||||
"""为Normal Chat修改可用动作集合
|
||||
|
||||
实现动作激活策略:
|
||||
1. 基于关联类型的动态过滤
|
||||
2. 基于激活类型的智能判定(LLM_JUDGE转为概率激活)
|
||||
|
||||
Args:
|
||||
chat_stream: 聊天流对象
|
||||
recent_replies: 最近的回复记录
|
||||
message_content: 当前消息内容
|
||||
**kwargs: 其他参数
|
||||
"""
|
||||
|
||||
reasons = []
|
||||
merged_action_changes = {"add": [], "remove": []}
|
||||
type_mismatched_actions = [] # 在外层定义避免作用域问题
|
||||
|
||||
self.action_manager.restore_default_actions()
|
||||
|
||||
# 第一阶段:基于关联类型的动态过滤
|
||||
if chat_stream:
|
||||
chat_context = chat_stream.context if hasattr(chat_stream, "context") else None
|
||||
if chat_context:
|
||||
# 获取Normal模式下的可用动作(已经过滤了mode_enable)
|
||||
current_using_actions = self.action_manager.get_using_actions_for_mode("normal")
|
||||
# print(f"current_using_actions: {current_using_actions}")
|
||||
for action_name in current_using_actions.keys():
|
||||
if action_name in self.all_actions:
|
||||
data = self.all_actions[action_name]
|
||||
if data.get("associated_types"):
|
||||
if not chat_context.check_types(data["associated_types"]):
|
||||
type_mismatched_actions.append(action_name)
|
||||
logger.debug(f"{self.log_prefix} 动作 {action_name} 关联类型不匹配,移除该动作")
|
||||
|
||||
if type_mismatched_actions:
|
||||
merged_action_changes["remove"].extend(type_mismatched_actions)
|
||||
reasons.append(f"移除{type_mismatched_actions}(关联类型不匹配)")
|
||||
|
||||
# 第二阶段:应用激活类型判定
|
||||
# 构建聊天内容 - 使用与planner一致的方式
|
||||
chat_content = ""
|
||||
if chat_stream and hasattr(chat_stream, "stream_id"):
|
||||
try:
|
||||
# 获取消息历史,使用与normal_chat_planner相同的方法
|
||||
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=chat_stream.stream_id,
|
||||
timestamp=time.time(),
|
||||
limit=global_config.chat.max_context_size, # 使用相同的配置
|
||||
)
|
||||
|
||||
# 构建可读的聊天上下文
|
||||
chat_content = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="relative",
|
||||
read_mark=0.0,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
logger.debug(f"{self.log_prefix} 成功构建聊天内容,长度: {len(chat_content)}")
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"{self.log_prefix} 构建聊天内容失败: {e}")
|
||||
chat_content = ""
|
||||
|
||||
# 获取当前Normal模式下的动作集进行激活判定
|
||||
current_actions = self.action_manager.get_using_actions_for_mode("normal")
|
||||
|
||||
# print(f"current_actions: {current_actions}")
|
||||
# print(f"chat_content: {chat_content}")
|
||||
final_activated_actions = await self._apply_normal_activation_filtering(
|
||||
current_actions, chat_content, message_content, recent_replies
|
||||
)
|
||||
# print(f"final_activated_actions: {final_activated_actions}")
|
||||
|
||||
# 统一处理所有需要移除的动作,避免重复移除
|
||||
all_actions_to_remove = set() # 使用set避免重复
|
||||
|
||||
# 添加关联类型不匹配的动作
|
||||
if type_mismatched_actions:
|
||||
all_actions_to_remove.update(type_mismatched_actions)
|
||||
|
||||
# 添加激活类型判定未通过的动作
|
||||
for action_name in current_actions.keys():
|
||||
if action_name not in final_activated_actions:
|
||||
all_actions_to_remove.add(action_name)
|
||||
|
||||
# 统计移除原因(避免重复)
|
||||
activation_failed_actions = [
|
||||
name
|
||||
for name in current_actions.keys()
|
||||
if name not in final_activated_actions and name not in type_mismatched_actions
|
||||
]
|
||||
if activation_failed_actions:
|
||||
reasons.append(f"移除{activation_failed_actions}(激活类型判定未通过)")
|
||||
|
||||
# 统一执行移除操作
|
||||
for action_name in all_actions_to_remove:
|
||||
success = self.action_manager.remove_action_from_using(action_name)
|
||||
if success:
|
||||
logger.debug(f"{self.log_prefix} 移除动作: {action_name}")
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix} 动作 {action_name} 已经不在使用集中,跳过移除")
|
||||
|
||||
# 应用动作添加(如果有的话)
|
||||
for action_name in merged_action_changes["add"]:
|
||||
if action_name in self.all_actions:
|
||||
success = self.action_manager.add_action_to_using(action_name)
|
||||
if success:
|
||||
logger.debug(f"{self.log_prefix} 添加动作: {action_name}")
|
||||
|
||||
# 记录变更原因
|
||||
if reasons:
|
||||
logger.info(f"{self.log_prefix} 动作调整完成: {' | '.join(reasons)}")
|
||||
|
||||
# 获取最终的Normal模式可用动作并记录
|
||||
final_actions = self.action_manager.get_using_actions_for_mode("normal")
|
||||
logger.debug(f"{self.log_prefix} 当前Normal模式可用动作: {list(final_actions.keys())}")
|
||||
|
||||
async def _apply_normal_activation_filtering(
|
||||
self,
|
||||
actions_with_info: Dict[str, Any],
|
||||
chat_content: str = "",
|
||||
message_content: str = "",
|
||||
recent_replies: List[dict] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
应用Normal模式的激活类型过滤逻辑
|
||||
|
||||
与Focus模式的区别:
|
||||
1. LLM_JUDGE类型转换为概率激活(避免LLM调用)
|
||||
2. RANDOM类型保持概率激活
|
||||
3. KEYWORD类型保持关键词匹配
|
||||
4. ALWAYS类型直接激活
|
||||
|
||||
Args:
|
||||
actions_with_info: 带完整信息的动作字典
|
||||
chat_content: 聊天内容
|
||||
message_content: 当前消息内容
|
||||
recent_replies: 最近的回复记录列表
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: 过滤后激活的actions字典
|
||||
"""
|
||||
activated_actions = {}
|
||||
|
||||
# 分类处理不同激活类型的actions
|
||||
always_actions = {}
|
||||
random_actions = {}
|
||||
keyword_actions = {}
|
||||
|
||||
for action_name, action_info in actions_with_info.items():
|
||||
# 使用normal_activation_type
|
||||
activation_type = action_info.get("normal_activation_type", "always")
|
||||
|
||||
# 现在统一是字符串格式的激活类型值
|
||||
if activation_type == "always":
|
||||
always_actions[action_name] = action_info
|
||||
elif activation_type == "random" or activation_type == "llm_judge":
|
||||
random_actions[action_name] = action_info
|
||||
elif activation_type == "keyword":
|
||||
keyword_actions[action_name] = action_info
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix}未知的激活类型: {activation_type},跳过处理")
|
||||
|
||||
# 1. 处理ALWAYS类型(直接激活)
|
||||
for action_name, action_info in always_actions.items():
|
||||
activated_actions[action_name] = action_info
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: ALWAYS类型直接激活")
|
||||
|
||||
# 2. 处理RANDOM类型(概率激活)
|
||||
for action_name, action_info in random_actions.items():
|
||||
probability = action_info.get("random_activation_probability", ActionManager.DEFAULT_RANDOM_PROBABILITY)
|
||||
should_activate = random.random() < probability
|
||||
if should_activate:
|
||||
activated_actions[action_name] = action_info
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: RANDOM类型触发(概率{probability})")
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: RANDOM类型未触发(概率{probability})")
|
||||
|
||||
# 3. 处理KEYWORD类型(关键词匹配)
|
||||
for action_name, action_info in keyword_actions.items():
|
||||
should_activate = self._check_keyword_activation(action_name, action_info, chat_content, message_content)
|
||||
if should_activate:
|
||||
activated_actions[action_name] = action_info
|
||||
keywords = action_info.get("activation_keywords", [])
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: KEYWORD类型匹配关键词({keywords})")
|
||||
else:
|
||||
keywords = action_info.get("activation_keywords", [])
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: KEYWORD类型未匹配关键词({keywords})")
|
||||
|
||||
logger.debug(f"{self.log_prefix}Normal模式激活类型过滤完成: {list(activated_actions.keys())}")
|
||||
return activated_actions
|
||||
|
||||
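# RANDOM/llm_judge 概率激活示意(normal 模式把 llm_judge 退化为按概率激活;概率值为假设):
import random

probability = 0.3  # 假设取自 random_activation_probability
if random.random() < probability:
    print("本次激活该动作")
else:
    print("本次跳过该动作")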
def _check_keyword_activation(
|
||||
self,
|
||||
action_name: str,
|
||||
action_info: Dict[str, Any],
|
||||
chat_content: str = "",
|
||||
message_content: str = "",
|
||||
) -> bool:
|
||||
"""
|
||||
检查是否匹配关键词触发条件
|
||||
|
||||
Args:
|
||||
action_name: 动作名称
|
||||
action_info: 动作信息
|
||||
chat_content: 聊天内容(已经是格式化后的可读消息)
message_content: 当前消息内容
|
||||
|
||||
Returns:
|
||||
bool: 是否应该激活此action
|
||||
"""
|
||||
|
||||
activation_keywords = action_info.get("activation_keywords", [])
|
||||
case_sensitive = action_info.get("keyword_case_sensitive", False)
|
||||
|
||||
if not activation_keywords:
|
||||
logger.warning(f"{self.log_prefix}动作 {action_name} 设置为关键词触发但未配置关键词")
|
||||
return False
|
||||
|
||||
# 使用构建好的聊天内容作为检索文本
|
||||
search_text = chat_content + message_content
|
||||
|
||||
# 如果不区分大小写,转换为小写
|
||||
if not case_sensitive:
|
||||
search_text = search_text.lower()
|
||||
|
||||
# 检查每个关键词
|
||||
matched_keywords = []
|
||||
for keyword in activation_keywords:
|
||||
check_keyword = keyword if case_sensitive else keyword.lower()
|
||||
if check_keyword in search_text:
|
||||
matched_keywords.append(keyword)
|
||||
|
||||
# print(f"search_text: {search_text}")
|
||||
# print(f"activation_keywords: {activation_keywords}")
|
||||
|
||||
if matched_keywords:
|
||||
logger.debug(f"{self.log_prefix}动作 {action_name} 匹配到关键词: {matched_keywords}")
|
||||
return True
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}")
|
||||
return False
|
||||
|
||||
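# 关键词匹配示意(对应 _check_keyword_activation 的大小写不敏感子串匹配,数据为假设):
activation_keywords = ["画图", "draw"]
search_text = ("帮我Draw一张猫的图" + "最新消息内容").lower()
matched = [kw for kw in activation_keywords if kw.lower() in search_text]
print(bool(matched), matched)  # True ['draw']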
def get_available_actions_count(self) -> int:
|
||||
"""获取当前可用动作数量(排除默认的no_action)"""
|
||||
current_actions = self.action_manager.get_using_actions_for_mode("normal")
|
||||
# 排除no_action(如果存在)
|
||||
filtered_actions = {k: v for k, v in current_actions.items() if k != "no_action"}
|
||||
return len(filtered_actions)
|
||||
|
||||
def should_skip_planning(self) -> bool:
|
||||
"""判断是否应该跳过规划过程"""
|
||||
available_count = self.get_available_actions_count()
|
||||
if available_count == 0:
|
||||
logger.debug(f"{self.log_prefix} 没有可用动作,跳过规划")
|
||||
return True
|
||||
return False
|
||||
@@ -1,262 +0,0 @@
|
||||
"""
|
||||
Normal Chat Expressor
|
||||
|
||||
为Normal Chat专门设计的表达器,不需要经过LLM风格化处理,
|
||||
直接发送消息,主要用于插件动作中需要发送消息的场景。
|
||||
"""
|
||||
|
||||
import time
|
||||
from typing import List, Optional, Tuple, Dict, Any
|
||||
from src.chat.message_receive.message import MessageRecv, MessageSending, MessageThinking, Seg
|
||||
from src.chat.message_receive.message import UserInfo
|
||||
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
|
||||
from src.chat.message_receive.message_sender import message_manager
|
||||
from src.config.config import global_config
|
||||
from src.common.logger import get_logger
|
||||
|
||||
logger = get_logger("normal_chat_expressor")
|
||||
|
||||
|
||||
class NormalChatExpressor:
|
||||
"""Normal Chat专用表达器
|
||||
|
||||
特点:
|
||||
1. 不经过LLM风格化,直接发送消息
|
||||
2. 支持文本和表情包发送
|
||||
3. 为插件动作提供简化的消息发送接口
|
||||
4. 保持与focus_chat expressor相似的API,但去掉复杂的风格化流程
|
||||
"""
|
||||
|
||||
def __init__(self, chat_stream: ChatStream):
|
||||
"""初始化Normal Chat表达器
|
||||
|
||||
Args:
|
||||
chat_stream: 聊天流对象
|
||||
stream_name: 流名称
|
||||
"""
|
||||
self.chat_stream = chat_stream
|
||||
self.stream_name = get_chat_manager().get_stream_name(self.chat_stream.stream_id) or self.chat_stream.stream_id
|
||||
self.log_prefix = f"[{self.stream_name}]Normal表达器"
|
||||
|
||||
logger.debug(f"{self.log_prefix} 初始化完成")
|
||||
|
||||
async def create_thinking_message(
|
||||
self, anchor_message: Optional[MessageRecv], thinking_id: str
|
||||
) -> Optional[MessageThinking]:
|
||||
"""创建思考消息
|
||||
|
||||
Args:
|
||||
anchor_message: 锚点消息
|
||||
thinking_id: 思考ID
|
||||
|
||||
Returns:
|
||||
MessageThinking: 创建的思考消息,如果失败返回None
|
||||
"""
|
||||
if not anchor_message or not anchor_message.chat_stream:
|
||||
logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流")
|
||||
return None
|
||||
|
||||
messageinfo = anchor_message.message_info
|
||||
thinking_time_point = time.time()
|
||||
|
||||
bot_user_info = UserInfo(
|
||||
user_id=global_config.bot.qq_account,
|
||||
user_nickname=global_config.bot.nickname,
|
||||
platform=messageinfo.platform,
|
||||
)
|
||||
|
||||
thinking_message = MessageThinking(
|
||||
message_id=thinking_id,
|
||||
chat_stream=self.chat_stream,
|
||||
bot_user_info=bot_user_info,
|
||||
reply=anchor_message,
|
||||
thinking_start_time=thinking_time_point,
|
||||
)
|
||||
|
||||
await message_manager.add_message(thinking_message)
|
||||
logger.debug(f"{self.log_prefix} 创建思考消息: {thinking_id}")
|
||||
return thinking_message
|
||||
|
||||
async def send_response_messages(
|
||||
self,
|
||||
anchor_message: Optional[MessageRecv],
|
||||
response_set: List[Tuple[str, str]],
|
||||
thinking_id: str = "",
|
||||
display_message: str = "",
|
||||
) -> Optional[MessageSending]:
|
||||
"""发送回复消息
|
||||
|
||||
Args:
|
||||
anchor_message: 锚点消息
|
||||
response_set: 回复内容集合,格式为 [(type, content), ...]
|
||||
thinking_id: 思考ID
|
||||
display_message: 显示消息
|
||||
|
||||
Returns:
|
||||
MessageSending: 发送的第一条消息,如果失败返回None
|
||||
"""
|
||||
try:
|
||||
if not response_set:
|
||||
logger.warning(f"{self.log_prefix} 回复内容为空")
|
||||
return None
|
||||
|
||||
# 如果没有thinking_id,生成一个
|
||||
if not thinking_id:
|
||||
thinking_time_point = round(time.time(), 2)
|
||||
thinking_id = "mt" + str(thinking_time_point)
|
||||
|
||||
# 创建思考消息
|
||||
if anchor_message:
|
||||
await self.create_thinking_message(anchor_message, thinking_id)
|
||||
|
||||
# 创建消息集
|
||||
|
||||
mark_head = False
|
||||
is_emoji = False
|
||||
if len(response_set) == 0:
|
||||
return None
|
||||
message_id = f"{thinking_id}_{len(response_set)}"
|
||||
response_type, content = response_set[0]
|
||||
if len(response_set) > 1:
|
||||
message_segment = Seg(type="seglist", data=[Seg(type=t, data=c) for t, c in response_set])
|
||||
else:
|
||||
message_segment = Seg(type=response_type, data=content)
|
||||
if response_type == "emoji":
|
||||
is_emoji = True
|
||||
|
||||
bot_msg = await self._build_sending_message(
|
||||
message_id=message_id,
|
||||
message_segment=message_segment,
|
||||
thinking_id=thinking_id,
|
||||
anchor_message=anchor_message,
|
||||
thinking_start_time=time.time(),
|
||||
reply_to=mark_head,
|
||||
is_emoji=is_emoji,
|
||||
display_message=display_message,
|
||||
)
|
||||
logger.debug(f"{self.log_prefix} 添加{response_type}类型消息: {content}")
|
||||
|
||||
# 提交消息集
|
||||
if bot_msg:
|
||||
await message_manager.add_message(bot_msg)
|
||||
logger.info(
|
||||
f"{self.log_prefix} 成功发送 {response_type}类型消息: {str(content)[:200] + '...' if len(str(content)) > 200 else content}"
|
||||
)
|
||||
container = await message_manager.get_container(self.chat_stream.stream_id) # 使用 self.stream_id
|
||||
for msg in container.messages[:]:
|
||||
if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id:
|
||||
container.messages.remove(msg)
|
||||
logger.debug(f"[{self.stream_name}] 已移除未产生回复的思考消息 {thinking_id}")
|
||||
break
|
||||
return bot_msg
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 没有有效的消息被创建")
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 发送消息失败: {e}")
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
async def _build_sending_message(
|
||||
self,
|
||||
message_id: str,
|
||||
message_segment: Seg,
|
||||
thinking_id: str,
|
||||
anchor_message: Optional[MessageRecv],
|
||||
thinking_start_time: float,
|
||||
reply_to: bool = False,
|
||||
is_emoji: bool = False,
|
||||
display_message: str = "",
|
||||
) -> MessageSending:
|
||||
"""构建发送消息
|
||||
|
||||
Args:
|
||||
message_id: 消息ID
|
||||
message_segment: 消息段
|
||||
thinking_id: 思考ID
|
||||
anchor_message: 锚点消息
|
||||
thinking_start_time: 思考开始时间
|
||||
reply_to: 是否回复
|
||||
is_emoji: 是否为表情包
|
||||
|
||||
Returns:
|
||||
MessageSending: 构建的发送消息
|
||||
"""
|
||||
bot_user_info = UserInfo(
|
||||
user_id=global_config.bot.qq_account,
|
||||
user_nickname=global_config.bot.nickname,
|
||||
platform=anchor_message.message_info.platform if anchor_message else "unknown",
|
||||
)
|
||||
|
||||
message_sending = MessageSending(
|
||||
message_id=message_id,
|
||||
chat_stream=self.chat_stream,
|
||||
bot_user_info=bot_user_info,
|
||||
message_segment=message_segment,
|
||||
sender_info=self.chat_stream.user_info,
|
||||
reply=anchor_message if reply_to else None,
|
||||
thinking_start_time=thinking_start_time,
|
||||
is_emoji=is_emoji,
|
||||
display_message=display_message,
|
||||
)
|
||||
|
||||
return message_sending
|
||||
|
||||
async def deal_reply(
|
||||
self,
|
||||
cycle_timers: dict,
|
||||
action_data: Dict[str, Any],
|
||||
reasoning: str,
|
||||
anchor_message: MessageRecv,
|
||||
thinking_id: str,
|
||||
) -> Tuple[bool, Optional[str]]:
|
||||
"""处理回复动作 - 兼容focus_chat expressor API
|
||||
|
||||
Args:
|
||||
cycle_timers: 周期计时器(normal_chat中不使用)
|
||||
action_data: 动作数据,包含text、target、emojis等
|
||||
reasoning: 推理说明
|
||||
anchor_message: 锚点消息
|
||||
thinking_id: 思考ID
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Optional[str]]: (是否成功, 回复文本)
|
||||
"""
|
||||
try:
|
||||
response_set = []
|
||||
|
||||
# 处理文本内容
|
||||
text_content = action_data.get("text", "")
|
||||
if text_content:
|
||||
response_set.append(("text", text_content))
|
||||
|
||||
# 处理表情包
|
||||
emoji_content = action_data.get("emojis", "")
|
||||
if emoji_content:
|
||||
response_set.append(("emoji", emoji_content))
|
||||
|
||||
if not response_set:
|
||||
logger.warning(f"{self.log_prefix} deal_reply: 没有有效的回复内容")
|
||||
return False, None
|
||||
|
||||
# 发送消息
|
||||
result = await self.send_response_messages(
|
||||
anchor_message=anchor_message,
|
||||
response_set=response_set,
|
||||
thinking_id=thinking_id,
|
||||
)
|
||||
|
||||
if result:
|
||||
return True, text_content if text_content else "发送成功"
|
||||
else:
|
||||
return False, None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} deal_reply执行失败: {e}")
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
return False, None
|
||||
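# deal_reply 参数组织示意(演示 action_data 如何拆成 (type, content) 列表;内容为假设):
action_data = {"text": "好呀,一起玩", "emojis": "开心"}
response_set = []
if action_data.get("text"):
    response_set.append(("text", action_data["text"]))
if action_data.get("emojis"):
    response_set.append(("emoji", action_data["emojis"]))
print(response_set)  # [('text', '好呀,一起玩'), ('emoji', '开心')]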
@@ -1,123 +0,0 @@
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.chat.message_receive.message import MessageThinking
|
||||
from src.common.logger import get_logger
|
||||
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
|
||||
from src.chat.utils.utils import process_llm_response
|
||||
from src.plugin_system.apis import generator_api
|
||||
from src.chat.focus_chat.memory_activator import MemoryActivator
|
||||
|
||||
|
||||
logger = get_logger("normal_chat_response")
|
||||
|
||||
|
||||
class NormalChatGenerator:
|
||||
def __init__(self):
|
||||
model_config_1 = global_config.model.replyer_1.copy()
|
||||
model_config_2 = global_config.model.replyer_2.copy()
|
||||
|
||||
prob_first = global_config.chat.replyer_random_probability
|
||||
|
||||
model_config_1["weight"] = prob_first
|
||||
model_config_2["weight"] = 1.0 - prob_first
|
||||
|
||||
self.model_configs = [model_config_1, model_config_2]
|
||||
|
||||
self.model_sum = LLMRequest(model=global_config.model.memory_summary, temperature=0.7, request_type="relation")
|
||||
self.memory_activator = MemoryActivator()
|
||||
|
||||
async def generate_response(
|
||||
self,
|
||||
message: MessageThinking,
|
||||
available_actions=None,
|
||||
):
|
||||
logger.info(
|
||||
f"NormalChat思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
|
||||
)
|
||||
person_id = PersonInfoManager.get_person_id(
|
||||
message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
|
||||
)
|
||||
person_info_manager = get_person_info_manager()
|
||||
person_name = await person_info_manager.get_value(person_id, "person_name")
|
||||
relation_info = await person_info_manager.get_value(person_id, "short_impression")
|
||||
reply_to_str = f"{person_name}:{message.processed_plain_text}"
|
||||
|
||||
try:
|
||||
success, reply_set, prompt = await generator_api.generate_reply(
|
||||
chat_stream=message.chat_stream,
|
||||
reply_to=reply_to_str,
|
||||
relation_info=relation_info,
|
||||
available_actions=available_actions,
|
||||
enable_tool=global_config.tool.enable_in_normal_chat,
|
||||
model_configs=self.model_configs,
|
||||
request_type="normal.replyer",
|
||||
return_prompt=True,
|
||||
)
|
||||
|
||||
if not success or not reply_set:
|
||||
logger.info(f"对 {message.processed_plain_text} 的回复生成失败")
|
||||
return None
|
||||
|
||||
content = " ".join([item[1] for item in reply_set if item[0] == "text"])
|
||||
logger.debug(f"对 {message.processed_plain_text} 的回复:{content}")
|
||||
|
||||
if content:
|
||||
logger.info(f"{global_config.bot.nickname}的备选回复是:{content}")
|
||||
content = process_llm_response(content)
|
||||
|
||||
return content
|
||||
|
||||
except Exception:
|
||||
logger.exception("生成回复时出错")
|
||||
return None
|
||||
|
||||
return content
|
||||
|
||||
async def _get_emotion_tags(self, content: str, processed_plain_text: str):
|
||||
"""提取情感标签,结合立场和情绪"""
|
||||
try:
|
||||
# 构建提示词,结合回复内容、被回复的内容以及立场分析
|
||||
prompt = f"""
|
||||
请严格根据以下对话内容,完成以下任务:
|
||||
1. 判断回复者对被回复者观点的直接立场:
|
||||
- "支持":明确同意或强化被回复者观点
|
||||
- "反对":明确反驳或否定被回复者观点
|
||||
- "中立":不表达明确立场或无关回应
|
||||
2. 从"开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
|
||||
3. 按照"立场-情绪"的格式直接输出结果,例如:"反对-愤怒"
|
||||
4. 考虑回复者的人格设定为{global_config.personality.personality_core}
|
||||
|
||||
对话示例:
|
||||
被回复:「A就是笨」
|
||||
回复:「A明明很聪明」 → 反对-愤怒
|
||||
|
||||
当前对话:
|
||||
被回复:「{processed_plain_text}」
|
||||
回复:「{content}」
|
||||
|
||||
输出要求:
|
||||
- 只需输出"立场-情绪"结果,不要解释
|
||||
- 严格基于文字直接表达的对立关系判断
|
||||
"""
|
||||
|
||||
# 调用模型生成结果
|
||||
result, (reasoning_content, model_name) = await self.model_sum.generate_response_async(prompt)
|
||||
result = result.strip()
|
||||
|
||||
# 解析模型输出的结果
|
||||
if "-" in result:
|
||||
stance, emotion = result.split("-", 1)
|
||||
valid_stances = ["支持", "反对", "中立"]
|
||||
valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]
|
||||
if stance in valid_stances and emotion in valid_emotions:
|
||||
return stance, emotion # 返回有效的立场-情绪组合
|
||||
else:
|
||||
logger.debug(f"无效立场-情感组合:{result}")
|
||||
return "中立", "平静" # 默认返回中立-平静
|
||||
else:
|
||||
logger.debug(f"立场-情感格式错误:{result}")
|
||||
return "中立", "平静" # 格式错误时返回默认值
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"获取情感标签时出错: {e}")
|
||||
return "中立", "平静" # 出错时返回默认值
|
||||
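# "立场-情绪" 解析示意(对应 _get_emotion_tags 对模型输出的校验;result 为假设的模型返回):
result = "反对-愤怒"
valid_stances = ["支持", "反对", "中立"]
valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]
stance, emotion = result.split("-", 1) if "-" in result else ("中立", "平静")
if stance not in valid_stances or emotion not in valid_emotions:
    stance, emotion = "中立", "平静"
print(stance, emotion)  # 反对 愤怒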
@@ -1,308 +0,0 @@
|
||||
import json
|
||||
from typing import Dict, Any
|
||||
from rich.traceback import install
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.individuality.individuality import get_individuality
|
||||
from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
from src.chat.message_receive.message import MessageThinking
|
||||
from json_repair import repair_json
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
|
||||
import time
|
||||
import traceback
|
||||
|
||||
logger = get_logger("normal_chat_planner")
|
||||
|
||||
install(extra_lines=3)
|
||||
|
||||
|
||||
def init_prompt():
|
||||
Prompt(
|
||||
"""
|
||||
你的自我认知是:
|
||||
{self_info_block}
|
||||
请记住你的性格,身份和特点。
|
||||
|
||||
你是群内的一员,你现在正在参与群内的闲聊,以下是群内的聊天内容:
|
||||
{chat_context}
|
||||
|
||||
基于以上聊天上下文和用户的最新消息,选择最合适的action。
|
||||
|
||||
注意,除了下面动作选项之外,你在聊天中不能做其他任何事情,这是你能力的边界,现在请你选择合适的action:
|
||||
|
||||
{action_options_text}
|
||||
|
||||
重要说明:
|
||||
- "no_action" 表示只进行普通聊天回复,不执行任何额外动作
|
||||
- 其他action表示在普通回复的基础上,执行相应的额外动作
|
||||
|
||||
你必须从上面列出的可用action中选择一个,并说明原因。
|
||||
{moderation_prompt}
|
||||
|
||||
请以动作的输出要求,以严格的 JSON 格式输出,且仅包含 JSON 内容。不要有任何其他文字或解释:
|
||||
""",
|
||||
"normal_chat_planner_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
动作:{action_name}
|
||||
该动作的描述:{action_description}
|
||||
使用该动作的场景:
|
||||
{action_require}
|
||||
输出要求:
|
||||
{{
|
||||
"action": "{action_name}",{action_parameters}
|
||||
}}
|
||||
""",
|
||||
"normal_chat_action_prompt",
|
||||
)
|
||||
|
||||
|
||||
class NormalChatPlanner:
|
||||
def __init__(self, log_prefix: str, action_manager: ActionManager):
|
||||
self.log_prefix = log_prefix
|
||||
# LLM规划器配置
|
||||
self.planner_llm = LLMRequest(
|
||||
model=global_config.model.planner,
|
||||
request_type="normal.planner", # 用于normal_chat动作规划
|
||||
)
|
||||
|
||||
self.action_manager = action_manager
|
||||
|
||||
async def plan(self, message: MessageThinking, sender_name: str = "某人") -> Dict[str, Any]:
|
||||
"""
|
||||
Normal Chat 规划器: 使用LLM根据上下文决定做出什么动作。
|
||||
|
||||
参数:
|
||||
message: 思考消息对象
|
||||
sender_name: 发送者名称
|
||||
"""
|
||||
|
||||
action = "no_action" # 默认动作改为no_action
|
||||
reasoning = "规划器初始化默认"
|
||||
action_data = {}
|
||||
|
||||
try:
|
||||
# 设置默认值
|
||||
nickname_str = ""
|
||||
for nicknames in global_config.bot.alias_names:
|
||||
nickname_str += f"{nicknames},"
|
||||
name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。"
|
||||
|
||||
personality_block = get_individuality().get_personality_prompt(x_person=2, level=2)
|
||||
identity_block = get_individuality().get_identity_prompt(x_person=2, level=2)
|
||||
|
||||
self_info = name_block + personality_block + identity_block
|
||||
|
||||
# 获取当前可用的动作,使用Normal模式过滤
|
||||
current_available_actions = self.action_manager.get_using_actions_for_mode("normal")
|
||||
|
||||
# 注意:动作的激活判定现在在 normal_chat_action_modifier 中完成
|
||||
# 这里直接使用经过 action_modifier 处理后的最终动作集
|
||||
# 符合职责分离原则:ActionModifier负责动作管理,Planner专注于决策
|
||||
|
||||
# 如果没有可用动作,直接返回no_action
|
||||
if not current_available_actions:
|
||||
logger.debug(f"{self.log_prefix}规划器: 没有可用动作,返回no_action")
|
||||
return {
|
||||
"action_result": {
|
||||
"action_type": action,
|
||||
"action_data": action_data,
|
||||
"reasoning": reasoning,
|
||||
"is_parallel": True,
|
||||
},
|
||||
"chat_context": "",
|
||||
"action_prompt": "",
|
||||
}
|
||||
|
||||
# 构建normal_chat的上下文 (使用与normal_chat相同的prompt构建方法)
|
||||
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=message.chat_stream.stream_id,
|
||||
timestamp=time.time(),
|
||||
limit=global_config.chat.max_context_size,
|
||||
)
|
||||
|
||||
chat_context = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="relative",
|
||||
read_mark=0.0,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
# 构建planner的prompt
|
||||
prompt = await self.build_planner_prompt(
|
||||
self_info_block=self_info,
|
||||
chat_context=chat_context,
|
||||
current_available_actions=current_available_actions,
|
||||
)
|
||||
|
||||
if not prompt:
|
||||
logger.warning(f"{self.log_prefix}规划器: 构建提示词失败")
|
||||
return {
|
||||
"action_result": {
|
||||
"action_type": action,
|
||||
"action_data": action_data,
|
||||
"reasoning": reasoning,
|
||||
"is_parallel": False,
|
||||
},
|
||||
"chat_context": chat_context,
|
||||
"action_prompt": "",
|
||||
}
|
||||
|
||||
# 使用LLM生成动作决策
|
||||
try:
|
||||
content, (reasoning_content, model_name) = await self.planner_llm.generate_response_async(prompt)
|
||||
|
||||
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
|
||||
logger.info(f"{self.log_prefix}规划器原始响应: {content}")
|
||||
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
|
||||
logger.info(f"{self.log_prefix}规划器模型: {model_name}")
|
||||
|
||||
# 解析JSON响应
|
||||
try:
|
||||
# 尝试修复JSON
|
||||
fixed_json = repair_json(content)
|
||||
action_result = json.loads(fixed_json)
|
||||
|
||||
action = action_result.get("action", "no_action")
|
||||
reasoning = action_result.get("reasoning", "未提供原因")
|
||||
|
||||
# 提取其他参数作为action_data
|
||||
action_data = {k: v for k, v in action_result.items() if k not in ["action", "reasoning"]}
|
||||
|
||||
# 验证动作是否在可用动作列表中,或者是特殊动作
|
||||
if action not in current_available_actions:
|
||||
logger.warning(f"{self.log_prefix}规划器选择了不可用的动作: {action}, 回退到no_action")
|
||||
action = "no_action"
|
||||
reasoning = f"选择的动作{action}不在可用列表中,回退到no_action"
|
||||
action_data = {}
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
logger.warning(f"{self.log_prefix}规划器JSON解析失败: {e}, 内容: {content}")
|
||||
action = "no_action"
|
||||
reasoning = "JSON解析失败,使用默认动作"
|
||||
action_data = {}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}规划器LLM调用失败: {e}")
|
||||
action = "no_action"
|
||||
reasoning = "LLM调用失败,使用默认动作"
|
||||
action_data = {}
|
||||
|
||||
except Exception as outer_e:
|
||||
logger.error(f"{self.log_prefix}规划器异常: {outer_e}")
|
||||
# 设置异常时的默认值
|
||||
current_available_actions = {}
|
||||
chat_context = "无法获取聊天上下文"
|
||||
prompt = ""
|
||||
action = "no_action"
|
||||
reasoning = "规划器出现异常,使用默认动作"
|
||||
action_data = {}
|
||||
|
||||
# 检查动作是否支持并行执行
|
||||
is_parallel = False
|
||||
if action in current_available_actions:
|
||||
action_info = current_available_actions[action]
|
||||
is_parallel = action_info.get("parallel_action", False)
|
||||
|
||||
logger.debug(
|
||||
f"{self.log_prefix}规划器决策动作:{action}, 动作信息: '{action_data}', 理由: {reasoning}, 并行执行: {is_parallel}"
|
||||
)
|
||||
|
||||
# 恢复到默认动作集
|
||||
self.action_manager.restore_actions()
|
||||
logger.debug(
|
||||
f"{self.log_prefix}规划后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
|
||||
)
|
||||
|
||||
# 构建 action 记录
|
||||
action_record = {
|
||||
"action_type": action,
|
||||
"action_data": action_data,
|
||||
"reasoning": reasoning,
|
||||
"timestamp": time.time(),
|
||||
"model_name": model_name if "model_name" in locals() else None,
|
||||
}
|
||||
|
||||
action_result = {
|
||||
"action_type": action,
|
||||
"action_data": action_data,
|
||||
"reasoning": reasoning,
|
||||
"is_parallel": is_parallel,
|
||||
"action_record": json.dumps(action_record, ensure_ascii=False),
|
||||
}
|
||||
|
||||
plan_result = {
|
||||
"action_result": action_result,
|
||||
"chat_context": chat_context,
|
||||
"action_prompt": prompt,
|
||||
}
|
||||
|
||||
return plan_result
|
||||
|
||||
async def build_planner_prompt(
|
||||
self,
|
||||
self_info_block: str,
|
||||
chat_context: str,
|
||||
current_available_actions: Dict[str, Any],
|
||||
) -> str:
|
||||
"""构建 Normal Chat Planner LLM 的提示词"""
|
||||
try:
|
||||
# 构建动作选项文本
|
||||
action_options_text = ""
|
||||
|
||||
for action_name, action_info in current_available_actions.items():
|
||||
action_description = action_info.get("description", "")
|
||||
action_parameters = action_info.get("parameters", {})
|
||||
action_require = action_info.get("require", [])
|
||||
|
||||
if action_parameters:
|
||||
param_text = "\n"
|
||||
# print(action_parameters)
|
||||
for param_name, param_description in action_parameters.items():
|
||||
param_text += f' "{param_name}":"{param_description}"\n'
|
||||
param_text = param_text.rstrip("\n")
|
||||
else:
|
||||
param_text = ""
|
||||
|
||||
require_text = ""
|
||||
for require_item in action_require:
|
||||
require_text += f"- {require_item}\n"
|
||||
require_text = require_text.rstrip("\n")
|
||||
|
||||
# 构建单个动作的提示
|
||||
action_prompt = await global_prompt_manager.format_prompt(
|
||||
"normal_chat_action_prompt",
|
||||
action_name=action_name,
|
||||
action_description=action_description,
|
||||
action_parameters=param_text,
|
||||
action_require=require_text,
|
||||
)
|
||||
action_options_text += action_prompt + "\n\n"
|
||||
|
||||
# 审核提示
|
||||
moderation_prompt = "请确保你的回复符合平台规则,避免不当内容。"
|
||||
|
||||
# 使用模板构建最终提示词
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"normal_chat_planner_prompt",
|
||||
self_info_block=self_info_block,
|
||||
action_options_text=action_options_text,
|
||||
moderation_prompt=moderation_prompt,
|
||||
chat_context=chat_context,
|
||||
)
|
||||
|
||||
return prompt
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}构建Planner提示词失败: {e}")
|
||||
traceback.print_exc()
|
||||
return ""
|
||||
|
||||
|
||||
init_prompt()
|
||||
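# planner 输出解析示意(演示 repair_json + json.loads 对不完整 JSON 的容错;content 为假设的模型返回):
import json
from json_repair import repair_json  # 项目已使用的依赖

content = '{"action": "no_action", "reasoning": "闲聊即可"'  # 缺少右括号
action_result = json.loads(repair_json(content))
action = action_result.get("action", "no_action")
action_data = {k: v for k, v in action_result.items() if k not in ["action", "reasoning"]}
print(action, action_data)  # no_action {}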
@@ -1,30 +0,0 @@
|
||||
import time
|
||||
from src.config.config import global_config
|
||||
from src.common.message_repository import count_messages
|
||||
|
||||
|
||||
def get_recent_message_stats(minutes: int = 30, chat_id: str = None) -> dict:
|
||||
"""
|
||||
Args:
|
||||
minutes (int): 检索的分钟数,默认30分钟
|
||||
chat_id (str, optional): 指定的chat_id,仅统计该chat下的消息。为None时统计全部。
|
||||
Returns:
|
||||
dict: {"bot_reply_count": int, "total_message_count": int}
|
||||
"""
|
||||
|
||||
now = time.time()
|
||||
start_time = now - minutes * 60
|
||||
bot_id = global_config.bot.qq_account
|
||||
|
||||
filter_base = {"time": {"$gte": start_time}}
|
||||
if chat_id is not None:
|
||||
filter_base["chat_id"] = chat_id
|
||||
|
||||
# 总消息数
|
||||
total_message_count = count_messages(filter_base)
|
||||
# bot自身回复数
|
||||
bot_filter = filter_base.copy()
|
||||
bot_filter["user_id"] = bot_id
|
||||
bot_reply_count = count_messages(bot_filter)
|
||||
|
||||
return {"bot_reply_count": bot_reply_count, "total_message_count": total_message_count}
|
||||
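# 消息统计过滤条件构造示意(对应 get_recent_message_stats 的 filter 组装;数值为假设):
import time

minutes, chat_id, bot_id = 30, "example_chat_id", "123456"
start_time = time.time() - minutes * 60
filter_base = {"time": {"$gte": start_time}, "chat_id": chat_id}
bot_filter = dict(filter_base, user_id=bot_id)  # 仅统计 bot 自身回复
print(filter_base, bot_filter)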
@@ -33,28 +33,10 @@ class ClassicalWillingManager(BaseWillingManager):
|
||||
if willing_info.is_mentioned_bot:
|
||||
current_willing += 1 if current_willing < 1.0 else 0.05
|
||||
|
||||
is_emoji_not_reply = False
|
||||
if willing_info.is_emoji:
|
||||
if global_config.normal_chat.emoji_response_penalty != 0:
|
||||
current_willing *= global_config.normal_chat.emoji_response_penalty
|
||||
else:
|
||||
is_emoji_not_reply = True
|
||||
|
||||
# 处理picid格式消息,直接不回复
|
||||
is_picid_not_reply = False
|
||||
if willing_info.is_picid:
|
||||
is_picid_not_reply = True
|
||||
|
||||
self.chat_reply_willing[chat_id] = min(current_willing, 3.0)
|
||||
|
||||
reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1)
|
||||
|
||||
if is_emoji_not_reply:
|
||||
reply_probability = 0
|
||||
|
||||
if is_picid_not_reply:
|
||||
reply_probability = 0
|
||||
|
||||
return reply_probability
|
||||
|
||||
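# 意愿 -> 回复概率 映射示意(对应上面 Classical 策略的公式;数值为假设):
current_willing = 1.2
reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1)
print(reply_probability)  # 1;若意愿为 0.3,则结果为 0.02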
async def before_generate_reply_handle(self, message_id):
|
||||
@@ -71,8 +53,5 @@ class ClassicalWillingManager(BaseWillingManager):
|
||||
if current_willing < 1:
|
||||
self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.4)
|
||||
|
||||
async def bombing_buffer_message_handle(self, message_id):
|
||||
return await super().bombing_buffer_message_handle(message_id)
|
||||
|
||||
async def not_reply_handle(self, message_id):
|
||||
return await super().not_reply_handle(message_id)
|
||||
|
||||
@@ -17,8 +17,5 @@ class CustomWillingManager(BaseWillingManager):
|
||||
async def get_reply_probability(self, message_id: str):
|
||||
pass
|
||||
|
||||
async def bombing_buffer_message_handle(self, message_id: str):
|
||||
pass
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
@@ -19,7 +19,6 @@ Mxp 模式:梦溪畔独家赞助
|
||||
下下策是询问一个菜鸟(@梦溪畔)
|
||||
"""
|
||||
|
||||
from src.config.config import global_config
|
||||
from .willing_manager import BaseWillingManager
|
||||
from typing import Dict
|
||||
import asyncio
|
||||
@@ -173,22 +172,10 @@ class MxpWillingManager(BaseWillingManager):
|
||||
|
||||
probability = self._willing_to_probability(current_willing)
|
||||
|
||||
if w_info.is_emoji:
|
||||
probability *= global_config.normal_chat.emoji_response_penalty
|
||||
|
||||
if w_info.is_picid:
|
||||
probability = 0 # picid格式消息直接不回复
|
||||
|
||||
self.temporary_willing = current_willing
|
||||
|
||||
return probability
|
||||
|
||||
async def bombing_buffer_message_handle(self, message_id: str):
|
||||
"""炸飞消息处理"""
|
||||
async with self.lock:
|
||||
w_info = self.ongoing_messages[message_id]
|
||||
self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += 0.1
|
||||
|
||||
async def _return_to_basic_willing(self):
|
||||
"""使每个人的意愿恢复到chat基础意愿"""
|
||||
while True:
|
||||
|
||||
@@ -20,7 +20,6 @@ before_generate_reply_handle 确定要回复后,在生成回复前的处理
|
||||
after_generate_reply_handle 确定要回复后,在生成回复后的处理
|
||||
not_reply_handle 确定不回复后的处理
|
||||
get_reply_probability 获取回复概率
|
||||
bombing_buffer_message_handle 缓冲器炸飞消息后的处理
|
||||
get_variable_parameters 暂不确定
|
||||
set_variable_parameters 暂不确定
|
||||
以下2个方法根据你的实现可以做调整:
|
||||
@@ -137,11 +136,6 @@ class BaseWillingManager(ABC):
|
||||
"""抽象方法:获取回复概率"""
|
||||
raise NotImplementedError
|
||||
|
||||
@abstractmethod
|
||||
async def bombing_buffer_message_handle(self, message_id: str):
|
||||
"""抽象方法:炸飞消息处理"""
|
||||
pass
|
||||
|
||||
async def get_willing(self, chat_id: str):
|
||||
"""获取指定聊天流的回复意愿"""
|
||||
async with self.lock:
|
||||
|
||||
@@ -292,10 +292,6 @@ class ActionManager:
|
||||
)
|
||||
self._using_actions = self._default_actions.copy()
|
||||
|
||||
def restore_default_actions(self) -> None:
|
||||
"""恢复默认动作集到使用集"""
|
||||
self._using_actions = self._default_actions.copy()
|
||||
|
||||
def add_system_action_if_needed(self, action_name: str) -> bool:
|
||||
"""
|
||||
根据需要添加系统动作到使用集
|
||||
@@ -1,8 +1,6 @@
|
||||
from typing import List, Optional, Any, Dict
|
||||
from src.chat.heart_flow.observation.observation import Observation
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
|
||||
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
from src.chat.focus_chat.focus_loop_info import FocusLoopInfo
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.config.config import global_config
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
@@ -10,7 +8,8 @@ import random
|
||||
import asyncio
|
||||
import hashlib
|
||||
import time
|
||||
from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
from src.chat.planner_actions.action_manager import ActionManager
|
||||
from src.chat.utils.chat_message_builder import get_raw_msg_before_timestamp_with_chat, build_readable_messages
|
||||
|
||||
logger = get_logger("action_manager")
|
||||
|
||||
@@ -23,12 +22,13 @@ class ActionModifier:
|
||||
支持并行判定和智能缓存优化。
|
||||
"""
|
||||
|
||||
log_prefix = "动作处理"
|
||||
|
||||
def __init__(self, action_manager: ActionManager):
|
||||
def __init__(self, action_manager: ActionManager, chat_id: str):
|
||||
"""初始化动作处理器"""
|
||||
self.chat_id = chat_id
|
||||
self.chat_stream = get_chat_manager().get_stream(self.chat_id)
|
||||
self.log_prefix = f"[{get_chat_manager().get_stream_name(self.chat_id) or self.chat_id}]"
|
||||
|
||||
self.action_manager = action_manager
|
||||
self.all_actions = self.action_manager.get_using_actions_for_mode("focus")
|
||||
|
||||
# 用于LLM判定的小模型
|
||||
self.llm_judge = LLMRequest(
|
||||
@@ -43,11 +43,12 @@ class ActionModifier:
|
||||
|
||||
async def modify_actions(
|
||||
self,
|
||||
observations: Optional[List[Observation]] = None,
|
||||
**kwargs: Any,
|
||||
loop_info=None,
|
||||
mode: str = "focus",
|
||||
message_content: str = "",
|
||||
):
|
||||
"""
|
||||
完整的动作修改流程,整合传统观察处理和新的激活类型判定
|
||||
动作修改流程,整合传统观察处理和新的激活类型判定
|
||||
|
||||
这个方法处理完整的动作管理流程:
|
||||
1. 基于观察的传统动作修改(循环历史分析、类型匹配等)
|
||||
@@ -57,230 +58,150 @@ class ActionModifier:
|
||||
"""
|
||||
logger.debug(f"{self.log_prefix}开始完整动作修改流程")
|
||||
|
||||
# === 第一阶段:传统观察处理 ===
|
||||
chat_content = None
|
||||
removals_s1 = []
|
||||
removals_s2 = []
|
||||
|
||||
if observations:
|
||||
hfc_obs = None
|
||||
chat_obs = None
|
||||
self.action_manager.restore_actions()
|
||||
all_actions = self.action_manager.get_using_actions_for_mode(mode)
|
||||
|
||||
# 收集所有观察对象
|
||||
for obs in observations:
|
||||
if isinstance(obs, HFCloopObservation):
|
||||
hfc_obs = obs
|
||||
if isinstance(obs, ChattingObservation):
|
||||
chat_obs = obs
|
||||
chat_content = obs.talking_message_str_truncate_short
|
||||
|
||||
# 合并所有动作变更
|
||||
merged_action_changes = {"add": [], "remove": []}
|
||||
reasons = []
|
||||
|
||||
# 处理HFCloopObservation - 传统的循环历史分析
|
||||
if hfc_obs:
|
||||
obs = hfc_obs
|
||||
# 获取适用于FOCUS模式的动作
|
||||
all_actions = self.all_actions
|
||||
action_changes = await self.analyze_loop_actions(obs)
|
||||
if action_changes["add"] or action_changes["remove"]:
|
||||
# 合并动作变更
|
||||
merged_action_changes["add"].extend(action_changes["add"])
|
||||
merged_action_changes["remove"].extend(action_changes["remove"])
|
||||
reasons.append("基于循环历史分析")
|
||||
|
||||
# 详细记录循环历史分析的变更原因
|
||||
for action_name in action_changes["add"]:
|
||||
logger.info(f"{self.log_prefix}添加动作: {action_name},原因: 循环历史分析建议添加")
|
||||
for action_name in action_changes["remove"]:
|
||||
logger.info(f"{self.log_prefix}移除动作: {action_name},原因: 循环历史分析建议移除")
|
||||
|
||||
# 处理ChattingObservation - 传统的类型匹配检查
|
||||
if chat_obs:
|
||||
# 检查动作的关联类型
|
||||
chat_context = get_chat_manager().get_stream(chat_obs.chat_id).context
|
||||
type_mismatched_actions = []
|
||||
|
||||
for action_name in all_actions.keys():
|
||||
data = all_actions[action_name]
|
||||
if data.get("associated_types"):
|
||||
if not chat_context.check_types(data["associated_types"]):
|
||||
type_mismatched_actions.append(action_name)
|
||||
associated_types_str = ", ".join(data["associated_types"])
|
||||
logger.info(
|
||||
f"{self.log_prefix}移除动作: {action_name},原因: 关联类型不匹配(需要: {associated_types_str})"
|
||||
message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=self.chat_stream.stream_id,
|
||||
timestamp=time.time(),
|
||||
limit=int(global_config.chat.max_context_size * 0.5),
|
||||
)
|
||||
chat_content = build_readable_messages(
|
||||
message_list_before_now_half,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="relative",
|
||||
read_mark=0.0,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
if message_content:
|
||||
chat_content = chat_content + "\n" + f"现在,最新的消息是:{message_content}"
|
||||
|
||||
# === 第一阶段:传统观察处理 ===
|
||||
if loop_info:
|
||||
removals_from_loop = await self.analyze_loop_actions(loop_info)
|
||||
if removals_from_loop:
|
||||
removals_s1.extend(removals_from_loop)
|
||||
|
||||
# 检查动作的关联类型
|
||||
chat_context = self.chat_stream.context
|
||||
type_mismatched_actions = self._check_action_associated_types(all_actions, chat_context)
|
||||
|
||||
if type_mismatched_actions:
|
||||
# 合并到移除列表中
|
||||
merged_action_changes["remove"].extend(type_mismatched_actions)
|
||||
reasons.append("基于关联类型检查")
|
||||
removals_s1.extend(type_mismatched_actions)
|
||||
|
||||
# 应用传统的动作变更到ActionManager
|
||||
for action_name in merged_action_changes["add"]:
|
||||
if action_name in self.action_manager.get_registered_actions():
|
||||
self.action_manager.add_action_to_using(action_name)
|
||||
logger.debug(f"{self.log_prefix}应用添加动作: {action_name},原因集合: {reasons}")
|
||||
|
||||
for action_name in merged_action_changes["remove"]:
|
||||
# 应用第一阶段的移除
|
||||
for action_name, reason in removals_s1:
|
||||
self.action_manager.remove_action_from_using(action_name)
|
||||
logger.debug(f"{self.log_prefix}应用移除动作: {action_name},原因集合: {reasons}")
|
||||
|
||||
logger.info(
|
||||
f"{self.log_prefix}传统动作修改完成,当前使用动作: {list(self.action_manager.get_using_actions().keys())}"
|
||||
)
|
||||
|
||||
# 注释:已移除exit_focus_chat动作,现在由no_reply动作处理频率检测退出专注模式
|
||||
logger.debug(f"{self.log_prefix}阶段一移除动作: {action_name},原因: {reason}")
|
||||
|
||||
# === 第二阶段:激活类型判定 ===
|
||||
# 如果提供了聊天上下文,则进行激活类型判定
|
||||
if chat_content is not None:
|
||||
logger.debug(f"{self.log_prefix}开始激活类型判定阶段")
|
||||
|
||||
# 获取当前使用的动作集(经过第一阶段处理,且适用于FOCUS模式)
|
||||
current_using_actions = self.action_manager.get_using_actions()
|
||||
all_registered_actions = self.action_manager.get_registered_actions()
|
||||
# 获取当前使用的动作集(经过第一阶段处理)
|
||||
current_using_actions = self.action_manager.get_using_actions_for_mode(mode)
|
||||
|
||||
# 构建完整的动作信息
|
||||
current_actions_with_info = {}
|
||||
for action_name in current_using_actions.keys():
|
||||
if action_name in all_registered_actions:
|
||||
current_actions_with_info[action_name] = all_registered_actions[action_name]
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix}使用中的动作 {action_name} 未在已注册动作中找到")
|
||||
|
||||
# 应用激活类型判定
|
||||
final_activated_actions = await self._apply_activation_type_filtering(
|
||||
current_actions_with_info,
|
||||
# 获取因激活类型判定而需要移除的动作
|
||||
removals_s2 = await self._get_deactivated_actions_by_type(
|
||||
current_using_actions,
|
||||
mode,
|
||||
chat_content,
|
||||
)
|
||||
|
||||
# 更新ActionManager,移除未激活的动作
|
||||
actions_to_remove = []
|
||||
removal_reasons = {}
|
||||
|
||||
for action_name in current_using_actions.keys():
|
||||
if action_name not in final_activated_actions:
|
||||
actions_to_remove.append(action_name)
|
||||
# 确定移除原因
|
||||
if action_name in all_registered_actions:
|
||||
action_info = all_registered_actions[action_name]
|
||||
activation_type = action_info.get("focus_activation_type", "always")
|
||||
|
||||
# 处理字符串格式的激活类型值
|
||||
if activation_type == "random":
|
||||
probability = action_info.get("random_probability", 0.3)
|
||||
removal_reasons[action_name] = f"RANDOM类型未触发(概率{probability})"
|
||||
elif activation_type == "llm_judge":
|
||||
removal_reasons[action_name] = "LLM判定未激活"
|
||||
elif activation_type == "keyword":
|
||||
keywords = action_info.get("activation_keywords", [])
|
||||
removal_reasons[action_name] = f"关键词未匹配(关键词: {keywords})"
|
||||
else:
|
||||
removal_reasons[action_name] = "激活判定未通过"
|
||||
else:
|
||||
removal_reasons[action_name] = "动作信息不完整"
|
||||
|
||||
for action_name in actions_to_remove:
|
||||
# 应用第二阶段的移除
|
||||
for action_name, reason in removals_s2:
|
||||
self.action_manager.remove_action_from_using(action_name)
|
||||
reason = removal_reasons.get(action_name, "未知原因")
|
||||
logger.info(f"{self.log_prefix}移除动作: {action_name},原因: {reason}")
|
||||
logger.debug(f"{self.log_prefix}阶段二移除动作: {action_name},原因: {reason}")
|
||||
|
||||
# 注释:已完全移除exit_focus_chat动作
|
||||
|
||||
logger.info(f"{self.log_prefix}激活类型判定完成,最终可用动作: {list(final_activated_actions.keys())}")
|
||||
# === 统一日志记录 ===
|
||||
all_removals = removals_s1 + removals_s2
|
||||
if all_removals:
|
||||
removals_summary = " | ".join([f"{name}({reason})" for name, reason in all_removals])
|
||||
|
||||
logger.info(
|
||||
f"{self.log_prefix}完整动作修改流程结束,最终动作集: {list(self.action_manager.get_using_actions().keys())}"
|
||||
f"{self.log_prefix}{mode}模式动作修改流程结束,最终可用动作: {list(self.action_manager.get_using_actions_for_mode(mode).keys())}||移除记录: {removals_summary}"
|
||||
)
|
||||
|
||||
    async def _apply_activation_type_filtering(
    def _check_action_associated_types(self, all_actions, chat_context):
        type_mismatched_actions = []
        for action_name, data in all_actions.items():
            if data.get("associated_types"):
                if not chat_context.check_types(data["associated_types"]):
                    associated_types_str = ", ".join(data["associated_types"])
                    reason = f"适配器不支持(需要: {associated_types_str})"
                    type_mismatched_actions.append((action_name, reason))
                    logger.debug(f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}")
        return type_mismatched_actions

    async def _get_deactivated_actions_by_type(
        self,
        actions_with_info: Dict[str, Any],
        mode: str = "focus",
        chat_content: str = "",
    ) -> Dict[str, Any]:
    ) -> List[tuple[str, str]]:
        """
        应用激活类型过滤逻辑,支持四种激活类型的并行处理
        根据激活类型过滤,返回需要停用的动作列表及原因

        Args:
            actions_with_info: 带完整信息的动作字典
            chat_content: 聊天内容

        Returns:
            Dict[str, Any]: 过滤后激活的actions字典
            List[Tuple[str, str]]: 需要停用的 (action_name, reason) 元组列表
        """
        activated_actions = {}
        deactivated_actions = []
|
||||
|
||||
# 分类处理不同激活类型的actions
|
||||
always_actions = {}
|
||||
random_actions = {}
|
||||
llm_judge_actions = {}
|
||||
keyword_actions = {}
|
||||
|
||||
for action_name, action_info in actions_with_info.items():
|
||||
activation_type = action_info.get("focus_activation_type", "always")
|
||||
actions_to_check = list(actions_with_info.items())
|
||||
random.shuffle(actions_to_check)
|
||||
|
||||
# print(f"action_name: {action_name}, activation_type: {activation_type}")
|
||||
for action_name, action_info in actions_to_check:
|
||||
activation_type = f"{mode}_activation_type"
|
||||
activation_type = action_info.get(activation_type, "always")
|
||||
|
||||
# 现在统一是字符串格式的激活类型值
|
||||
if activation_type == "always":
|
||||
always_actions[action_name] = action_info
|
||||
continue # 总是激活,无需处理
|
||||
|
||||
elif activation_type == "random":
|
||||
random_actions[action_name] = action_info
|
||||
probability = action_info.get("random_activation_probability", ActionManager.DEFAULT_RANDOM_PROBABILITY)
|
||||
if not (random.random() < probability):
|
||||
reason = f"RANDOM类型未触发(概率{probability})"
|
||||
deactivated_actions.append((action_name, reason))
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: {reason}")
|
||||
|
||||
elif activation_type == "keyword":
|
||||
if not self._check_keyword_activation(action_name, action_info, chat_content):
|
||||
keywords = action_info.get("activation_keywords", [])
|
||||
reason = f"关键词未匹配(关键词: {keywords})"
|
||||
deactivated_actions.append((action_name, reason))
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: {reason}")
|
||||
|
||||
elif activation_type == "llm_judge":
|
||||
llm_judge_actions[action_name] = action_info
|
||||
elif activation_type == "keyword":
|
||||
keyword_actions[action_name] = action_info
|
||||
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix}未知的激活类型: {activation_type},跳过处理")
|
||||
|
||||
# 1. 处理ALWAYS类型(直接激活)
|
||||
for action_name, action_info in always_actions.items():
|
||||
activated_actions[action_name] = action_info
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: ALWAYS类型直接激活")
|
||||
|
||||
# 2. 处理RANDOM类型
|
||||
for action_name, action_info in random_actions.items():
|
||||
probability = action_info.get("random_activation_probability", ActionManager.DEFAULT_RANDOM_PROBABILITY)
|
||||
should_activate = random.random() < probability
|
||||
if should_activate:
|
||||
activated_actions[action_name] = action_info
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: RANDOM类型触发(概率{probability})")
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: RANDOM类型未触发(概率{probability})")
|
||||
|
||||
# 3. 处理KEYWORD类型(快速判定)
|
||||
for action_name, action_info in keyword_actions.items():
|
||||
should_activate = self._check_keyword_activation(
|
||||
action_name,
|
||||
action_info,
|
||||
chat_content,
|
||||
)
|
||||
if should_activate:
|
||||
activated_actions[action_name] = action_info
|
||||
keywords = action_info.get("activation_keywords", [])
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: KEYWORD类型匹配关键词({keywords})")
|
||||
else:
|
||||
keywords = action_info.get("activation_keywords", [])
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: KEYWORD类型未匹配关键词({keywords})")
|
||||
|
||||
# 4. 处理LLM_JUDGE类型(并行判定)
|
||||
# 并行处理LLM_JUDGE类型
|
||||
if llm_judge_actions:
|
||||
# 直接并行处理所有LLM判定actions
|
||||
llm_results = await self._process_llm_judge_actions_parallel(
|
||||
llm_judge_actions,
|
||||
chat_content,
|
||||
)
|
||||
|
||||
# 添加激活的LLM判定actions
|
||||
for action_name, should_activate in llm_results.items():
|
||||
if should_activate:
|
||||
activated_actions[action_name] = llm_judge_actions[action_name]
|
||||
logger.debug(f"{self.log_prefix}激活动作: {action_name},原因: LLM_JUDGE类型判定通过")
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: LLM_JUDGE类型判定未通过")
|
||||
if not should_activate:
|
||||
reason = "LLM判定未激活"
|
||||
deactivated_actions.append((action_name, reason))
|
||||
logger.debug(f"{self.log_prefix}未激活动作: {action_name},原因: {reason}")
|
||||
|
||||
logger.debug(f"{self.log_prefix}激活类型过滤完成: {list(activated_actions.keys())}")
|
||||
return activated_actions
|
||||
return deactivated_actions
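A minimal, self-contained sketch of the per-mode activation filtering returned above. The field names (`focus_activation_type`, `random_activation_probability`, `activation_keywords`) are taken from this diff; the dispatch below is an illustration under the assumption of plain substring keyword matching, not the actual `_get_deactivated_actions_by_type`:

```python
import random
from typing import Any, Dict, List, Tuple

def sketch_deactivated_actions(
    actions: Dict[str, Dict[str, Any]], mode: str, chat_content: str
) -> List[Tuple[str, str]]:
    removed: List[Tuple[str, str]] = []
    for name, info in actions.items():
        activation = info.get(f"{mode}_activation_type", "always")
        if activation == "always":
            continue  # 总是激活
        if activation == "random":
            p = info.get("random_activation_probability", 0.3)
            if random.random() >= p:  # 未触发随机激活
                removed.append((name, f"RANDOM类型未触发(概率{p})"))
        elif activation == "keyword":
            keywords = info.get("activation_keywords", [])
            if not any(k in chat_content for k in keywords):
                removed.append((name, f"关键词未匹配(关键词: {keywords})"))
        # llm_judge 在真实实现中由并行 LLM 判定决定,此处略去
    return removed

print(sketch_deactivated_actions(
    {"emoji": {"focus_activation_type": "keyword", "activation_keywords": ["表情", "贴图"]}},
    mode="focus",
    chat_content="来个表情包",
))
```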
|
||||
|
||||
async def process_actions_for_planner(
|
||||
self, observed_messages_str: str = "", chat_context: Optional[str] = None, extra_context: Optional[str] = None
|
||||
@@ -538,22 +459,19 @@ class ActionModifier:
|
||||
logger.debug(f"{self.log_prefix}动作 {action_name} 未匹配到任何关键词: {activation_keywords}")
|
||||
return False
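A minimal stand-in for the keyword check used here; the real `_check_keyword_activation` may additionally normalize case or honor other options, so this is only an assumption-laden sketch:

```python
def keyword_hit(chat_content: str, activation_keywords: list[str]) -> bool:
    # 命中任意一个关键词即认为激活(简化示意)
    return any(keyword in chat_content for keyword in activation_keywords)

assert keyword_hit("帮我画一张图", ["画", "绘图"])
assert not keyword_hit("今天天气不错", ["画", "绘图"])
```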
|
||||
|
||||
    async def analyze_loop_actions(self, obs: HFCloopObservation) -> Dict[str, List[str]]:
        """分析最近的循环内容并决定动作的增减
    async def analyze_loop_actions(self, obs: FocusLoopInfo) -> List[tuple[str, str]]:
        """分析最近的循环内容并决定动作的移除

        Returns:
            Dict[str, List[str]]: 包含要增加和删除的动作
            {
                "add": ["action1", "action2"],
                "remove": ["action3"]
            }
            List[Tuple[str, str]]: 包含要删除的动作及原因的元组列表
            [("action3", "some reason")]
        """
        result = {"add": [], "remove": []}
        removals = []

        # 获取最近10次循环
        recent_cycles = obs.history_loop[-10:] if len(obs.history_loop) > 10 else obs.history_loop
        if not recent_cycles:
            return result
            return removals
|
||||
|
||||
reply_sequence = [] # 记录最近的动作序列
|
||||
|
||||
@@ -584,36 +502,41 @@ class ActionModifier:
|
||||
# 根据最近的reply情况决定是否移除reply动作
|
||||
if len(last_max_reply_num) >= max_reply_num and all(last_max_reply_num):
|
||||
# 如果最近max_reply_num次都是reply,直接移除
|
||||
result["remove"].append("reply")
|
||||
reason = f"连续回复过多(最近{len(last_max_reply_num)}次全是reply,超过阈值{max_reply_num})"
|
||||
removals.append(("reply", reason))
|
||||
# reply_count = len(last_max_reply_num) - no_reply_count
|
||||
logger.info(
|
||||
f"{self.log_prefix}移除reply动作,原因: 连续回复过多(最近{len(last_max_reply_num)}次全是reply,超过阈值{max_reply_num})"
|
||||
)
|
||||
elif len(last_max_reply_num) >= sec_thres_reply_num and all(last_max_reply_num[-sec_thres_reply_num:]):
|
||||
# 如果最近sec_thres_reply_num次都是reply,40%概率移除
|
||||
removal_probability = 0.4 / global_config.focus_chat.consecutive_replies
|
||||
if random.random() < removal_probability:
|
||||
result["remove"].append("reply")
|
||||
logger.info(
|
||||
f"{self.log_prefix}移除reply动作,原因: 连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
|
||||
)
|
||||
else:
|
||||
logger.debug(
|
||||
f"{self.log_prefix}连续回复检测:最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,未触发"
|
||||
reason = (
|
||||
f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
|
||||
)
|
||||
removals.append(("reply", reason))
|
||||
elif len(last_max_reply_num) >= one_thres_reply_num and all(last_max_reply_num[-one_thres_reply_num:]):
|
||||
# 如果最近one_thres_reply_num次都是reply,20%概率移除
|
||||
removal_probability = 0.2 / global_config.focus_chat.consecutive_replies
|
||||
if random.random() < removal_probability:
|
||||
result["remove"].append("reply")
|
||||
logger.info(
|
||||
f"{self.log_prefix}移除reply动作,原因: 连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
|
||||
)
|
||||
else:
|
||||
logger.debug(
|
||||
f"{self.log_prefix}连续回复检测:最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,未触发"
|
||||
reason = (
|
||||
f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
|
||||
)
|
||||
removals.append(("reply", reason))
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix}连续回复检测:无需移除reply动作,最近回复模式正常")
|
||||
|
||||
return result
|
||||
return removals
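For reference, the tiered removal above scales its base probabilities by `global_config.focus_chat.consecutive_replies`; a quick numeric sketch, where the value 2.0 is only an assumed example:

```python
consecutive_replies = 2.0  # 假设的配置值,仅作演示

# 连续 sec_thres_reply_num 次 reply:基础 0.4 概率,被 consecutive_replies 稀释
sec_removal_probability = 0.4 / consecutive_replies   # -> 0.2
# 连续 one_thres_reply_num 次 reply:基础 0.2 概率
one_removal_probability = 0.2 / consecutive_replies   # -> 0.1

print(sec_removal_probability, one_removal_probability)
```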
|
||||
|
||||
    def get_available_actions_count(self) -> int:
        """获取当前可用动作数量(排除默认的no_action)"""
        current_actions = self.action_manager.get_using_actions_for_mode("normal")
        # 排除no_action(如果存在)
        filtered_actions = {k: v for k, v in current_actions.items() if k != "no_action"}
        return len(filtered_actions)

    def should_skip_planning(self) -> bool:
        """判断是否应该跳过规划过程"""
        available_count = self.get_available_actions_count()
        if available_count == 0:
            logger.debug(f"{self.log_prefix} 没有可用动作,跳过规划")
            return True
        return False
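A hypothetical call site for the two helpers above; the surrounding loop and argument names are assumptions, not part of this diff:

```python
async def plan_step(action_modifier, planner):
    # 没有可用动作时直接跳过 LLM 规划,节省一次模型调用
    if action_modifier.should_skip_planning():
        return None
    return await planner.plan()
```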
@@ -1,19 +1,18 @@
|
||||
import json # <--- 确保导入 json
import traceback
from typing import List, Dict, Any, Optional
from typing import Dict, Any, Optional
from rich.traceback import install
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.obs_info import ObsInfo
from src.chat.focus_chat.info.action_info import ActionInfo
from src.common.logger import get_logger
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.planner_actions.action_manager import ActionManager
from json_repair import repair_json
from src.chat.focus_chat.planners.base_planner import BasePlanner
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.utils.utils import get_chat_type_and_target_info
from datetime import datetime
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
import time

logger = get_logger("planner")
@@ -29,34 +28,22 @@ def init_prompt():
|
||||
{chat_context_description},以下是具体的聊天内容:
|
||||
{chat_content_block}
|
||||
{moderation_prompt}
|
||||
现在请你根据聊天内容选择合适的action:
|
||||
|
||||
现在请你根据{by_what}选择合适的action:
|
||||
{no_action_block}
|
||||
{action_options_text}
|
||||
|
||||
你必须从上面列出的可用action中选择一个,并说明原因。
|
||||
|
||||
请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容:
|
||||
""",
|
||||
"simple_planner_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
{time_block}
|
||||
{indentify_block}
|
||||
你现在需要根据聊天内容,选择的合适的action来参与聊天。
|
||||
{chat_context_description},以下是具体的聊天内容:
|
||||
{chat_content_block}
|
||||
{moderation_prompt}
|
||||
现在请你选择合适的action:
|
||||
|
||||
{action_options_text}
|
||||
|
||||
请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容:
|
||||
""",
|
||||
"simple_planner_prompt_private",
|
||||
"planner_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
动作:{action_name}
|
||||
动作描述:{action_description}
|
||||
{action_require}
|
||||
{{
|
||||
"action": "{action_name}",{action_parameters}
|
||||
@@ -65,41 +52,24 @@ def init_prompt():
|
||||
"action_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
{action_require}
|
||||
{{
|
||||
"action": "{action_name}",{action_parameters}
|
||||
}}
|
||||
""",
|
||||
"action_prompt_private",
|
||||
)
|
||||
|
||||
|
||||
class ActionPlanner(BasePlanner):
|
||||
def __init__(self, log_prefix: str, action_manager: ActionManager):
|
||||
super().__init__(log_prefix, action_manager)
|
||||
class ActionPlanner:
|
||||
def __init__(self, chat_id: str, action_manager: ActionManager, mode: str = "focus"):
|
||||
self.chat_id = chat_id
|
||||
self.log_prefix = f"[{get_chat_manager().get_stream_name(chat_id) or chat_id}]"
|
||||
self.mode = mode
|
||||
self.action_manager = action_manager
|
||||
# LLM规划器配置
|
||||
self.planner_llm = LLMRequest(
|
||||
model=global_config.model.planner,
|
||||
request_type="focus.planner", # 用于动作规划
|
||||
request_type=f"{self.mode}.planner", # 用于动作规划
|
||||
)
|
||||
|
||||
self.utils_llm = LLMRequest(
|
||||
model=global_config.model.utils_small,
|
||||
request_type="focus.planner", # 用于动作规划
|
||||
)
|
||||
self.last_obs_time_mark = 0.0
|
||||
|
||||
async def plan(
|
||||
self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]], loop_start_time: float
|
||||
) -> Dict[str, Any]:
|
||||
async def plan(self) -> Dict[str, Any]:
|
||||
"""
|
||||
规划器 (Planner): 使用LLM根据上下文决定做出什么动作。
|
||||
|
||||
参数:
|
||||
all_plan_info: 所有计划信息
|
||||
running_memorys: 回忆信息
|
||||
loop_start_time: 循环开始时间
|
||||
"""
|
||||
|
||||
action = "no_reply" # 默认动作
|
||||
@@ -107,47 +77,12 @@ class ActionPlanner(BasePlanner):
|
||||
action_data = {}
|
||||
|
||||
try:
|
||||
# 获取观察信息
|
||||
extra_info: list[str] = []
|
||||
|
||||
extra_info = []
|
||||
observed_messages = []
|
||||
observed_messages_str = ""
|
||||
chat_type = "group"
|
||||
is_group_chat = True
|
||||
chat_id = None # 添加chat_id变量
|
||||
|
||||
for info in all_plan_info:
|
||||
if isinstance(info, ObsInfo):
|
||||
observed_messages = info.get_talking_message()
|
||||
observed_messages_str = info.get_talking_message_str_truncate_short()
|
||||
chat_type = info.get_chat_type()
|
||||
is_group_chat = chat_type == "group"
|
||||
# 从ObsInfo中获取chat_id
|
||||
chat_id = info.get_chat_id()
|
||||
else:
|
||||
extra_info.append(info.get_processed_info())
|
||||
is_group_chat, chat_target_info = get_chat_type_and_target_info(self.chat_id)
|
||||
logger.debug(f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}")
|
||||
|
||||
# 获取聊天类型和目标信息
|
||||
chat_target_info = None
|
||||
if chat_id:
|
||||
try:
|
||||
# 重新获取更准确的聊天信息
|
||||
is_group_chat_updated, chat_target_info = get_chat_type_and_target_info(chat_id)
|
||||
# 如果获取成功,更新is_group_chat
|
||||
if is_group_chat_updated is not None:
|
||||
is_group_chat = is_group_chat_updated
|
||||
logger.debug(
|
||||
f"{self.log_prefix}获取到聊天信息 - 群聊: {is_group_chat}, 目标信息: {chat_target_info}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(f"{self.log_prefix}获取聊天目标信息失败: {e}")
|
||||
chat_target_info = None
|
||||
|
||||
# 获取经过modify_actions处理后的最终可用动作集
|
||||
# 注意:动作的激活判定现在在主循环的modify_actions中完成
|
||||
# 使用Focus模式过滤动作
|
||||
current_available_actions_dict = self.action_manager.get_using_actions_for_mode("focus")
|
||||
current_available_actions_dict = self.action_manager.get_using_actions_for_mode(self.mode)
|
||||
|
||||
# 获取完整的动作信息
|
||||
all_registered_actions = self.action_manager.get_registered_actions()
|
||||
@@ -165,31 +100,29 @@ class ActionPlanner(BasePlanner):
|
||||
action = "no_reply"
|
||||
reasoning = "没有可用的动作" if not current_available_actions else "只有no_reply动作可用,跳过规划"
|
||||
logger.info(f"{self.log_prefix}{reasoning}")
|
||||
self.action_manager.restore_actions()
|
||||
logger.debug(
|
||||
f"{self.log_prefix}[focus]沉默后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
|
||||
)
|
||||
return {
|
||||
"action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning},
|
||||
"observed_messages": observed_messages,
|
||||
}
|
||||
|
||||
# --- 构建提示词 (调用修改后的 PromptBuilder 方法) ---
|
||||
prompt = await self.build_planner_prompt(
|
||||
is_group_chat=is_group_chat, # <-- Pass HFC state
|
||||
chat_target_info=chat_target_info, # <-- 传递获取到的聊天目标信息
|
||||
observed_messages_str=observed_messages_str, # <-- Pass local variable
|
||||
current_available_actions=current_available_actions, # <-- Pass determined actions
|
||||
)
|
||||
|
||||
# --- 调用 LLM (普通文本生成) ---
|
||||
llm_content = None
|
||||
try:
|
||||
prompt = f"{prompt}"
|
||||
llm_content, (reasoning_content, _) = await self.planner_llm.generate_response_async(prompt=prompt)
|
||||
|
||||
if global_config.debug.show_prompt:
|
||||
logger.info(f"{self.log_prefix}规划器原始提示词: {prompt}")
|
||||
logger.info(f"{self.log_prefix}规划器原始响应: {llm_content}")
|
||||
if reasoning_content:
|
||||
logger.info(f"{self.log_prefix}规划器推理: {reasoning_content}")
|
||||
|
||||
except Exception as req_e:
|
||||
@@ -199,34 +132,21 @@ class ActionPlanner(BasePlanner):
|
||||
|
||||
if llm_content:
|
||||
try:
|
||||
fixed_json_string = repair_json(llm_content)
|
||||
if isinstance(fixed_json_string, str):
|
||||
try:
|
||||
parsed_json = json.loads(fixed_json_string)
|
||||
except json.JSONDecodeError as decode_error:
|
||||
logger.error(f"JSON解析错误: {str(decode_error)}")
|
||||
parsed_json = {}
|
||||
else:
|
||||
# 如果repair_json直接返回了字典对象,直接使用
|
||||
parsed_json = fixed_json_string
|
||||
parsed_json = json.loads(repair_json(llm_content))
|
||||
|
||||
# 处理repair_json可能返回列表的情况
|
||||
if isinstance(parsed_json, list):
|
||||
if parsed_json:
|
||||
# 取列表中最后一个元素(通常是最完整的)
|
||||
parsed_json = parsed_json[-1]
|
||||
logger.warning(f"{self.log_prefix}LLM返回了多个JSON对象,使用最后一个: {parsed_json}")
|
||||
else:
|
||||
parsed_json = {}
|
||||
|
||||
# 确保parsed_json是字典
|
||||
if not isinstance(parsed_json, dict):
|
||||
logger.error(f"{self.log_prefix}解析后的JSON不是字典类型: {type(parsed_json)}")
|
||||
parsed_json = {}
|
||||
|
||||
# 提取决策,提供默认值
|
||||
extracted_action = parsed_json.get("action", "no_reply")
|
||||
extracted_reasoning = ""
|
||||
action = parsed_json.get("action", "no_reply")
|
||||
reasoning = parsed_json.get("reasoning", "未提供原因")
|
||||
|
||||
# 将所有其他属性添加到action_data
|
||||
action_data = {}
|
||||
@@ -234,20 +154,14 @@ class ActionPlanner(BasePlanner):
|
||||
if key not in ["action", "reasoning"]:
|
||||
action_data[key] = value
|
||||
|
||||
action_data["loop_start_time"] = loop_start_time
|
||||
|
||||
# 对于reply动作不需要额外处理,因为相关字段已经在上面的循环中添加到action_data
|
||||
|
||||
if extracted_action not in current_available_actions:
|
||||
if action == "no_action":
|
||||
reasoning = "normal决定不使用额外动作"
|
||||
elif action not in current_available_actions:
|
||||
logger.warning(
|
||||
f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
|
||||
f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
|
||||
)
|
||||
action = "no_reply"
|
||||
reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}"
|
||||
else:
|
||||
# 动作有效且可用
|
||||
action = extracted_action
|
||||
reasoning = extracted_reasoning
|
||||
reasoning = f"LLM 返回了当前不可用的动作 '{action}' (可用: {list(current_available_actions.keys())})。原始理由: {reasoning}"
|
||||
|
||||
except Exception as json_e:
|
||||
logger.warning(f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. LLM原始输出: '{llm_content}'")
|
||||
@@ -261,17 +175,21 @@ class ActionPlanner(BasePlanner):
|
||||
action = "no_reply"
|
||||
reasoning = f"Planner 内部处理错误: {outer_e}"
|
||||
|
||||
# 恢复到默认动作集
|
||||
self.action_manager.restore_actions()
|
||||
logger.debug(
|
||||
f"{self.log_prefix}规划后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
|
||||
)
|
||||
is_parallel = False
|
||||
if action in current_available_actions:
|
||||
action_info = current_available_actions[action]
|
||||
is_parallel = action_info.get("parallel_action", False)
|
||||
|
||||
action_result = {"action_type": action, "action_data": action_data, "reasoning": reasoning}
|
||||
action_result = {
|
||||
"action_type": action,
|
||||
"action_data": action_data,
|
||||
"reasoning": reasoning,
|
||||
"timestamp": time.time(),
|
||||
"is_parallel": is_parallel,
|
||||
}
|
||||
|
||||
plan_result = {
|
||||
"action_result": action_result,
|
||||
"observed_messages": observed_messages,
|
||||
"action_prompt": prompt,
|
||||
}
|
||||
|
||||
@@ -281,11 +199,35 @@ class ActionPlanner(BasePlanner):
|
||||
self,
|
||||
is_group_chat: bool, # Now passed as argument
|
||||
chat_target_info: Optional[dict], # Now passed as argument
|
||||
observed_messages_str: str,
|
||||
current_available_actions: Dict[str, ActionInfo],
|
||||
current_available_actions,
|
||||
) -> str:
|
||||
"""构建 Planner LLM 的提示词 (获取模板并填充数据)"""
|
||||
try:
|
||||
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=self.chat_id,
|
||||
timestamp=time.time(),
|
||||
limit=global_config.chat.max_context_size,
|
||||
)
|
||||
|
||||
chat_content_block = build_readable_messages(
|
||||
messages=message_list_before_now,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=self.last_obs_time_mark,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
self.last_obs_time_mark = time.time()
|
||||
|
||||
if self.mode == "focus":
|
||||
by_what = "聊天内容"
|
||||
no_action_block = ""
|
||||
else:
|
||||
by_what = "聊天内容和用户的最新消息"
|
||||
no_action_block = """重要说明:
|
||||
- 'no_action' 表示只进行普通聊天回复,不执行任何额外动作
|
||||
- 其他action表示在普通回复的基础上,执行相应的额外动作"""
|
||||
|
||||
chat_context_description = "你现在正在一个群聊中"
|
||||
chat_target_name = None # Only relevant for private
|
||||
if not is_group_chat and chat_target_info:
|
||||
@@ -294,19 +236,9 @@ class ActionPlanner(BasePlanner):
|
||||
)
|
||||
chat_context_description = f"你正在和 {chat_target_name} 私聊"
|
||||
|
||||
chat_content_block = ""
|
||||
if observed_messages_str:
|
||||
chat_content_block = f"\n{observed_messages_str}"
|
||||
else:
|
||||
chat_content_block = "你还未开始聊天"
|
||||
|
||||
action_options_block = ""
|
||||
# 根据聊天类型选择不同的动作prompt模板
|
||||
action_template_name = "action_prompt_private" if not is_group_chat else "action_prompt"
|
||||
|
||||
for using_actions_name, using_actions_info in current_available_actions.items():
|
||||
using_action_prompt = await global_prompt_manager.get_prompt_async(action_template_name)
|
||||
|
||||
if using_actions_info["parameters"]:
|
||||
param_text = "\n"
|
||||
for param_name, param_description in using_actions_info["parameters"].items():
|
||||
@@ -320,16 +252,7 @@ class ActionPlanner(BasePlanner):
|
||||
require_text += f"- {require_item}\n"
|
||||
require_text = require_text.rstrip("\n")
|
||||
|
||||
# 根据模板类型决定是否包含description参数
|
||||
if action_template_name == "action_prompt_private":
|
||||
# 私聊模板不包含description参数
|
||||
using_action_prompt = using_action_prompt.format(
|
||||
action_name=using_actions_name,
|
||||
action_parameters=param_text,
|
||||
action_require=require_text,
|
||||
)
|
||||
else:
|
||||
# 群聊模板包含description参数
|
||||
using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
|
||||
using_action_prompt = using_action_prompt.format(
|
||||
action_name=using_actions_name,
|
||||
action_description=using_actions_info["description"],
|
||||
@@ -339,10 +262,8 @@ class ActionPlanner(BasePlanner):
|
||||
|
||||
action_options_block += using_action_prompt
|
||||
|
||||
# moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
|
||||
moderation_prompt_block = ""
|
||||
moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
|
||||
|
||||
# 获取当前时间
|
||||
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
|
||||
|
||||
bot_name = global_config.bot.nickname
|
||||
@@ -353,13 +274,13 @@ class ActionPlanner(BasePlanner):
|
||||
bot_core_personality = global_config.personality.personality_core
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{bot_core_personality}:"
|
||||
|
||||
# 根据聊天类型选择不同的prompt模板
|
||||
template_name = "simple_planner_prompt_private" if not is_group_chat else "simple_planner_prompt"
|
||||
planner_prompt_template = await global_prompt_manager.get_prompt_async(template_name)
|
||||
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
|
||||
prompt = planner_prompt_template.format(
|
||||
time_block=time_block,
|
||||
by_what=by_what,
|
||||
chat_context_description=chat_context_description,
|
||||
chat_content_block=chat_content_block,
|
||||
no_action_block=no_action_block,
|
||||
action_options_text=action_options_block,
|
||||
moderation_prompt=moderation_prompt_block,
|
||||
indentify_block=indentify_block,
|
||||
@@ -9,8 +9,8 @@ from src.common.logger import get_logger
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
|
||||
from src.chat.focus_chat.heartFC_sender import HeartFCSender
|
||||
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
|
||||
from src.chat.message_receive.uni_message_sender import HeartFCSender
|
||||
from src.chat.utils.utils import get_chat_type_and_target_info
|
||||
from src.chat.message_receive.chat_stream import ChatStream
|
||||
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
@@ -26,7 +26,7 @@ from src.person_info.person_info import get_person_info_manager
|
||||
from datetime import datetime
|
||||
import re
|
||||
from src.chat.knowledge.knowledge_lib import qa_manager
|
||||
from src.chat.focus_chat.memory_activator import MemoryActivator
|
||||
from src.chat.memory_system.memory_activator import MemoryActivator
|
||||
from src.tools.tool_executor import ToolExecutor
|
||||
|
||||
logger = get_logger("replyer")
|
||||
@@ -92,15 +92,12 @@ class DefaultReplyer:
|
||||
def __init__(
|
||||
self,
|
||||
chat_stream: ChatStream,
|
||||
enable_tool: bool = False,
|
||||
model_configs: Optional[List[Dict[str, Any]]] = None,
|
||||
request_type: str = "focus.replyer",
|
||||
):
|
||||
self.log_prefix = "replyer"
|
||||
self.request_type = request_type
|
||||
|
||||
self.enable_tool = enable_tool
|
||||
|
||||
if model_configs:
|
||||
self.express_model_configs = model_configs
|
||||
else:
|
||||
@@ -170,9 +167,10 @@ class DefaultReplyer:
|
||||
self,
|
||||
reply_data: Dict[str, Any] = None,
|
||||
reply_to: str = "",
|
||||
relation_info: str = "",
|
||||
extra_info: str = "",
|
||||
available_actions: List[str] = None,
|
||||
enable_tool: bool = True,
|
||||
enable_timeout: bool = False,
|
||||
) -> Tuple[bool, Optional[str]]:
|
||||
"""
|
||||
回复器 (Replier): 核心逻辑,负责生成回复文本。
|
||||
@@ -186,7 +184,6 @@ class DefaultReplyer:
|
||||
if not reply_data:
|
||||
reply_data = {
|
||||
"reply_to": reply_to,
|
||||
"relation_info": relation_info,
|
||||
"extra_info": extra_info,
|
||||
}
|
||||
for key, value in reply_data.items():
|
||||
@@ -198,6 +195,8 @@ class DefaultReplyer:
|
||||
prompt = await self.build_prompt_reply_context(
|
||||
reply_data=reply_data, # 传递action_data
|
||||
available_actions=available_actions,
|
||||
enable_timeout=enable_timeout,
|
||||
enable_tool=enable_tool,
|
||||
)
|
||||
|
||||
# 4. 调用 LLM 生成回复
|
||||
@@ -218,7 +217,9 @@ class DefaultReplyer:
|
||||
request_type=self.request_type,
|
||||
)
|
||||
|
||||
if global_config.debug.show_prompt:
|
||||
logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n")
|
||||
|
||||
content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt)
|
||||
|
||||
logger.info(f"最终回复: {content}")
|
||||
@@ -255,8 +256,6 @@ class DefaultReplyer:
|
||||
|
||||
with Timer("构建Prompt", {}): # 内部计时器,可选保留
|
||||
prompt = await self.build_prompt_rewrite_context(
|
||||
raw_reply=raw_reply,
|
||||
reason=reason,
|
||||
reply_data=reply_data,
|
||||
)
|
||||
|
||||
@@ -313,7 +312,7 @@ class DefaultReplyer:
|
||||
person_id = person_info_manager.get_person_id_by_person_name(sender)
|
||||
if not person_id:
|
||||
logger.warning(f"{self.log_prefix} 未找到用户 {sender} 的ID,跳过信息提取")
|
||||
return None
|
||||
return f"你完全不认识{sender},不理解ta的相关信息。"
|
||||
|
||||
relation_info = await relationship_fetcher.build_relation_info(person_id, text, chat_history)
|
||||
return relation_info
|
||||
@@ -369,13 +368,12 @@ class DefaultReplyer:
|
||||
for running_memory in running_memorys:
|
||||
memory_str += f"- {running_memory['content']}\n"
|
||||
memory_block = memory_str
|
||||
logger.info(f"{self.log_prefix} 添加了 {len(running_memorys)} 个激活的记忆到prompt")
|
||||
else:
|
||||
memory_block = ""
|
||||
|
||||
return memory_block
|
||||
|
||||
async def build_tool_info(self, reply_data=None, chat_history=None):
|
||||
async def build_tool_info(self, reply_data=None, chat_history=None, enable_tool: bool = True):
|
||||
"""构建工具信息块
|
||||
|
||||
Args:
|
||||
@@ -386,6 +384,9 @@ class DefaultReplyer:
|
||||
str: 工具信息字符串
|
||||
"""
|
||||
|
||||
if not enable_tool:
|
||||
return ""
|
||||
|
||||
if not reply_data:
|
||||
return ""
|
||||
|
||||
@@ -462,7 +463,21 @@ class DefaultReplyer:
|
||||
|
||||
return keywords_reaction_prompt
|
||||
|
||||
async def build_prompt_reply_context(self, reply_data=None, available_actions: List[str] = None) -> str:
|
||||
async def _time_and_run_task(self, coro, name: str):
|
||||
"""一个简单的帮助函数,用于计时和运行异步任务,返回任务名、结果和耗时"""
|
||||
start_time = time.time()
|
||||
result = await coro
|
||||
end_time = time.time()
|
||||
duration = end_time - start_time
|
||||
return name, result, duration
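The helper above is consumed with `asyncio.gather` further down; a self-contained sketch of the same pattern, with illustrative names only:

```python
import asyncio
import time

async def time_and_run(coro, name: str):
    start = time.time()
    result = await coro
    return name, result, time.time() - start

async def demo():
    async def slow(x):
        await asyncio.sleep(0.1)
        return x * 2

    # 并行运行并分别计时两个任务
    results = await asyncio.gather(time_and_run(slow(1), "a"), time_and_run(slow(2), "b"))
    for name, result, duration in results:
        print(f"{name}: {result} ({duration:.3f}s)")

asyncio.run(demo())
```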
|
||||
|
||||
async def build_prompt_reply_context(
|
||||
self,
|
||||
reply_data=None,
|
||||
available_actions: List[str] = None,
|
||||
enable_timeout: bool = False,
|
||||
enable_tool: bool = True,
|
||||
) -> str:
|
||||
"""
|
||||
构建回复器上下文
|
||||
|
||||
@@ -528,13 +543,34 @@ class DefaultReplyer:
|
||||
)
|
||||
|
||||
# 并行执行四个构建任务
|
||||
expression_habits_block, relation_info, memory_block, tool_info = await asyncio.gather(
|
||||
self.build_expression_habits(chat_talking_prompt_half, target),
|
||||
self.build_relation_info(reply_data, chat_talking_prompt_half),
|
||||
self.build_memory_block(chat_talking_prompt_half, target),
|
||||
self.build_tool_info(reply_data, chat_talking_prompt_half),
|
||||
task_results = await asyncio.gather(
|
||||
self._time_and_run_task(
|
||||
self.build_expression_habits(chat_talking_prompt_half, target), "build_expression_habits"
|
||||
),
|
||||
self._time_and_run_task(
|
||||
self.build_relation_info(reply_data, chat_talking_prompt_half), "build_relation_info"
|
||||
),
|
||||
self._time_and_run_task(self.build_memory_block(chat_talking_prompt_half, target), "build_memory_block"),
|
||||
self._time_and_run_task(
|
||||
self.build_tool_info(reply_data, chat_talking_prompt_half, enable_tool=enable_tool), "build_tool_info"
|
||||
),
|
||||
)
|
||||
|
||||
# 处理结果
|
||||
timing_logs = []
|
||||
results_dict = {}
|
||||
for name, result, duration in task_results:
|
||||
results_dict[name] = result
|
||||
timing_logs.append(f"{name}: {duration:.4f}s")
|
||||
if duration > 8:
|
||||
logger.warning(f"回复生成前信息获取耗时过长: {name} 耗时: {duration:.4f}s,请使用更快的模型")
|
||||
logger.info(f"回复生成前信息获取耗时: {'; '.join(timing_logs)}")
|
||||
|
||||
expression_habits_block = results_dict["build_expression_habits"]
|
||||
relation_info = results_dict["build_relation_info"]
|
||||
memory_block = results_dict["build_memory_block"]
|
||||
tool_info = results_dict["build_tool_info"]
|
||||
|
||||
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
|
||||
|
||||
if tool_info:
|
||||
@@ -617,10 +653,10 @@ class DefaultReplyer:
|
||||
chat_target_name = (
|
||||
self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方"
|
||||
)
|
||||
chat_target_1 = await global_prompt_manager.get_prompt_async(
|
||||
chat_target_1 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private1", sender_name=chat_target_name
|
||||
)
|
||||
chat_target_2 = await global_prompt_manager.get_prompt_async(
|
||||
chat_target_2 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private2", sender_name=chat_target_name
|
||||
)
|
||||
|
||||
@@ -652,8 +688,6 @@ class DefaultReplyer:
|
||||
async def build_prompt_rewrite_context(
|
||||
self,
|
||||
reply_data: Dict[str, Any],
|
||||
raw_reply: str = "",
|
||||
reason: str = "",
|
||||
) -> str:
|
||||
chat_stream = self.chat_stream
|
||||
chat_id = chat_stream.stream_id
|
||||
@@ -662,6 +696,8 @@ class DefaultReplyer:
|
||||
is_group_chat = bool(chat_stream.group_info)
|
||||
|
||||
reply_to = reply_data.get("reply_to", "none")
|
||||
raw_reply = reply_data.get("raw_reply", "")
|
||||
reason = reply_data.get("reason", "")
|
||||
sender, target = self._parse_reply_target(reply_to)
|
||||
|
||||
message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
|
||||
@@ -747,10 +783,10 @@ class DefaultReplyer:
|
||||
chat_target_name = (
|
||||
self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方"
|
||||
)
|
||||
chat_target_1 = await global_prompt_manager.get_prompt_async(
|
||||
chat_target_1 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private1", sender_name=chat_target_name
|
||||
)
|
||||
chat_target_2 = await global_prompt_manager.get_prompt_async(
|
||||
chat_target_2 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private2", sender_name=chat_target_name
|
||||
)
|
||||
|
||||
@@ -818,7 +854,7 @@ class DefaultReplyer:
|
||||
type = msg_text[0]
|
||||
data = msg_text[1]
|
||||
|
||||
if global_config.experimental.debug_show_chat_mode and type == "text":
|
||||
if global_config.debug.debug_show_chat_mode and type == "text":
|
||||
data += "ᶠ"
|
||||
|
||||
part_message_id = f"{thinking_id}_{i}"
|
||||
@@ -958,6 +994,11 @@ async def get_prompt_info(message: str, threshold: float):
|
||||
logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")
|
||||
# 从LPMM知识库获取知识
|
||||
try:
|
||||
# 检查LPMM知识库是否启用
|
||||
if qa_manager is None:
|
||||
logger.debug("LPMM知识库已禁用,跳过知识获取")
|
||||
return ""
|
||||
|
||||
found_knowledge_from_lpmm = qa_manager.get_knowledge(message)
|
||||
|
||||
end_time = time.time()
|
||||
|
||||
@@ -14,7 +14,6 @@ class ReplyerManager:
|
||||
self,
|
||||
chat_stream: Optional[ChatStream] = None,
|
||||
chat_id: Optional[str] = None,
|
||||
enable_tool: bool = False,
|
||||
model_configs: Optional[List[Dict[str, Any]]] = None,
|
||||
request_type: str = "replyer",
|
||||
) -> Optional[DefaultReplyer]:
|
||||
@@ -50,7 +49,6 @@ class ReplyerManager:
|
||||
# model_configs 只在此时(初始化时)生效
|
||||
replyer = DefaultReplyer(
|
||||
chat_stream=target_stream,
|
||||
enable_tool=enable_tool,
|
||||
model_configs=model_configs, # 可以是None,此时使用默认模型
|
||||
request_type=request_type,
|
||||
)
|
||||
|
||||
@@ -1243,7 +1243,7 @@ class StatisticOutputTask(AsyncTask):
|
||||
focus_chat_rows = ""
|
||||
if stat_data[FOCUS_AVG_TIMES_BY_CHAT_ACTION]:
|
||||
# 获取前三个阶段(不包括执行动作)
|
||||
basic_stages = ["观察", "并行调整动作、处理", "规划器"]
|
||||
basic_stages = ["观察", "规划器"]
|
||||
existing_basic_stages = []
|
||||
for stage in basic_stages:
|
||||
# 检查是否有任何聊天流在这个阶段有数据
|
||||
@@ -1352,7 +1352,7 @@ class StatisticOutputTask(AsyncTask):
|
||||
focus_action_stage_rows = ""
|
||||
if stat_data[FOCUS_AVG_TIMES_BY_ACTION]:
|
||||
# 获取所有阶段(按固定顺序)
|
||||
stage_order = ["观察", "并行调整动作、处理", "规划器", "执行动作"]
|
||||
stage_order = ["观察", "规划器", "执行动作"]
|
||||
all_stages = []
|
||||
for stage in stage_order:
|
||||
if any(stage in stage_times for stage_times in stat_data[FOCUS_AVG_TIMES_BY_ACTION].values()):
|
||||
@@ -1618,7 +1618,7 @@ class StatisticOutputTask(AsyncTask):
|
||||
focus_version_stage_rows = ""
|
||||
if stat_data[FOCUS_AVG_TIMES_BY_VERSION]:
|
||||
# 基础三个阶段
|
||||
basic_stages = ["观察", "并行调整动作、处理", "规划器"]
|
||||
basic_stages = ["观察", "规划器"]
|
||||
|
||||
# 获取所有action类型用于执行时间列
|
||||
all_action_types_for_exec = set()
|
||||
|
||||
@@ -14,6 +14,9 @@ from src.llm_models.utils_model import LLMRequest
|
||||
from .typo_generator import ChineseTypoGenerator
|
||||
from ...config.config import global_config
|
||||
from ...common.message_repository import find_messages, count_messages
|
||||
from typing import Optional, Tuple, Dict
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
|
||||
|
||||
logger = get_logger("chat_utils")
|
||||
|
||||
@@ -47,7 +50,8 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
|
||||
reply_probability = 0.0
|
||||
is_at = False
|
||||
is_mentioned = False
|
||||
|
||||
if message.is_mentioned is not None:
|
||||
return bool(message.is_mentioned), message.is_mentioned
|
||||
if (
|
||||
message.message_info.additional_config is not None
|
||||
and message.message_info.additional_config.get("is_mentioned") is not None
|
||||
@@ -80,7 +84,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
|
||||
|
||||
if is_at and global_config.normal_chat.at_bot_inevitable_reply:
|
||||
reply_probability = 1.0
|
||||
logger.info("被@,回复概率设置为100%")
|
||||
logger.debug("被@,回复概率设置为100%")
|
||||
else:
|
||||
if not is_mentioned:
|
||||
# 判断是否被回复
|
||||
@@ -105,7 +109,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
|
||||
is_mentioned = True
|
||||
if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply:
|
||||
reply_probability = 1.0
|
||||
logger.info("被提及,回复概率设置为100%")
|
||||
logger.debug("被提及,回复概率设置为100%")
|
||||
return is_mentioned, reply_probability
|
||||
|
||||
|
||||
@@ -637,3 +641,70 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
|
||||
else: # mode = "lite" or unknown
|
||||
# 只返回时分秒格式,喵~
|
||||
return time.strftime("%H:%M:%S", time.localtime(timestamp))
|
||||
|
||||
|
||||
def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]:
|
||||
"""
|
||||
获取聊天类型(是否群聊)和私聊对象信息。
|
||||
|
||||
Args:
|
||||
chat_id: 聊天流ID
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Optional[Dict]]:
|
||||
- bool: 是否为群聊 (True 是群聊, False 是私聊或未知)
|
||||
- Optional[Dict]: 如果是私聊,包含对方信息的字典;否则为 None。
|
||||
字典包含: platform, user_id, user_nickname, person_id, person_name
|
||||
"""
|
||||
is_group_chat = False # Default to private/unknown
|
||||
chat_target_info = None
|
||||
|
||||
try:
|
||||
chat_stream = get_chat_manager().get_stream(chat_id)
|
||||
|
||||
if chat_stream:
|
||||
if chat_stream.group_info:
|
||||
is_group_chat = True
|
||||
chat_target_info = None # Explicitly None for group chat
|
||||
elif chat_stream.user_info: # It's a private chat
|
||||
is_group_chat = False
|
||||
user_info = chat_stream.user_info
|
||||
platform = chat_stream.platform
|
||||
user_id = user_info.user_id
|
||||
|
||||
# Initialize target_info with basic info
|
||||
target_info = {
|
||||
"platform": platform,
|
||||
"user_id": user_id,
|
||||
"user_nickname": user_info.user_nickname,
|
||||
"person_id": None,
|
||||
"person_name": None,
|
||||
}
|
||||
|
||||
# Try to fetch person info
|
||||
try:
|
||||
# Assume get_person_id is sync (as per original code), keep using to_thread
|
||||
person_id = PersonInfoManager.get_person_id(platform, user_id)
|
||||
person_name = None
|
||||
if person_id:
|
||||
# get_value is async, so await it directly
|
||||
person_info_manager = get_person_info_manager()
|
||||
person_name = person_info_manager.get_value_sync(person_id, "person_name")
|
||||
|
||||
target_info["person_id"] = person_id
|
||||
target_info["person_name"] = person_name
|
||||
except Exception as person_e:
|
||||
logger.warning(
|
||||
f"获取 person_id 或 person_name 时出错 for {platform}:{user_id} in utils: {person_e}"
|
||||
)
|
||||
|
||||
chat_target_info = target_info
|
||||
else:
|
||||
logger.warning(f"无法获取 chat_stream for {chat_id} in utils")
|
||||
# Keep defaults: is_group_chat=False, chat_target_info=None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"获取聊天类型和目标信息时出错 for {chat_id}: {e}", exc_info=True)
|
||||
# Keep defaults on error
|
||||
|
||||
return is_group_chat, chat_target_info
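Hypothetical usage of the relocated helper above; the stream id is a placeholder, not a real value:

```python
is_group_chat, chat_target_info = get_chat_type_and_target_info("stream_id_placeholder")
if not is_group_chat and chat_target_info:
    name = chat_target_info.get("person_name") or chat_target_info.get("user_nickname")
    print(f"私聊对象: {name}")
```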
|
||||
|
||||
@@ -178,12 +178,24 @@ class ImageManager:
|
||||
"""获取普通图片描述,带查重和保存功能"""
|
||||
try:
|
||||
# 计算图片哈希
|
||||
# 确保base64字符串只包含ASCII字符
|
||||
if isinstance(image_base64, str):
|
||||
image_base64 = image_base64.encode("ascii", errors="ignore").decode("ascii")
|
||||
image_bytes = base64.b64decode(image_base64)
|
||||
image_hash = hashlib.md5(image_bytes).hexdigest()
|
||||
image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
|
||||
|
||||
# 检查图片是否已存在
|
||||
existing_image = Images.get_or_none(Images.emoji_hash == image_hash)
|
||||
if existing_image:
|
||||
# 更新计数
|
||||
if hasattr(existing_image, "count") and existing_image.count is not None:
|
||||
existing_image.count += 1
|
||||
else:
|
||||
existing_image.count = 1
|
||||
existing_image.save()
|
||||
|
||||
# 如果已有描述,直接返回
|
||||
if existing_image.description:
|
||||
return f"[图片:{existing_image.description}]"
|
||||
|
||||
# 查询缓存的描述
|
||||
cached_description = self._get_description_from_db(image_hash, "image")
|
||||
@@ -192,6 +204,7 @@ class ImageManager:
|
||||
return f"[图片:{cached_description}]"
|
||||
|
||||
# 调用AI获取描述
|
||||
image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
|
||||
prompt = "请用中文描述这张图片的内容。如果有文字,请把文字都描述出来,请留意其主题,直观感受,输出为一段平文本,最多50字"
|
||||
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
|
||||
|
||||
@@ -199,17 +212,7 @@ class ImageManager:
|
||||
logger.warning("AI未能生成图片描述")
|
||||
return "[图片(描述生成失败)]"
|
||||
|
||||
# 再次检查缓存
|
||||
cached_description = self._get_description_from_db(image_hash, "image")
|
||||
if cached_description:
|
||||
logger.warning(f"虽然生成了描述,但是找到缓存图片描述 {cached_description}")
|
||||
return f"[图片:{cached_description}]"
|
||||
|
||||
logger.debug(f"描述是{description}")
|
||||
|
||||
# 根据配置决定是否保存图片
|
||||
|
||||
# 生成文件名和路径
|
||||
# 保存图片和描述
|
||||
current_timestamp = time.time()
|
||||
filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}"
|
||||
image_dir = os.path.join(self.IMAGE_DIR, "image")
|
||||
@@ -221,26 +224,31 @@ class ImageManager:
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(image_bytes)
|
||||
|
||||
# 保存到数据库 (Images表)
|
||||
try:
|
||||
img_obj = Images.get((Images.emoji_hash == image_hash) & (Images.type == "image"))
|
||||
img_obj.path = file_path
|
||||
img_obj.description = description
|
||||
img_obj.timestamp = current_timestamp
|
||||
img_obj.save()
|
||||
except Images.DoesNotExist:
|
||||
# 保存到数据库,补充缺失字段
|
||||
if existing_image:
|
||||
existing_image.path = file_path
|
||||
existing_image.description = description
|
||||
existing_image.timestamp = current_timestamp
|
||||
if not hasattr(existing_image, "image_id") or not existing_image.image_id:
|
||||
existing_image.image_id = str(uuid.uuid4())
|
||||
if not hasattr(existing_image, "vlm_processed") or existing_image.vlm_processed is None:
|
||||
existing_image.vlm_processed = True
|
||||
existing_image.save()
|
||||
else:
|
||||
Images.create(
|
||||
image_id=str(uuid.uuid4()),
|
||||
emoji_hash=image_hash,
|
||||
path=file_path,
|
||||
type="image",
|
||||
description=description,
|
||||
timestamp=current_timestamp,
|
||||
vlm_processed=True,
|
||||
count=1,
|
||||
)
|
||||
logger.debug(f"保存图片元数据: {file_path}")
|
||||
except Exception as e:
|
||||
logger.error(f"保存图片文件或元数据失败: {str(e)}")
|
||||
|
||||
# 保存描述到数据库 (ImageDescriptions表)
|
||||
# 保存描述到ImageDescriptions表
|
||||
self._save_description_to_db(image_hash, description, "image")
|
||||
|
||||
return f"[图片:{description}]"
|
||||
@@ -403,7 +411,16 @@ class ImageManager:
|
||||
or existing_image.vlm_processed is None
|
||||
):
|
||||
logger.debug(f"图片记录缺少必要字段,补全旧记录: {image_hash}")
|
||||
image_id = str(uuid.uuid4())
|
||||
if not existing_image.image_id:
|
||||
existing_image.image_id = str(uuid.uuid4())
|
||||
if existing_image.count is None:
|
||||
existing_image.count = 0
|
||||
if existing_image.vlm_processed is None:
|
||||
existing_image.vlm_processed = False
|
||||
|
||||
existing_image.count += 1
|
||||
existing_image.save()
|
||||
return existing_image.image_id, f"[picid:{existing_image.image_id}]"
|
||||
else:
|
||||
# print(f"图片已存在: {existing_image.image_id}")
|
||||
# print(f"图片描述: {existing_image.description}")
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
from src.chat.heart_flow.observation.observation import Observation
|
||||
from src.chat.focus_chat.observation.chatting_observation import ChattingObservation
|
||||
from src.chat.focus_chat.observation.observation import Observation
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
import time
|
||||
@@ -7,9 +7,8 @@ import traceback
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from .base_processor import BaseProcessor
|
||||
from typing import List
|
||||
from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation
|
||||
from src.chat.focus_chat.observation.working_observation import WorkingMemoryObservation
|
||||
from src.chat.focus_chat.working_memory.working_memory import WorkingMemory
|
||||
from src.chat.focus_chat.info.info_base import InfoBase
|
||||
from json_repair import repair_json
|
||||
@@ -44,12 +43,10 @@ def init_prompt():
|
||||
Prompt(memory_proces_prompt, "prompt_memory_proces")
|
||||
|
||||
|
||||
class WorkingMemoryProcessor(BaseProcessor):
|
||||
class WorkingMemoryProcessor:
|
||||
log_prefix = "工作记忆"
|
||||
|
||||
def __init__(self, subheartflow_id: str):
|
||||
super().__init__()
|
||||
|
||||
self.subheartflow_id = subheartflow_id
|
||||
|
||||
self.llm_model = LLMRequest(
|
||||
@@ -71,6 +68,7 @@ class WorkingMemoryProcessor(BaseProcessor):
|
||||
"""
|
||||
working_memory = None
|
||||
chat_info = ""
|
||||
chat_obs = None
|
||||
try:
|
||||
for observation in observations:
|
||||
if isinstance(observation, WorkingMemoryObservation):
|
||||
@@ -79,10 +77,15 @@ class WorkingMemoryProcessor(BaseProcessor):
|
||||
chat_info = observation.get_observe_info()
|
||||
chat_obs = observation
|
||||
# 检查是否有待压缩内容
|
||||
if chat_obs.compressor_prompt:
|
||||
if chat_obs and chat_obs.compressor_prompt:
|
||||
logger.debug(f"{self.log_prefix} 压缩聊天记忆")
|
||||
await self.compress_chat_memory(working_memory, chat_obs)
|
||||
|
||||
# 检查working_memory是否为None
|
||||
if working_memory is None:
|
||||
logger.debug(f"{self.log_prefix} 没有找到工作记忆观察,跳过处理")
|
||||
return []
|
||||
|
||||
all_memory = working_memory.get_all_memories()
|
||||
if not all_memory:
|
||||
logger.debug(f"{self.log_prefix} 目前没有工作记忆,跳过提取")
|
||||
@@ -183,6 +186,11 @@ class WorkingMemoryProcessor(BaseProcessor):
|
||||
working_memory: 工作记忆对象
|
||||
obs: 聊天观察对象
|
||||
"""
|
||||
# 检查working_memory是否为None
|
||||
if working_memory is None:
|
||||
logger.warning(f"{self.log_prefix} 工作记忆对象为None,无法压缩聊天记忆")
|
||||
return
|
||||
|
||||
try:
|
||||
summary_result, _ = await self.llm_model.generate_response_async(obs.compressor_prompt)
|
||||
if not summary_result:
|
||||
@@ -235,6 +243,11 @@ class WorkingMemoryProcessor(BaseProcessor):
|
||||
memory_id1: 第一个记忆ID
|
||||
memory_id2: 第二个记忆ID
|
||||
"""
|
||||
# 检查working_memory是否为None
|
||||
if working_memory is None:
|
||||
logger.warning(f"{self.log_prefix} 工作记忆对象为None,无法合并记忆")
|
||||
return
|
||||
|
||||
try:
|
||||
merged_memory = await working_memory.merge_memory(memory_id1, memory_id2)
|
||||
logger.debug(f"{self.log_prefix} 合并后的记忆梗概: {merged_memory.brief}")
|
||||
@@ -340,20 +340,18 @@ MODULE_COLORS = {
|
||||
"memory": "\033[34m",
|
||||
"hfc": "\033[96m",
|
||||
"base_action": "\033[96m",
|
||||
"action_manager": "\033[34m",
|
||||
"action_manager": "\033[32m",
|
||||
# 关系系统
|
||||
"relation": "\033[38;5;201m", # 深粉色
|
||||
# 聊天相关模块
|
||||
"normal_chat": "\033[38;5;81m", # 亮蓝绿色
|
||||
"normal_chat_response": "\033[38;5;123m", # 青绿色
|
||||
"normal_chat_expressor": "\033[38;5;117m", # 浅蓝色
|
||||
"normal_chat_action_modifier": "\033[38;5;111m", # 蓝色
|
||||
"normal_chat_planner": "\033[38;5;75m", # 浅蓝色
|
||||
"heartflow": "\033[38;5;213m", # 粉色
|
||||
"heartflow_utils": "\033[38;5;219m", # 浅粉色
|
||||
"sub_heartflow": "\033[38;5;207m", # 粉紫色
|
||||
"subheartflow_manager": "\033[38;5;201m", # 深粉色
|
||||
"observation": "\033[38;5;141m", # 紫色
|
||||
"background_tasks": "\033[38;5;240m", # 灰色
|
||||
"chat_message": "\033[38;5;45m", # 青色
|
||||
"chat_stream": "\033[38;5;51m", # 亮青色
|
||||
@@ -362,7 +360,6 @@ MODULE_COLORS = {
|
||||
# 专注聊天模块
|
||||
"replyer": "\033[38;5;166m", # 橙色
|
||||
"expressor": "\033[38;5;172m", # 黄橙色
|
||||
"planner_factory": "\033[38;5;178m", # 黄色
|
||||
"processor": "\033[38;5;184m", # 黄绿色
|
||||
"base_processor": "\033[38;5;190m", # 绿黄色
|
||||
"working_memory": "\033[38;5;22m", # 深绿色
|
||||
@@ -370,6 +367,7 @@ MODULE_COLORS = {
|
||||
# 插件系统
|
||||
"plugin_manager": "\033[38;5;208m", # 红色
|
||||
"base_plugin": "\033[38;5;202m", # 橙红色
|
||||
"send_api": "\033[38;5;208m", # 橙色
|
||||
"base_command": "\033[38;5;208m", # 橙色
|
||||
"component_registry": "\033[38;5;214m", # 橙黄色
|
||||
"stream_api": "\033[38;5;220m", # 黄色
|
||||
@@ -388,10 +386,8 @@ MODULE_COLORS = {
|
||||
"willing": "\033[38;5;147m", # 浅紫色
|
||||
# 工具模块
|
||||
"tool_use": "\033[38;5;64m", # 深绿色
|
||||
"tool_executor": "\033[38;5;64m", # 深绿色
|
||||
"base_tool": "\033[38;5;70m", # 绿色
|
||||
"compare_numbers_tool": "\033[38;5;76m", # 浅绿色
|
||||
"change_mood_tool": "\033[38;5;82m", # 绿色
|
||||
"relationship_tool": "\033[38;5;88m", # 深红色
|
||||
# 工具和实用模块
|
||||
"prompt": "\033[38;5;99m", # 紫色
|
||||
"prompt_build": "\033[38;5;105m", # 紫色
|
||||
@@ -417,6 +413,8 @@ MODULE_COLORS = {
|
||||
"confirm": "\033[1;93m", # 黄色+粗体
|
||||
# 模型相关
|
||||
"model_utils": "\033[38;5;164m", # 紫红色
|
||||
"relationship_fetcher": "\033[38;5;170m", # 浅紫色
|
||||
"relationship_builder": "\033[38;5;117m", # 浅蓝色
|
||||
}
|
||||
|
||||
RESET_COLOR = "\033[0m"
|
||||
|
||||
@@ -35,6 +35,7 @@ from src.config.official_configs import (
|
||||
LPMMKnowledgeConfig,
|
||||
RelationshipConfig,
|
||||
ToolConfig,
|
||||
DebugConfig,
|
||||
)
|
||||
|
||||
install(extra_lines=3)
|
||||
@@ -50,7 +51,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template")
|
||||
|
||||
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
|
||||
# 对该字段的更新,请严格参照语义化版本规范:https://semver.org/lang/zh-CN/
|
||||
MMC_VERSION = "0.8.1-snapshot.1"
|
||||
MMC_VERSION = "0.8.2-snapshot.1"
|
||||
|
||||
|
||||
def update_config():
|
||||
@@ -165,6 +166,7 @@ class Config(ConfigBase):
|
||||
maim_message: MaimMessageConfig
|
||||
lpmm_knowledge: LPMMKnowledgeConfig
|
||||
tool: ToolConfig
|
||||
debug: DebugConfig
|
||||
|
||||
|
||||
def load_config(config_path: str) -> Config:
|
||||
|
||||
@@ -84,6 +84,9 @@ class ChatConfig(ConfigBase):
|
||||
选择普通模型的概率为 1 - reasoning_normal_model_probability
|
||||
"""
|
||||
|
||||
thinking_timeout: int = 30
|
||||
"""麦麦最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢)"""
|
||||
|
||||
talk_frequency: float = 1
|
||||
"""回复频率阈值"""
|
||||
|
||||
@@ -270,24 +273,12 @@ class MessageReceiveConfig(ConfigBase):
|
||||
class NormalChatConfig(ConfigBase):
|
||||
"""普通聊天配置类"""
|
||||
|
||||
message_buffer: bool = False
|
||||
"""消息缓冲器"""
|
||||
|
||||
emoji_chance: float = 0.2
|
||||
"""发送表情包的基础概率"""
|
||||
|
||||
thinking_timeout: int = 120
|
||||
"""最长思考时间"""
|
||||
|
||||
willing_mode: str = "classical"
|
||||
"""意愿模式"""
|
||||
|
||||
response_interested_rate_amplifier: float = 1.0
|
||||
"""回复兴趣度放大系数"""
|
||||
|
||||
emoji_response_penalty: float = 0.0
|
||||
"""表情包回复惩罚系数"""
|
||||
|
||||
mentioned_bot_inevitable_reply: bool = False
|
||||
"""提及 bot 必然回复"""
|
||||
|
||||
@@ -302,21 +293,12 @@ class NormalChatConfig(ConfigBase):
|
||||
class FocusChatConfig(ConfigBase):
|
||||
"""专注聊天配置类"""
|
||||
|
||||
compressed_length: int = 5
|
||||
"""心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5"""
|
||||
|
||||
compress_length_limit: int = 5
|
||||
"""最多压缩份数,超过该数值的压缩上下文会被删除"""
|
||||
|
||||
think_interval: float = 1
|
||||
"""思考间隔(秒)"""
|
||||
|
||||
consecutive_replies: float = 1
|
||||
"""连续回复能力,值越高,麦麦连续回复的概率越高"""
|
||||
|
||||
working_memory_processor: bool = False
|
||||
"""是否启用工作记忆处理器"""
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExpressionConfig(ConfigBase):
|
||||
@@ -356,6 +338,12 @@ class ToolConfig(ConfigBase):
|
||||
class EmojiConfig(ConfigBase):
|
||||
"""表情包配置类"""
|
||||
|
||||
emoji_chance: float = 0.6
|
||||
"""发送表情包的基础概率"""
|
||||
|
||||
emoji_activate_type: str = "random"
|
||||
"""表情包激活类型,可选:random,llm,random下,表情包动作随机启用,llm下,表情包动作根据llm判断是否启用"""
|
||||
|
||||
max_reg_num: int = 200
|
||||
"""表情包最大注册数量"""
|
||||
|
||||
@@ -543,12 +531,20 @@ class TelemetryConfig(ConfigBase):
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExperimentalConfig(ConfigBase):
|
||||
"""实验功能配置类"""
|
||||
class DebugConfig(ConfigBase):
|
||||
"""调试配置类"""
|
||||
|
||||
debug_show_chat_mode: bool = False
|
||||
"""是否在回复后显示当前聊天模式"""
|
||||
|
||||
show_prompt: bool = False
|
||||
"""是否显示prompt"""
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExperimentalConfig(ConfigBase):
|
||||
"""实验功能配置类"""
|
||||
|
||||
enable_friend_chat: bool = False
|
||||
"""是否启用好友聊天"""
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.message import Message
from maim_message import UserInfo, Seg
from src.chat.message_receive.message import MessageSending, MessageSet
from src.chat.message_receive.message_sender import message_manager
from src.chat.message_receive.normal_message_sender import message_manager
from src.chat.message_receive.storage import MessageStorage
from src.config.config import global_config
from rich.traceback import install

@@ -35,6 +35,11 @@ class KnowledgeFetcher:

logger.debug(f"[私聊][{self.private_name}]正在从LPMM知识库中获取知识")
try:
# 检查LPMM知识库是否启用
if qa_manager is None:
logger.debug(f"[私聊][{self.private_name}]LPMM知识库已禁用,跳过知识获取")
return "未找到匹配的知识"

knowledge_info = qa_manager.get_knowledge(query)
logger.debug(f"[私聊][{self.private_name}]LPMM知识库查询结果: {knowledge_info[:150]}")
return knowledge_info
@@ -10,8 +10,7 @@ from src.manager.mood_manager import MoodPrintTask, MoodUpdateTask
|
||||
from src.chat.emoji_system.emoji_manager import get_emoji_manager
|
||||
from src.chat.normal_chat.willing.willing_manager import get_willing_manager
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.heart_flow.heartflow import heartflow
|
||||
from src.chat.message_receive.message_sender import message_manager
|
||||
from src.chat.message_receive.normal_message_sender import message_manager
|
||||
from src.chat.message_receive.storage import MessageStorage
|
||||
from src.config.config import global_config
|
||||
from src.chat.message_receive.bot import chat_bot
|
||||
@@ -142,10 +141,6 @@ class MainSystem:
|
||||
await message_manager.start()
|
||||
logger.info("全局消息管理器启动成功")
|
||||
|
||||
# 启动心流系统主循环
|
||||
asyncio.create_task(heartflow.heartflow_start_working())
|
||||
logger.info("心流系统启动成功")
|
||||
|
||||
init_time = int(1000 * (time.time() - init_start_time))
|
||||
logger.info(f"初始化完成,神经元放电{init_time}次")
|
||||
except Exception as e:
|
||||
|
||||
@@ -77,8 +77,6 @@ class MessageSenderContainer:
|
||||
msg_id = f"{current_time}_{random.randint(1000, 9999)}"
|
||||
|
||||
text_to_send = chunk
|
||||
if global_config.experimental.debug_show_chat_mode:
|
||||
text_to_send += "ⁿ"
|
||||
|
||||
message_segment = Seg(type="text", data=text_to_send)
|
||||
bot_message = MessageSending(
|
||||
@@ -165,6 +163,9 @@ class S4UChat:
|
||||
|
||||
self._is_replying = False
|
||||
self.gpt = S4UStreamGenerator()
|
||||
self.interest_dict: Dict[str, float] = {} # 用户兴趣分
|
||||
self.at_bot_priority_bonus = 100.0 # @机器人的优先级加成
|
||||
self.normal_queue_max_size = 50 # 普通队列最大容量
|
||||
logger.info(f"[{self.stream_name}] S4UChat with two-queue system initialized.")
|
||||
|
||||
def _is_vip(self, message: MessageRecv) -> bool:
|
||||
@@ -196,7 +197,7 @@ class S4UChat:
|
||||
async def add_message(self, message: MessageRecv) -> None:
|
||||
"""根据VIP状态和中断逻辑将消息放入相应队列。"""
|
||||
is_vip = self._is_vip(message)
|
||||
self._get_message_priority(message)
|
||||
new_priority_score = self._calculate_base_priority_score(message)
|
||||
|
||||
should_interrupt = False
|
||||
if self._current_generation_task and not self._current_generation_task.done():
|
||||
@@ -218,11 +219,11 @@ class S4UChat:
|
||||
new_sender_id = message.message_info.user_info.user_id
|
||||
current_sender_id = current_msg.message_info.user_info.user_id
|
||||
# 新消息优先级更高
|
||||
if new_priority_score > current_priority_score:
|
||||
if new_priority_score > current_priority:
|
||||
should_interrupt = True
|
||||
logger.info(f"[{self.stream_name}] New normal message has higher priority, interrupting.")
|
||||
# 同用户,新消息的优先级不能更低
|
||||
elif new_sender_id == current_sender_id and new_priority_score >= current_priority_score:
|
||||
elif new_sender_id == current_sender_id and new_priority_score >= current_priority:
|
||||
should_interrupt = True
|
||||
logger.info(f"[{self.stream_name}] Same user sent new message, interrupting.")
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@ class RelationshipBuilderManager:
|
||||
"""
|
||||
if chat_id not in self.builders:
|
||||
self.builders[chat_id] = RelationshipBuilder(chat_id)
|
||||
logger.info(f"创建聊天 {chat_id} 的关系构建器")
|
||||
logger.debug(f"创建聊天 {chat_id} 的关系构建器")
|
||||
|
||||
return self.builders[chat_id]
|
||||
|
||||
@@ -51,7 +51,7 @@ class RelationshipBuilderManager:
|
||||
"""
|
||||
if chat_id in self.builders:
|
||||
del self.builders[chat_id]
|
||||
logger.info(f"移除聊天 {chat_id} 的关系构建器")
|
||||
logger.debug(f"移除聊天 {chat_id} 的关系构建器")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ from typing import List, Dict
|
||||
from json_repair import repair_json
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
import json
|
||||
|
||||
import random
|
||||
|
||||
logger = get_logger("relationship_fetcher")
|
||||
|
||||
@@ -70,14 +70,14 @@ class RelationshipFetcher:
|
||||
|
||||
# LLM模型配置
|
||||
self.llm_model = LLMRequest(
|
||||
model=global_config.model.relation,
|
||||
request_type="relation",
|
||||
model=global_config.model.utils_small,
|
||||
request_type="relation.fetcher",
|
||||
)
|
||||
|
||||
# 小模型用于即时信息提取
|
||||
self.instant_llm_model = LLMRequest(
|
||||
model=global_config.model.utils_small,
|
||||
request_type="relation.instant",
|
||||
request_type="relation.fetch",
|
||||
)
|
||||
|
||||
name = get_chat_manager().get_stream_name(self.chat_id)
|
||||
@@ -101,12 +101,72 @@ class RelationshipFetcher:
|
||||
person_name = await person_info_manager.get_value(person_id, "person_name")
|
||||
short_impression = await person_info_manager.get_value(person_id, "short_impression")
|
||||
|
||||
nickname_str = await person_info_manager.get_value(person_id, "nickname")
|
||||
platform = await person_info_manager.get_value(person_id, "platform")
|
||||
|
||||
if person_name == nickname_str and not short_impression:
|
||||
return ""
|
||||
|
||||
current_points = await person_info_manager.get_value(person_id, "points") or []
|
||||
|
||||
if isinstance(current_points, str):
|
||||
try:
|
||||
current_points = json.loads(current_points)
|
||||
except json.JSONDecodeError:
|
||||
logger.error(f"解析points JSON失败: {current_points}")
|
||||
current_points = []
|
||||
elif not isinstance(current_points, list):
|
||||
current_points = []
|
||||
|
||||
# 按时间排序forgotten_points
|
||||
current_points.sort(key=lambda x: x[2])
|
||||
# 按权重加权随机抽取3个points,point[1]的值在1-10之间,权重越高被抽到概率越大
|
||||
if len(current_points) > 3:
|
||||
# point[1] 取值范围1-10,直接作为权重
|
||||
weights = [max(1, min(10, int(point[1]))) for point in current_points]
|
||||
points = random.choices(current_points, weights=weights, k=3)
|
||||
else:
|
||||
points = current_points
|
||||
|
||||
# 构建points文本
|
||||
points_text = "\n".join([f"{point[2]}:{point[0]}" for point in points])
|
||||
|
||||
info_type = await self._build_fetch_query(person_id, target_message, chat_history)
|
||||
if info_type:
|
||||
await self._extract_single_info(person_id, info_type, person_name)
|
||||
|
||||
relation_info = self._organize_known_info()
|
||||
relation_info = f"你对{person_name}的印象是:{short_impression}\n{relation_info}"
|
||||
|
||||
nickname_str = ""
|
||||
if person_name != nickname_str:
|
||||
nickname_str = f"(ta在{platform}上的昵称是{nickname_str})"
|
||||
|
||||
if short_impression and relation_info:
|
||||
if points_text:
|
||||
relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}。你还记得ta最近做的事:{points_text}"
|
||||
else:
|
||||
relation_info = (
|
||||
f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}"
|
||||
)
|
||||
elif short_impression:
|
||||
if points_text:
|
||||
relation_info = (
|
||||
f"你对{person_name}的印象是{nickname_str}:{short_impression}。你还记得ta最近做的事:{points_text}"
|
||||
)
|
||||
else:
|
||||
relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}"
|
||||
elif relation_info:
|
||||
if points_text:
|
||||
relation_info = (
|
||||
f"你对{person_name}的了解{nickname_str}:{relation_info}。你还记得ta最近做的事:{points_text}"
|
||||
)
|
||||
else:
|
||||
relation_info = f"你对{person_name}的了解{nickname_str}:{relation_info}"
|
||||
elif points_text:
|
||||
relation_info = f"你记得{person_name}{nickname_str}最近做的事:{points_text}"
|
||||
else:
|
||||
relation_info = ""
|
||||
|
||||
return relation_info
|
||||
|
||||
async def _build_fetch_query(self, person_id, target_message, chat_history):
|
||||
@@ -134,7 +194,7 @@ class RelationshipFetcher:
|
||||
|
||||
# 检查是否返回了不需要查询的标志
|
||||
if "none" in content_json:
|
||||
logger.info(f"{self.log_prefix} LLM判断当前不需要查询任何信息:{content_json.get('none', '')}")
|
||||
logger.debug(f"{self.log_prefix} LLM判断当前不需要查询任何信息:{content_json.get('none', '')}")
|
||||
return None
|
||||
|
||||
info_type = content_json.get("info_type")
|
||||
|
||||
@@ -125,6 +125,30 @@ class RelationshipManager:
|
||||
return ""
|
||||
short_impression = await person_info_manager.get_value(person_id, "short_impression")
|
||||
|
||||
current_points = await person_info_manager.get_value(person_id, "points") or []
|
||||
print(f"current_points: {current_points}")
|
||||
if isinstance(current_points, str):
|
||||
try:
|
||||
current_points = json.loads(current_points)
|
||||
except json.JSONDecodeError:
|
||||
logger.error(f"解析points JSON失败: {current_points}")
|
||||
current_points = []
|
||||
elif not isinstance(current_points, list):
|
||||
current_points = []
|
||||
|
||||
# 按时间排序forgotten_points
|
||||
current_points.sort(key=lambda x: x[2])
|
||||
# 按权重加权随机抽取3个points,point[1]的值在1-10之间,权重越高被抽到概率越大
|
||||
if len(current_points) > 3:
|
||||
# point[1] 取值范围1-10,直接作为权重
|
||||
weights = [max(1, min(10, int(point[1]))) for point in current_points]
|
||||
points = random.choices(current_points, weights=weights, k=3)
|
||||
else:
|
||||
points = current_points
|
||||
|
||||
# 构建points文本
|
||||
points_text = "\n".join([f"{point[2]}:{point[0]}\n" for point in points])
|
||||
|
||||
nickname_str = await person_info_manager.get_value(person_id, "nickname")
|
||||
platform = await person_info_manager.get_value(person_id, "platform")
|
||||
|
||||
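Both relationship hunks above treat each stored point as a `[content, weight, timestamp]` triple with weight in 1-10, and sample up to three points weighted by that value. A self-contained sketch of that sampling with made-up data (the point texts and timestamps here are purely illustrative):

```python
import random

# Hypothetical points: [content, weight (1-10), timestamp]
current_points = [
    ["帮我修好了配置文件", 8, 1751700000],
    ["拒绝帮我写化学作业", 3, 1751703600],
    ["搞错了我的名字", 6, 1751707200],
    ["说自己喜欢吃辣", 7, 1751710800],
]

# Sort by timestamp, then draw 3 points with point[1] as the weight,
# clamping it into the 1-10 range exactly as the diff does.
current_points.sort(key=lambda p: p[2])
if len(current_points) > 3:
    weights = [max(1, min(10, int(p[1]))) for p in current_points]
    points = random.choices(current_points, weights=weights, k=3)
else:
    points = current_points

points_text = "\n".join(f"{p[2]}:{p[0]}" for p in points)
print(points_text)
```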
@@ -137,7 +161,10 @@ class RelationshipManager:
|
||||
relation_prompt = f"'{person_name}' ,ta在{platform}上的昵称是{nickname_str}。"
|
||||
|
||||
if short_impression:
|
||||
relation_prompt += f"你对ta的印象是:{short_impression}。"
|
||||
relation_prompt += f"你对ta的印象是:{short_impression}。\n"
|
||||
|
||||
if points_text:
|
||||
relation_prompt += f"你记得ta最近做的事:{points_text}"
|
||||
|
||||
return relation_prompt
|
||||
|
||||
@@ -241,16 +268,16 @@ class RelationshipManager:
|
||||
"weight": 10
|
||||
}},
|
||||
{{
|
||||
"point": "我让{person_name}帮我写作业,他拒绝了",
|
||||
"weight": 4
|
||||
"point": "我让{person_name}帮我写化学作业,他拒绝了,我感觉他对我有意见,或者ta不喜欢我",
|
||||
"weight": 3
|
||||
}},
|
||||
{{
|
||||
"point": "{person_name}居然搞错了我的名字,生气了",
|
||||
"point": "{person_name}居然搞错了我的名字,我感到生气了,之后不理ta了",
|
||||
"weight": 8
|
||||
}},
|
||||
{{
|
||||
"point": "{person_name}喜欢吃辣,我和她关系不错",
|
||||
"weight": 8
|
||||
"point": "{person_name}喜欢吃辣,具体来说,没有辣的食物ta都不喜欢吃,可能是因为ta是湖南人。",
|
||||
"weight": 7
|
||||
}}
|
||||
}}
|
||||
|
||||
@@ -456,7 +483,7 @@ class RelationshipManager:
|
||||
你对{person_name}的了解是:
|
||||
{compressed_summary}
|
||||
|
||||
请你用一句话概括你对{person_name}的了解。突出:
|
||||
请你概括你对{person_name}的了解。突出:
|
||||
1.对{person_name}的直观印象
|
||||
2.{global_config.bot.nickname}与{person_name}的关系
|
||||
3.{person_name}的关键信息
|
||||
@@ -487,8 +514,8 @@ class RelationshipManager:
|
||||
2. **好感度 (liking_value)**: 0-100的整数,表示这些信息让你对ta的好感程度。
|
||||
- 0: 非常厌恶
|
||||
- 25: 有点反感
|
||||
- 50: 中立/无感
|
||||
- 75: 有点喜欢
|
||||
- 50: 中立/无感(或者文本中无法明显看出)
|
||||
- 75: 喜欢这个人
|
||||
- 100: 非常喜欢/开心对这个人
|
||||
|
||||
请严格按照json格式输出,不要有其他多余内容:
|
||||
|
||||
@@ -17,7 +17,6 @@ from src.common.logger import get_logger
|
||||
|
||||
# 导入依赖
|
||||
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
|
||||
from src.chat.focus_chat.info.obs_info import ObsInfo
|
||||
|
||||
logger = get_logger("chat_api")
|
||||
|
||||
@@ -193,39 +192,6 @@ class ChatManager:
|
||||
logger.error(f"[ChatAPI] 获取聊天流信息失败: {e}")
|
||||
return {}
|
||||
|
||||
@staticmethod
|
||||
def get_recent_messages_from_obs(observations: List[Any], count: int = 5) -> List[Dict[str, Any]]:
|
||||
"""从观察对象获取最近的消息
|
||||
|
||||
Args:
|
||||
observations: 观察对象列表
|
||||
count: 要获取的消息数量
|
||||
|
||||
Returns:
|
||||
List[Dict]: 消息列表,每个消息包含发送者、内容等信息
|
||||
"""
|
||||
messages = []
|
||||
|
||||
try:
|
||||
if observations and len(observations) > 0:
|
||||
obs = observations[0]
|
||||
if hasattr(obs, "get_talking_message"):
|
||||
obs: ObsInfo
|
||||
raw_messages = obs.get_talking_message()
|
||||
# 转换为简化格式
|
||||
for msg in raw_messages[-count:]:
|
||||
simple_msg = {
|
||||
"sender": msg.get("sender", "未知"),
|
||||
"content": msg.get("content", ""),
|
||||
"timestamp": msg.get("timestamp", 0),
|
||||
}
|
||||
messages.append(simple_msg)
|
||||
logger.debug(f"[ChatAPI] 获取到 {len(messages)} 条最近消息")
|
||||
except Exception as e:
|
||||
logger.error(f"[ChatAPI] 获取最近消息失败: {e}")
|
||||
|
||||
return messages
|
||||
|
||||
@staticmethod
|
||||
def get_streams_summary() -> Dict[str, int]:
|
||||
"""获取聊天流统计摘要
|
||||
|
||||
@@ -374,7 +374,7 @@ async def store_action_info(
|
||||
)
|
||||
|
||||
if saved_record:
|
||||
logger.info(f"[DatabaseAPI] 成功存储动作信息: {action_name} (ID: {record_data['action_id']})")
|
||||
logger.debug(f"[DatabaseAPI] 成功存储动作信息: {action_name} (ID: {record_data['action_id']})")
|
||||
else:
|
||||
logger.error(f"[DatabaseAPI] 存储动作信息失败: {action_name}")
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ async def get_by_description(description: str) -> Optional[Tuple[str, str, str]]
|
||||
Optional[Tuple[str, str, str]]: (base64编码, 表情包描述, 匹配的情感标签) 或 None
|
||||
"""
|
||||
try:
|
||||
logger.info(f"[EmojiAPI] 根据描述获取表情包: {description}")
|
||||
logger.debug(f"[EmojiAPI] 根据描述获取表情包: {description}")
|
||||
|
||||
emoji_manager = get_emoji_manager()
|
||||
emoji_result = await emoji_manager.get_emoji_for_text(description)
|
||||
@@ -47,7 +47,7 @@ async def get_by_description(description: str) -> Optional[Tuple[str, str, str]]
|
||||
logger.error(f"[EmojiAPI] 无法将表情包文件转换为base64: {emoji_path}")
|
||||
return None
|
||||
|
||||
logger.info(f"[EmojiAPI] 成功获取表情包: {emoji_description}, 匹配情感: {matched_emotion}")
|
||||
logger.debug(f"[EmojiAPI] 成功获取表情包: {emoji_description}, 匹配情感: {matched_emotion}")
|
||||
return emoji_base64, emoji_description, matched_emotion
|
||||
|
||||
except Exception as e:
|
||||
|
||||
@@ -27,7 +27,6 @@ logger = get_logger("generator_api")
|
||||
def get_replyer(
|
||||
chat_stream: Optional[ChatStream] = None,
|
||||
chat_id: Optional[str] = None,
|
||||
enable_tool: bool = False,
|
||||
model_configs: Optional[List[Dict[str, Any]]] = None,
|
||||
request_type: str = "replyer",
|
||||
) -> Optional[DefaultReplyer]:
|
||||
@@ -52,7 +51,6 @@ def get_replyer(
|
||||
chat_id=chat_id,
|
||||
model_configs=model_configs,
|
||||
request_type=request_type,
|
||||
enable_tool=enable_tool,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"[GeneratorAPI] 获取回复器时发生意外错误: {e}", exc_info=True)
|
||||
@@ -70,7 +68,6 @@ async def generate_reply(
|
||||
chat_id: str = None,
|
||||
action_data: Dict[str, Any] = None,
|
||||
reply_to: str = "",
|
||||
relation_info: str = "",
|
||||
extra_info: str = "",
|
||||
available_actions: List[str] = None,
|
||||
enable_tool: bool = False,
|
||||
@@ -79,6 +76,7 @@ async def generate_reply(
|
||||
return_prompt: bool = False,
|
||||
model_configs: Optional[List[Dict[str, Any]]] = None,
|
||||
request_type: str = "",
|
||||
enable_timeout: bool = False,
|
||||
) -> Tuple[bool, List[Tuple[str, Any]]]:
|
||||
"""生成回复
|
||||
|
||||
@@ -94,28 +92,27 @@ async def generate_reply(
|
||||
"""
|
||||
try:
|
||||
# 获取回复器
|
||||
replyer = get_replyer(
|
||||
chat_stream, chat_id, model_configs=model_configs, request_type=request_type, enable_tool=enable_tool
|
||||
)
|
||||
replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs, request_type=request_type)
|
||||
if not replyer:
|
||||
logger.error("[GeneratorAPI] 无法获取回复器")
|
||||
return False, []
|
||||
|
||||
logger.info("[GeneratorAPI] 开始生成回复")
|
||||
logger.debug("[GeneratorAPI] 开始生成回复")
|
||||
|
||||
# 调用回复器生成回复
|
||||
success, content, prompt = await replyer.generate_reply_with_context(
|
||||
reply_data=action_data or {},
|
||||
reply_to=reply_to,
|
||||
relation_info=relation_info,
|
||||
extra_info=extra_info,
|
||||
available_actions=available_actions,
|
||||
enable_timeout=enable_timeout,
|
||||
enable_tool=enable_tool,
|
||||
)
|
||||
|
||||
reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo)
|
||||
|
||||
if success:
|
||||
logger.info(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项")
|
||||
logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项")
|
||||
else:
|
||||
logger.warning("[GeneratorAPI] 回复生成失败")
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ from src.common.logger import get_logger
|
||||
|
||||
# 导入依赖
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.focus_chat.heartFC_sender import HeartFCSender
|
||||
from src.chat.message_receive.uni_message_sender import HeartFCSender
|
||||
from src.chat.message_receive.message import MessageSending, MessageRecv
|
||||
from src.chat.utils.chat_message_builder import get_raw_msg_before_timestamp_with_chat
|
||||
from src.person_info.person_info import get_person_info_manager
|
||||
@@ -66,7 +66,7 @@ async def _send_to_target(
|
||||
bool: 是否发送成功
|
||||
"""
|
||||
try:
|
||||
logger.info(f"[SendAPI] 发送{message_type}消息到 {stream_id}")
|
||||
logger.debug(f"[SendAPI] 发送{message_type}消息到 {stream_id}")
|
||||
|
||||
# 查找目标聊天流
|
||||
target_stream = get_chat_manager().get_stream(stream_id)
|
||||
@@ -116,7 +116,7 @@ async def _send_to_target(
|
||||
)
|
||||
|
||||
if sent_msg:
|
||||
logger.info(f"[SendAPI] 成功发送消息到 {stream_id}")
|
||||
logger.debug(f"[SendAPI] 成功发送消息到 {stream_id}")
|
||||
return True
|
||||
else:
|
||||
logger.error("[SendAPI] 发送消息失败")
|
||||
|
||||
@@ -44,7 +44,6 @@ class BaseAction(ABC):
|
||||
reasoning: 执行该动作的理由
|
||||
cycle_timers: 计时器字典
|
||||
thinking_id: 思考ID
|
||||
observations: 观察列表
|
||||
expressor: 表达器对象
|
||||
replyer: 回复器对象
|
||||
chat_stream: 聊天流对象
|
||||
|
||||
@@ -18,7 +18,7 @@ class EmojiAction(BaseAction):
|
||||
"""表情动作 - 发送表情包"""
|
||||
|
||||
# 激活设置
|
||||
focus_activation_type = ActionActivationType.LLM_JUDGE
|
||||
focus_activation_type = ActionActivationType.RANDOM
|
||||
normal_activation_type = ActionActivationType.RANDOM
|
||||
mode_enable = ChatMode.ALL
|
||||
parallel_action = True
|
||||
|
||||
@@ -77,7 +77,7 @@ class NoReplyAction(BaseAction):
|
||||
|
||||
reason = self.action_data.get("reason", "")
|
||||
start_time = time.time()
|
||||
last_judge_time = 0 # 上次进行LLM判断的时间
|
||||
last_judge_time = start_time # 上次进行LLM判断的时间
|
||||
min_judge_interval = self._min_judge_interval # 最小判断间隔,从配置获取
|
||||
check_interval = 0.2 # 检查新消息的间隔,设为0.2秒提高响应性
|
||||
|
||||
@@ -357,7 +357,7 @@ class NoReplyAction(BaseAction):
|
||||
judge_history.append((current_time, judge_result, reason))
|
||||
|
||||
if judge_result == "需要回复":
|
||||
logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待")
|
||||
# logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待")
|
||||
|
||||
full_prompt = f"{global_config.bot.nickname}(你)的想法是:{reason}"
|
||||
await self.store_action_info(
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
import random
|
||||
import time
|
||||
from typing import List, Tuple, Type
|
||||
import asyncio
|
||||
|
||||
# 导入新插件系统
|
||||
from src.plugin_system import BasePlugin, register_plugin, BaseAction, ComponentInfo, ActionActivationType, ChatMode
|
||||
@@ -55,17 +56,24 @@ class ReplyAction(BaseAction):
|
||||
|
||||
async def execute(self) -> Tuple[bool, str]:
|
||||
"""执行回复动作"""
|
||||
logger.info(f"{self.log_prefix} 决定回复: {self.reasoning}")
|
||||
logger.info(f"{self.log_prefix} 决定进行回复")
|
||||
|
||||
start_time = self.action_data.get("loop_start_time", time.time())
|
||||
|
||||
try:
|
||||
success, reply_set = await generator_api.generate_reply(
|
||||
try:
|
||||
success, reply_set = await asyncio.wait_for(
|
||||
generator_api.generate_reply(
|
||||
action_data=self.action_data,
|
||||
chat_id=self.chat_id,
|
||||
request_type="focus.replyer",
|
||||
enable_tool=global_config.tool.enable_in_focus_chat,
|
||||
),
|
||||
timeout=global_config.chat.thinking_timeout,
|
||||
)
|
||||
except asyncio.TimeoutError:
|
||||
logger.warning(f"{self.log_prefix} 回复生成超时 ({global_config.chat.thinking_timeout}s)")
|
||||
return False, "timeout"
|
||||
|
||||
# 检查从start_time以来的新消息数量
|
||||
# 获取动作触发时间或使用默认值
|
||||
@@ -77,7 +85,7 @@ class ReplyAction(BaseAction):
|
||||
# 根据新消息数量决定是否使用reply_to
|
||||
need_reply = new_message_count >= random.randint(2, 5)
|
||||
logger.info(
|
||||
f"{self.log_prefix} 从{start_time}到{current_time}共有{new_message_count}条新消息,{'使用' if need_reply else '不使用'}reply_to"
|
||||
f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,{'使用' if need_reply else '不使用'}引用回复"
|
||||
)
|
||||
|
||||
# 构建回复文本
|
||||
@@ -141,7 +149,7 @@ class CoreActionsPlugin(BasePlugin):
|
||||
config_schema = {
|
||||
"plugin": {
|
||||
"enabled": ConfigField(type=bool, default=True, description="是否启用插件"),
|
||||
"config_version": ConfigField(type=str, default="0.2.0", description="配置文件版本"),
|
||||
"config_version": ConfigField(type=str, default="0.3.1", description="配置文件版本"),
|
||||
},
|
||||
"components": {
|
||||
"enable_reply": ConfigField(type=bool, default=True, description="是否启用'回复'动作"),
|
||||
@@ -172,8 +180,15 @@ class CoreActionsPlugin(BasePlugin):
|
||||
"""返回插件包含的组件列表"""
|
||||
|
||||
# --- 从配置动态设置Action/Command ---
|
||||
emoji_chance = global_config.normal_chat.emoji_chance
|
||||
emoji_chance = global_config.emoji.emoji_chance
|
||||
if global_config.emoji.emoji_activate_type == "random":
|
||||
EmojiAction.random_activation_probability = emoji_chance
|
||||
EmojiAction.focus_activation_type = ActionActivationType.RANDOM
|
||||
EmojiAction.normal_activation_type = ActionActivationType.RANDOM
|
||||
elif global_config.emoji.emoji_activate_type == "llm":
|
||||
EmojiAction.random_activation_probability = 0.0
|
||||
EmojiAction.focus_activation_type = ActionActivationType.LLM_JUDGE
|
||||
EmojiAction.normal_activation_type = ActionActivationType.LLM_JUDGE
|
||||
|
||||
no_reply_probability = self.get_config("no_reply.random_probability", 0.8)
|
||||
NoReplyAction.random_activation_probability = no_reply_probability
|
||||
@@ -206,127 +221,3 @@ class CoreActionsPlugin(BasePlugin):
|
||||
# components.append((DeepReplyAction.get_action_info(), DeepReplyAction))
|
||||
|
||||
return components
|
||||
|
||||
|
||||
# class DeepReplyAction(BaseAction):
|
||||
# """回复动作 - 参与聊天回复"""
|
||||
|
||||
# # 激活设置
|
||||
# focus_activation_type = ActionActivationType.ALWAYS
|
||||
# normal_activation_type = ActionActivationType.NEVER
|
||||
# mode_enable = ChatMode.FOCUS
|
||||
# parallel_action = False
|
||||
|
||||
# # 动作基本信息
|
||||
# action_name = "deep_reply"
|
||||
# action_description = "参与聊天回复,关注某个话题,对聊天内容进行深度思考,给出回复"
|
||||
|
||||
# # 动作参数定义
|
||||
# action_parameters = {
|
||||
# "topic": "想要思考的话题"
|
||||
# }
|
||||
|
||||
# # 动作使用场景
|
||||
# action_require = ["有些问题需要深度思考", "某个问题可能涉及多个方面", "某个问题涉及专业领域或者需要专业知识","这个问题讨论的很激烈,需要深度思考"]
|
||||
|
||||
# # 关联类型
|
||||
# associated_types = ["text"]
|
||||
|
||||
# async def execute(self) -> Tuple[bool, str]:
|
||||
# """执行回复动作"""
|
||||
# logger.info(f"{self.log_prefix} 决定深度思考")
|
||||
|
||||
# try:
|
||||
# # 获取聊天观察
|
||||
# chatting_observation = self._get_chatting_observation()
|
||||
# if not chatting_observation:
|
||||
# return False, "未找到聊天观察"
|
||||
|
||||
# talking_message_str = chatting_observation.talking_message_str
|
||||
|
||||
# # 处理回复目标
|
||||
# chat_stream = self.api.get_service("chat_stream")
|
||||
# anchor_message = await create_empty_anchor_message(chat_stream.platform, chat_stream.group_info, chat_stream)
|
||||
|
||||
|
||||
# llm_model = self.api.get_available_models().replyer_1
|
||||
|
||||
# prompt = f"""
|
||||
# {talking_message_str}
|
||||
|
||||
# 在上面的聊天中,你对{self.action_data.get("topic", "")}感兴趣,形成深刻观点,请你思考,总结成一份学术论文,APA标准格式
|
||||
# """
|
||||
|
||||
# success, response, reasoning, model_name = await self.api.generate_with_model(prompt, llm_model)
|
||||
|
||||
# print(prompt)
|
||||
# print(f"DeepReplyAction: {response}")
|
||||
|
||||
# # prompt = f"""
|
||||
# # {talking_message_str}
|
||||
|
||||
# # 在上面的聊天中,你对{self.action_data.get("topic", "")}感兴趣,请你思考
|
||||
# # """
|
||||
|
||||
# extra_info_block = self.action_data.get("extra_info_block", "")
|
||||
# extra_info_block += response
|
||||
# # extra_info_block += f"\n--------------------------------\n注意,这是最重要的内容!!!!!你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以下方聊天记录的回复要求不再适用,请你自由的表达,不论字数长短限制\n\n--------------------------------\n注意,你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以下方聊天记录的回复要求不再适用,请你自由的表达,不论字数长短限制\n"
|
||||
# # extra_info_block += f"\n--------------------------------\n注意,优先关注这句!!!!你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以下方聊天记录的回复要求不再适用,请你自由的表达,不论字数长短限制\n\n--------------------------------\n注意,你现在可以用比较长的篇幅来表达你的观点,不要只回复一个字或者几个字\n由于你进入了深度思考模式,所以其他的回复要求不再适用,请你自由的表达,不论字数长短限制\n"
|
||||
# self.action_data["extra_info_block"] = extra_info_block
|
||||
|
||||
|
||||
# # 获取回复器服务
|
||||
# # replyer = self.api.get_service("replyer")
|
||||
# # if not replyer:
|
||||
# # logger.error(f"{self.log_prefix} 未找到回复器服务")
|
||||
# # return False, "回复器服务不可用"
|
||||
|
||||
# # await self.send_message_by_expressor(extra_info_block)
|
||||
# await self.send_text(extra_info_block)
|
||||
# # 执行回复
|
||||
# # success, reply_set = await replyer.deal_reply(
|
||||
# # cycle_timers=self.cycle_timers,
|
||||
# # action_data=self.action_data,
|
||||
# # anchor_message=anchor_message,
|
||||
# # reasoning=self.reasoning,
|
||||
# # thinking_id=self.thinking_id,
|
||||
# # )
|
||||
|
||||
# # 构建回复文本
|
||||
# reply_text = "self._build_reply_text(reply_set)"
|
||||
|
||||
# # 存储动作记录
|
||||
# await self.api.store_action_info(
|
||||
# action_build_into_prompt=False,
|
||||
# action_prompt_display=reply_text,
|
||||
# action_done=True,
|
||||
# thinking_id=self.thinking_id,
|
||||
# action_data=self.action_data,
|
||||
# )
|
||||
|
||||
# # 重置NoReplyAction的连续计数器
|
||||
# NoReplyAction.reset_consecutive_count()
|
||||
|
||||
# return success, reply_text
|
||||
|
||||
# except Exception as e:
|
||||
# logger.error(f"{self.log_prefix} 回复动作执行失败: {e}")
|
||||
# return False, f"回复失败: {str(e)}"
|
||||
|
||||
# def _get_chatting_observation(self) -> Optional[ChattingObservation]:
|
||||
# """获取聊天观察对象"""
|
||||
# observations = self.api.get_service("observations") or []
|
||||
# for obs in observations:
|
||||
# if isinstance(obs, ChattingObservation):
|
||||
# return obs
|
||||
# return None
|
||||
|
||||
|
||||
# def _build_reply_text(self, reply_set) -> str:
|
||||
# """构建回复文本"""
|
||||
# reply_text = ""
|
||||
# if reply_set:
|
||||
# for reply in reply_set:
|
||||
# data = reply[1]
|
||||
# reply_text += data
|
||||
# return reply_text
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
{
|
||||
"manifest_version": 1,
|
||||
"name": "豆包图片生成插件 (Doubao Image Generator)",
|
||||
"version": "2.0.0",
|
||||
"description": "基于火山引擎豆包模型的AI图片生成插件,支持智能LLM判定、高质量图片生成、结果缓存和多尺寸支持。",
|
||||
"author": {
|
||||
"name": "MaiBot团队",
|
||||
"url": "https://github.com/MaiM-with-u"
|
||||
},
|
||||
"license": "GPL-v3.0-or-later",
|
||||
|
||||
"host_application": {
|
||||
"min_version": "0.8.0",
|
||||
"max_version": "0.8.10"
|
||||
},
|
||||
"homepage_url": "https://github.com/MaiM-with-u/maibot",
|
||||
"repository_url": "https://github.com/MaiM-with-u/maibot",
|
||||
"keywords": ["ai", "image", "generation", "doubao", "volcengine", "art"],
|
||||
"categories": ["AI Tools", "Image Processing", "Content Generation"],
|
||||
|
||||
"default_locale": "zh-CN",
|
||||
"locales_path": "_locales",
|
||||
|
||||
"plugin_info": {
|
||||
"is_built_in": true,
|
||||
"plugin_type": "content_generator",
|
||||
"api_dependencies": ["volcengine"],
|
||||
"components": [
|
||||
{
|
||||
"type": "action",
|
||||
"name": "doubao_image_generation",
|
||||
"description": "根据描述使用火山引擎豆包API生成高质量图片",
|
||||
"activation_modes": ["llm_judge", "keyword"],
|
||||
"keywords": ["画", "图片", "生成", "画画", "绘制"]
|
||||
}
|
||||
],
|
||||
"features": [
|
||||
"智能LLM判定生成时机",
|
||||
"高质量AI图片生成",
|
||||
"结果缓存机制",
|
||||
"多种图片尺寸支持",
|
||||
"完整的错误处理"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,477 +0,0 @@
|
||||
"""
|
||||
豆包图片生成插件
|
||||
|
||||
基于火山引擎豆包模型的AI图片生成插件。
|
||||
|
||||
功能特性:
|
||||
- 智能LLM判定:根据聊天内容智能判断是否需要生成图片
|
||||
- 高质量图片生成:使用豆包Seed Dream模型生成图片
|
||||
- 结果缓存:避免重复生成相同内容的图片
|
||||
- 配置验证:自动验证和修复配置文件
|
||||
- 参数验证:完整的输入参数验证和错误处理
|
||||
- 多尺寸支持:支持多种图片尺寸生成
|
||||
|
||||
包含组件:
|
||||
- 图片生成Action - 根据描述使用火山引擎API生成图片
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
import base64
|
||||
import traceback
|
||||
from typing import List, Tuple, Type, Optional
|
||||
|
||||
# 导入新插件系统
|
||||
from src.plugin_system.base.base_plugin import BasePlugin
|
||||
from src.plugin_system.base.base_plugin import register_plugin
|
||||
from src.plugin_system.base.base_action import BaseAction
|
||||
from src.plugin_system.base.component_types import ComponentInfo, ActionActivationType, ChatMode
|
||||
from src.plugin_system.base.config_types import ConfigField
|
||||
from src.common.logger import get_logger
|
||||
|
||||
logger = get_logger("doubao_pic_plugin")
|
||||
|
||||
|
||||
# ===== Action组件 =====
|
||||
|
||||
|
||||
class DoubaoImageGenerationAction(BaseAction):
|
||||
"""豆包图片生成Action - 根据描述使用火山引擎API生成图片"""
|
||||
|
||||
# 激活设置
|
||||
focus_activation_type = ActionActivationType.LLM_JUDGE # Focus模式使用LLM判定,精确理解需求
|
||||
normal_activation_type = ActionActivationType.KEYWORD # Normal模式使用关键词激活,快速响应
|
||||
mode_enable = ChatMode.ALL
|
||||
parallel_action = True
|
||||
|
||||
# 动作基本信息
|
||||
action_name = "doubao_image_generation"
|
||||
action_description = (
|
||||
"可以根据特定的描述,生成并发送一张图片,如果没提供描述,就根据聊天内容生成,你可以立刻画好,不用等待"
|
||||
)
|
||||
|
||||
# 关键词设置(用于Normal模式)
|
||||
activation_keywords = ["画", "绘制", "生成图片", "画图", "draw", "paint", "图片生成"]
|
||||
keyword_case_sensitive = False
|
||||
|
||||
# LLM判定提示词(用于Focus模式)
|
||||
llm_judge_prompt = """
|
||||
判定是否需要使用图片生成动作的条件:
|
||||
1. 用户明确要求画图、生成图片或创作图像
|
||||
2. 用户描述了想要看到的画面或场景
|
||||
3. 对话中提到需要视觉化展示某些概念
|
||||
4. 用户想要创意图片或艺术作品
|
||||
|
||||
适合使用的情况:
|
||||
- "画一张..."、"画个..."、"生成图片"
|
||||
- "我想看看...的样子"
|
||||
- "能画出...吗"
|
||||
- "创作一幅..."
|
||||
|
||||
绝对不要使用的情况:
|
||||
1. 纯文字聊天和问答
|
||||
2. 只是提到"图片"、"画"等词但不是要求生成
|
||||
3. 谈论已存在的图片或照片
|
||||
4. 技术讨论中提到绘图概念但无生成需求
|
||||
5. 用户明确表示不需要图片时
|
||||
"""
|
||||
|
||||
# 动作参数定义
|
||||
action_parameters = {
|
||||
"description": "图片描述,输入你想要生成并发送的图片的描述,必填",
|
||||
"size": "图片尺寸,例如 '1024x1024' (可选, 默认从配置或 '1024x1024')",
|
||||
}
|
||||
|
||||
# 动作使用场景
|
||||
action_require = [
|
||||
"当有人让你画东西时使用,你可以立刻画好,不用等待",
|
||||
"当有人要求你生成并发送一张图片时使用",
|
||||
"当有人让你画一张图时使用",
|
||||
]
|
||||
|
||||
# 关联类型
|
||||
associated_types = ["image", "text"]
|
||||
|
||||
# 简单的请求缓存,避免短时间内重复请求
|
||||
_request_cache = {}
|
||||
_cache_max_size = 10
|
||||
|
||||
async def execute(self) -> Tuple[bool, Optional[str]]:
|
||||
"""执行图片生成动作"""
|
||||
logger.info(f"{self.log_prefix} 执行豆包图片生成动作")
|
||||
|
||||
# 配置验证
|
||||
http_base_url = self.api.get_config("api.base_url")
|
||||
http_api_key = self.api.get_config("api.volcano_generate_api_key")
|
||||
|
||||
if not (http_base_url and http_api_key):
|
||||
error_msg = "抱歉,图片生成功能所需的HTTP配置(如API地址或密钥)不完整,无法提供服务。"
|
||||
await self.send_text(error_msg)
|
||||
logger.error(f"{self.log_prefix} HTTP调用配置缺失: base_url 或 volcano_generate_api_key.")
|
||||
return False, "HTTP配置不完整"
|
||||
|
||||
# API密钥验证
|
||||
if http_api_key == "YOUR_DOUBAO_API_KEY_HERE":
|
||||
error_msg = "图片生成功能尚未配置,请设置正确的API密钥。"
|
||||
await self.send_text(error_msg)
|
||||
logger.error(f"{self.log_prefix} API密钥未配置")
|
||||
return False, "API密钥未配置"
|
||||
|
||||
# 参数验证
|
||||
description = self.action_data.get("description")
|
||||
if not description or not description.strip():
|
||||
logger.warning(f"{self.log_prefix} 图片描述为空,无法生成图片。")
|
||||
await self.send_text("你需要告诉我想要画什么样的图片哦~ 比如说'画一只可爱的小猫'")
|
||||
return False, "图片描述为空"
|
||||
|
||||
# 清理和验证描述
|
||||
description = description.strip()
|
||||
if len(description) > 1000: # 限制描述长度
|
||||
description = description[:1000]
|
||||
logger.info(f"{self.log_prefix} 图片描述过长,已截断")
|
||||
|
||||
# 获取配置
|
||||
default_model = self.api.get_config("generation.default_model", "doubao-seedream-3-0-t2i-250415")
|
||||
image_size = self.action_data.get("size", self.api.get_config("generation.default_size", "1024x1024"))
|
||||
|
||||
# 验证图片尺寸格式
|
||||
if not self._validate_image_size(image_size):
|
||||
logger.warning(f"{self.log_prefix} 无效的图片尺寸: {image_size},使用默认值")
|
||||
image_size = "1024x1024"
|
||||
|
||||
# 检查缓存
|
||||
cache_key = self._get_cache_key(description, default_model, image_size)
|
||||
if cache_key in self._request_cache:
|
||||
cached_result = self._request_cache[cache_key]
|
||||
logger.info(f"{self.log_prefix} 使用缓存的图片结果")
|
||||
await self.send_text("我之前画过类似的图片,用之前的结果~")
|
||||
|
||||
# 直接发送缓存的结果
|
||||
send_success = await self._send_image(cached_result)
|
||||
if send_success:
|
||||
await self.send_text("图片已发送!")
|
||||
return True, "图片已发送(缓存)"
|
||||
else:
|
||||
# 缓存失败,清除这个缓存项并继续正常流程
|
||||
del self._request_cache[cache_key]
|
||||
|
||||
# 获取其他配置参数
|
||||
guidance_scale_val = self._get_guidance_scale()
|
||||
seed_val = self._get_seed()
|
||||
watermark_val = self._get_watermark()
|
||||
|
||||
await self.send_text(
|
||||
f"收到!正在为您生成关于 '{description}' 的图片,请稍候...(模型: {default_model}, 尺寸: {image_size})"
|
||||
)
|
||||
|
||||
try:
|
||||
success, result = await asyncio.to_thread(
|
||||
self._make_http_image_request,
|
||||
prompt=description,
|
||||
model=default_model,
|
||||
size=image_size,
|
||||
seed=seed_val,
|
||||
guidance_scale=guidance_scale_val,
|
||||
watermark=watermark_val,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} (HTTP) 异步请求执行失败: {e!r}", exc_info=True)
|
||||
traceback.print_exc()
|
||||
success = False
|
||||
result = f"图片生成服务遇到意外问题: {str(e)[:100]}"
|
||||
|
||||
if success:
|
||||
image_url = result
|
||||
# print(f"image_url: {image_url}")
|
||||
# print(f"result: {result}")
|
||||
logger.info(f"{self.log_prefix} 图片URL获取成功: {image_url[:70]}... 下载并编码.")
|
||||
|
||||
try:
|
||||
encode_success, encode_result = await asyncio.to_thread(self._download_and_encode_base64, image_url)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} (B64) 异步下载/编码失败: {e!r}", exc_info=True)
|
||||
traceback.print_exc()
|
||||
encode_success = False
|
||||
encode_result = f"图片下载或编码时发生内部错误: {str(e)[:100]}"
|
||||
|
||||
if encode_success:
|
||||
base64_image_string = encode_result
|
||||
send_success = await self._send_image(base64_image_string)
|
||||
if send_success:
|
||||
# 缓存成功的结果
|
||||
self._request_cache[cache_key] = base64_image_string
|
||||
self._cleanup_cache()
|
||||
|
||||
await self.send_message_by_expressor("图片已发送!")
|
||||
return True, "图片已成功生成并发送"
|
||||
else:
|
||||
print(f"send_success: {send_success}")
|
||||
await self.send_message_by_expressor("图片已处理为Base64,但发送失败了。")
|
||||
return False, "图片发送失败 (Base64)"
|
||||
else:
|
||||
await self.send_message_by_expressor(f"获取到图片URL,但在处理图片时失败了:{encode_result}")
|
||||
return False, f"图片处理失败(Base64): {encode_result}"
|
||||
else:
|
||||
error_message = result
|
||||
await self.send_message_by_expressor(f"哎呀,生成图片时遇到问题:{error_message}")
|
||||
return False, f"图片生成失败: {error_message}"
|
||||
|
||||
def _get_guidance_scale(self) -> float:
|
||||
"""获取guidance_scale配置值"""
|
||||
guidance_scale_input = self.api.get_config("generation.default_guidance_scale", 2.5)
|
||||
try:
|
||||
return float(guidance_scale_input)
|
||||
except (ValueError, TypeError):
|
||||
logger.warning(f"{self.log_prefix} default_guidance_scale 值无效,使用默认值 2.5")
|
||||
return 2.5
|
||||
|
||||
def _get_seed(self) -> int:
|
||||
"""获取seed配置值"""
|
||||
seed_config_value = self.api.get_config("generation.default_seed")
|
||||
if seed_config_value is not None:
|
||||
try:
|
||||
return int(seed_config_value)
|
||||
except (ValueError, TypeError):
|
||||
logger.warning(f"{self.log_prefix} default_seed 值无效,使用默认值 42")
|
||||
return 42
|
||||
|
||||
def _get_watermark(self) -> bool:
|
||||
"""获取watermark配置值"""
|
||||
watermark_source = self.api.get_config("generation.default_watermark", True)
|
||||
if isinstance(watermark_source, bool):
|
||||
return watermark_source
|
||||
elif isinstance(watermark_source, str):
|
||||
return watermark_source.lower() == "true"
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} default_watermark 值无效,使用默认值 True")
|
||||
return True
|
||||
|
||||
async def _send_image(self, base64_image: str) -> bool:
|
||||
"""发送图片"""
|
||||
try:
|
||||
# 使用聊天流信息确定发送目标
|
||||
chat_stream = self.api.get_service("chat_stream")
|
||||
if not chat_stream:
|
||||
logger.error(f"{self.log_prefix} 没有可用的聊天流发送图片")
|
||||
return False
|
||||
|
||||
if chat_stream.group_info:
|
||||
# 群聊
|
||||
return await self.api.send_message_to_target(
|
||||
message_type="image",
|
||||
content=base64_image,
|
||||
platform=chat_stream.platform,
|
||||
target_id=str(chat_stream.group_info.group_id),
|
||||
is_group=True,
|
||||
display_message="发送生成的图片",
|
||||
)
|
||||
else:
|
||||
# 私聊
|
||||
return await self.api.send_message_to_target(
|
||||
message_type="image",
|
||||
content=base64_image,
|
||||
platform=chat_stream.platform,
|
||||
target_id=str(chat_stream.user_info.user_id),
|
||||
is_group=False,
|
||||
display_message="发送生成的图片",
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 发送图片时出错: {e}")
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def _get_cache_key(cls, description: str, model: str, size: str) -> str:
|
||||
"""生成缓存键"""
|
||||
return f"{description[:100]}|{model}|{size}"
|
||||
|
||||
@classmethod
|
||||
def _cleanup_cache(cls):
|
||||
"""清理缓存,保持大小在限制内"""
|
||||
if len(cls._request_cache) > cls._cache_max_size:
|
||||
keys_to_remove = list(cls._request_cache.keys())[: -cls._cache_max_size // 2]
|
||||
for key in keys_to_remove:
|
||||
del cls._request_cache[key]
|
||||
|
||||
def _validate_image_size(self, image_size: str) -> bool:
|
||||
"""验证图片尺寸格式"""
|
||||
try:
|
||||
width, height = map(int, image_size.split("x"))
|
||||
return 100 <= width <= 10000 and 100 <= height <= 10000
|
||||
except (ValueError, TypeError):
|
||||
return False
|
||||
|
||||
def _download_and_encode_base64(self, image_url: str) -> Tuple[bool, str]:
|
||||
"""下载图片并将其编码为Base64字符串"""
|
||||
logger.info(f"{self.log_prefix} (B64) 下载并编码图片: {image_url[:70]}...")
|
||||
try:
|
||||
with urllib.request.urlopen(image_url, timeout=30) as response:
|
||||
if response.status == 200:
|
||||
image_bytes = response.read()
|
||||
base64_encoded_image = base64.b64encode(image_bytes).decode("utf-8")
|
||||
logger.info(f"{self.log_prefix} (B64) 图片下载编码完成. Base64长度: {len(base64_encoded_image)}")
|
||||
return True, base64_encoded_image
|
||||
else:
|
||||
error_msg = f"下载图片失败 (状态: {response.status})"
|
||||
logger.error(f"{self.log_prefix} (B64) {error_msg} URL: {image_url}")
|
||||
return False, error_msg
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} (B64) 下载或编码时错误: {e!r}", exc_info=True)
|
||||
traceback.print_exc()
|
||||
return False, f"下载或编码图片时发生错误: {str(e)[:100]}"
|
||||
|
||||
def _make_http_image_request(
|
||||
self, prompt: str, model: str, size: str, seed: int, guidance_scale: float, watermark: bool
|
||||
) -> Tuple[bool, str]:
|
||||
"""发送HTTP请求生成图片"""
|
||||
base_url = self.api.get_config("api.base_url")
|
||||
generate_api_key = self.api.get_config("api.volcano_generate_api_key")
|
||||
|
||||
endpoint = f"{base_url.rstrip('/')}/images/generations"
|
||||
|
||||
payload_dict = {
|
||||
"model": model,
|
||||
"prompt": prompt,
|
||||
"response_format": "url",
|
||||
"size": size,
|
||||
"guidance_scale": guidance_scale,
|
||||
"watermark": watermark,
|
||||
"seed": seed,
|
||||
"api-key": generate_api_key,
|
||||
}
|
||||
|
||||
data = json.dumps(payload_dict).encode("utf-8")
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json",
|
||||
"Authorization": f"Bearer {generate_api_key}",
|
||||
}
|
||||
|
||||
logger.info(f"{self.log_prefix} (HTTP) 发起图片请求: {model}, Prompt: {prompt[:30]}... To: {endpoint}")
|
||||
|
||||
req = urllib.request.Request(endpoint, data=data, headers=headers, method="POST")
|
||||
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=60) as response:
|
||||
response_status = response.status
|
||||
response_body_bytes = response.read()
|
||||
response_body_str = response_body_bytes.decode("utf-8")
|
||||
|
||||
logger.info(f"{self.log_prefix} (HTTP) 响应: {response_status}. Preview: {response_body_str[:150]}...")
|
||||
|
||||
if 200 <= response_status < 300:
|
||||
response_data = json.loads(response_body_str)
|
||||
image_url = None
|
||||
if (
|
||||
isinstance(response_data.get("data"), list)
|
||||
and response_data["data"]
|
||||
and isinstance(response_data["data"][0], dict)
|
||||
):
|
||||
image_url = response_data["data"][0].get("url")
|
||||
elif response_data.get("url"):
|
||||
image_url = response_data.get("url")
|
||||
|
||||
if image_url:
|
||||
logger.info(f"{self.log_prefix} (HTTP) 图片生成成功,URL: {image_url[:70]}...")
|
||||
return True, image_url
|
||||
else:
|
||||
logger.error(f"{self.log_prefix} (HTTP) API成功但无图片URL")
|
||||
return False, "图片生成API响应成功但未找到图片URL"
|
||||
else:
|
||||
logger.error(f"{self.log_prefix} (HTTP) API请求失败. 状态: {response.status}")
|
||||
return False, f"图片API请求失败(状态码 {response.status})"
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} (HTTP) 图片生成时意外错误: {e!r}", exc_info=True)
|
||||
traceback.print_exc()
|
||||
return False, f"图片生成HTTP请求时发生意外错误: {str(e)[:100]}"
|
||||
|
||||
|
||||
# ===== 插件主类 =====
|
||||
|
||||
|
||||
@register_plugin
|
||||
class DoubaoImagePlugin(BasePlugin):
|
||||
"""豆包图片生成插件
|
||||
|
||||
基于火山引擎豆包模型的AI图片生成插件:
|
||||
- 图片生成Action:根据描述使用火山引擎API生成图片
|
||||
"""
|
||||
|
||||
# 插件基本信息
|
||||
plugin_name = "doubao_pic_plugin" # 内部标识符
|
||||
enable_plugin = True
|
||||
config_file_name = "config.toml"
|
||||
|
||||
# 配置节描述
|
||||
config_section_descriptions = {
|
||||
"plugin": "插件基本信息配置",
|
||||
"api": "API相关配置,包含火山引擎API的访问信息",
|
||||
"generation": "图片生成参数配置,控制生成图片的各种参数",
|
||||
"cache": "结果缓存配置",
|
||||
"components": "组件启用配置",
|
||||
}
|
||||
|
||||
# 配置Schema定义
|
||||
config_schema = {
|
||||
"plugin": {
|
||||
"name": ConfigField(type=str, default="doubao_pic_plugin", description="插件名称", required=True),
|
||||
"version": ConfigField(type=str, default="2.0.0", description="插件版本号"),
|
||||
"enabled": ConfigField(type=bool, default=False, description="是否启用插件"),
|
||||
"description": ConfigField(
|
||||
type=str, default="基于火山引擎豆包模型的AI图片生成插件", description="插件描述", required=True
|
||||
),
|
||||
},
|
||||
"api": {
|
||||
"base_url": ConfigField(
|
||||
type=str,
|
||||
default="https://ark.cn-beijing.volces.com/api/v3",
|
||||
description="API基础URL",
|
||||
example="https://api.example.com/v1",
|
||||
),
|
||||
"volcano_generate_api_key": ConfigField(
|
||||
type=str, default="YOUR_DOUBAO_API_KEY_HERE", description="火山引擎豆包API密钥", required=True
|
||||
),
|
||||
},
|
||||
"generation": {
|
||||
"default_model": ConfigField(
|
||||
type=str,
|
||||
default="doubao-seedream-3-0-t2i-250415",
|
||||
description="默认使用的文生图模型",
|
||||
choices=["doubao-seedream-3-0-t2i-250415", "doubao-seedream-2-0-t2i"],
|
||||
),
|
||||
"default_size": ConfigField(
|
||||
type=str,
|
||||
default="1024x1024",
|
||||
description="默认图片尺寸",
|
||||
example="1024x1024",
|
||||
choices=["1024x1024", "1024x1280", "1280x1024", "1024x1536", "1536x1024"],
|
||||
),
|
||||
"default_watermark": ConfigField(type=bool, default=True, description="是否默认添加水印"),
|
||||
"default_guidance_scale": ConfigField(
|
||||
type=float, default=2.5, description="模型指导强度,影响图片与提示的关联性", example="2.0"
|
||||
),
|
||||
"default_seed": ConfigField(type=int, default=42, description="随机种子,用于复现图片"),
|
||||
},
|
||||
"cache": {
|
||||
"enabled": ConfigField(type=bool, default=True, description="是否启用请求缓存"),
|
||||
"max_size": ConfigField(type=int, default=10, description="最大缓存数量"),
|
||||
},
|
||||
"components": {
|
||||
"enable_image_generation": ConfigField(type=bool, default=True, description="是否启用图片生成Action")
|
||||
},
|
||||
}
|
||||
|
||||
def get_plugin_components(self) -> List[Tuple[ComponentInfo, Type]]:
|
||||
"""返回插件包含的组件列表"""
|
||||
|
||||
# 从配置获取组件启用状态
|
||||
enable_image_generation = self.get_config("components.enable_image_generation", True)
|
||||
|
||||
components = []
|
||||
|
||||
# 添加图片生成Action
|
||||
if enable_image_generation:
|
||||
components.append((DoubaoImageGenerationAction.get_action_info(), DoubaoImageGenerationAction))
|
||||
|
||||
return components
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"manifest_version": 1,
|
||||
"name": "群聊禁言管理插件 (Mute Plugin)",
|
||||
"version": "3.0.0",
|
||||
"description": "群聊禁言管理插件,提供智能禁言功能",
|
||||
"author": {
|
||||
"name": "MaiBot开发团队",
|
||||
"url": "https://github.com/MaiM-with-u"
|
||||
},
|
||||
"license": "GPL-v3.0-or-later",
|
||||
"host_application": {
|
||||
"min_version": "0.8.0",
|
||||
"max_version": "0.8.10"
|
||||
},
|
||||
"keywords": ["mute", "ban", "moderation", "admin", "management", "group"],
|
||||
"categories": ["Moderation", "Group Management", "Admin Tools"],
|
||||
"default_locale": "zh-CN",
|
||||
"locales_path": "_locales"
|
||||
}
|
||||
@@ -1,563 +0,0 @@
|
||||
"""
|
||||
禁言插件
|
||||
|
||||
提供智能禁言功能的群聊管理插件。
|
||||
|
||||
功能特性:
|
||||
- 智能LLM判定:根据聊天内容智能判断是否需要禁言
|
||||
- 灵活的时长管理:支持自定义禁言时长限制
|
||||
- 模板化消息:支持自定义禁言提示消息
|
||||
- 参数验证:完整的输入参数验证和错误处理
|
||||
- 配置文件支持:所有设置可通过配置文件调整
|
||||
- 权限管理:支持用户权限和群组权限控制
|
||||
|
||||
包含组件:
|
||||
- 智能禁言Action - 基于LLM判断是否需要禁言(支持群组权限控制)
|
||||
- 禁言命令Command - 手动执行禁言操作(支持用户权限控制)
|
||||
"""
|
||||
|
||||
from typing import List, Tuple, Type, Optional
|
||||
import random
|
||||
|
||||
# 导入新插件系统
|
||||
from src.plugin_system.base.base_plugin import BasePlugin
|
||||
from src.plugin_system.base.base_plugin import register_plugin
|
||||
from src.plugin_system.base.base_action import BaseAction
|
||||
from src.plugin_system.base.base_command import BaseCommand
|
||||
from src.plugin_system.base.component_types import ComponentInfo, ActionActivationType, ChatMode
|
||||
from src.plugin_system.base.config_types import ConfigField
|
||||
from src.common.logger import get_logger
|
||||
|
||||
# 导入配置API(可选的简便方法)
|
||||
from src.plugin_system.apis import person_api, generator_api
|
||||
|
||||
logger = get_logger("mute_plugin")
|
||||
|
||||
|
||||
# ===== Action组件 =====
|
||||
|
||||
|
||||
class MuteAction(BaseAction):
|
||||
"""智能禁言Action - 基于LLM智能判断是否需要禁言"""
|
||||
|
||||
# 激活设置
|
||||
focus_activation_type = ActionActivationType.LLM_JUDGE # Focus模式使用LLM判定,确保谨慎
|
||||
normal_activation_type = ActionActivationType.KEYWORD # Normal模式使用关键词激活,快速响应
|
||||
mode_enable = ChatMode.ALL
|
||||
parallel_action = False
|
||||
|
||||
# 动作基本信息
|
||||
action_name = "mute"
|
||||
action_description = "智能禁言系统,基于LLM判断是否需要禁言"
|
||||
|
||||
# 关键词设置(用于Normal模式)
|
||||
activation_keywords = ["禁言", "mute", "ban", "silence"]
|
||||
keyword_case_sensitive = False
|
||||
|
||||
# LLM判定提示词(用于Focus模式)
|
||||
llm_judge_prompt = """
|
||||
判定是否需要使用禁言动作的严格条件:
|
||||
|
||||
使用禁言的情况:
|
||||
1. 用户发送明显违规内容(色情、暴力、政治敏感等)
|
||||
2. 恶意刷屏或垃圾信息轰炸
|
||||
3. 用户主动明确要求被禁言("禁言我"等)
|
||||
4. 严重违反群规的行为
|
||||
5. 恶意攻击他人或群组管理
|
||||
|
||||
绝对不要使用的情况:
|
||||
2. 情绪化表达但无恶意
|
||||
3. 开玩笑或调侃,除非过分
|
||||
4. 单纯的意见分歧或争论
|
||||
|
||||
"""
|
||||
|
||||
# 动作参数定义
|
||||
action_parameters = {
|
||||
"target": "禁言对象,必填,输入你要禁言的对象的名字,请仔细思考不要弄错禁言对象",
|
||||
"duration": "禁言时长,必填,输入你要禁言的时长(秒),单位为秒,必须为数字",
|
||||
"reason": "禁言理由,可选",
|
||||
}
|
||||
|
||||
# 动作使用场景
|
||||
action_require = [
|
||||
"当有人违反了公序良俗的内容",
|
||||
"当有人刷屏时使用",
|
||||
"当有人发了擦边,或者色情内容时使用",
|
||||
"当有人要求禁言自己时使用",
|
||||
"如果某人已经被禁言了,就不要再次禁言了,除非你想追加时间!!",
|
||||
]
|
||||
|
||||
# 关联类型
|
||||
associated_types = ["text", "command"]
|
||||
|
||||
def _check_group_permission(self) -> Tuple[bool, Optional[str]]:
|
||||
"""检查当前群是否有禁言动作权限
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Optional[str]]: (是否有权限, 错误信息)
|
||||
"""
|
||||
# 如果不是群聊,直接返回False
|
||||
if not self.is_group:
|
||||
return False, "禁言动作只能在群聊中使用"
|
||||
|
||||
# 获取权限配置
|
||||
allowed_groups = self.get_config("permissions.allowed_groups", [])
|
||||
|
||||
# 如果配置为空,表示不启用权限控制
|
||||
if not allowed_groups:
|
||||
logger.info(f"{self.log_prefix} 群组权限未配置,允许所有群使用禁言动作")
|
||||
return True, None
|
||||
|
||||
# 检查当前群是否在允许列表中
|
||||
current_group_key = f"{self.platform}:{self.group_id}"
|
||||
for allowed_group in allowed_groups:
|
||||
if allowed_group == current_group_key:
|
||||
logger.info(f"{self.log_prefix} 群组 {current_group_key} 有禁言动作权限")
|
||||
return True, None
|
||||
|
||||
logger.warning(f"{self.log_prefix} 群组 {current_group_key} 没有禁言动作权限")
|
||||
return False, "当前群组没有使用禁言动作的权限"
|
||||
|
||||
async def execute(self) -> Tuple[bool, Optional[str]]:
|
||||
"""执行智能禁言判定"""
|
||||
logger.info(f"{self.log_prefix} 执行智能禁言动作")
|
||||
|
||||
# 首先检查群组权限
|
||||
has_permission, permission_error = self._check_group_permission()
|
||||
|
||||
# 获取参数
|
||||
target = self.action_data.get("target")
|
||||
duration = self.action_data.get("duration")
|
||||
reason = self.action_data.get("reason", "违反群规")
|
||||
|
||||
# 参数验证
|
||||
if not target:
|
||||
error_msg = "禁言目标不能为空"
|
||||
logger.error(f"{self.log_prefix} {error_msg}")
|
||||
await self.send_text("没有指定禁言对象呢~")
|
||||
return False, error_msg
|
||||
|
||||
if not duration:
|
||||
error_msg = "禁言时长不能为空"
|
||||
logger.error(f"{self.log_prefix} {error_msg}")
|
||||
await self.send_text("没有指定禁言时长呢~")
|
||||
return False, error_msg
|
||||
|
||||
# 获取时长限制配置
|
||||
min_duration = self.get_config("mute.min_duration", 60)
|
||||
max_duration = self.get_config("mute.max_duration", 2592000)
|
||||
|
||||
# 验证时长格式并转换
|
||||
try:
|
||||
duration_int = int(duration)
|
||||
if duration_int <= 0:
|
||||
error_msg = "禁言时长必须大于0"
|
||||
logger.error(f"{self.log_prefix} {error_msg}")
|
||||
await self.send_text("禁言时长必须是正数哦~")
|
||||
return False, error_msg
|
||||
|
||||
# 限制禁言时长范围
|
||||
if duration_int < min_duration:
|
||||
duration_int = min_duration
|
||||
logger.info(f"{self.log_prefix} 禁言时长过短,调整为{min_duration}秒")
|
||||
elif duration_int > max_duration:
|
||||
duration_int = max_duration
|
||||
logger.info(f"{self.log_prefix} 禁言时长过长,调整为{max_duration}秒")
|
||||
|
||||
except (ValueError, TypeError):
|
||||
error_msg = f"禁言时长格式无效: {duration}"
|
||||
logger.error(f"{self.log_prefix} {error_msg}")
|
||||
# await self.send_text("禁言时长必须是数字哦~")
|
||||
return False, error_msg
|
||||
|
||||
# 获取用户ID
|
||||
person_id = person_api.get_person_id_by_name(target)
|
||||
user_id = await person_api.get_person_value(person_id, "user_id")
|
||||
if not user_id:
|
||||
error_msg = f"未找到用户 {target} 的ID"
|
||||
await self.send_text(f"找不到 {target} 这个人呢~")
|
||||
logger.error(f"{self.log_prefix} {error_msg}")
|
||||
return False, error_msg
|
||||
|
||||
# 格式化时长显示
|
||||
enable_formatting = self.get_config("mute.enable_duration_formatting", True)
|
||||
time_str = self._format_duration(duration_int) if enable_formatting else f"{duration_int}秒"
|
||||
|
||||
# 获取模板化消息
|
||||
message = self._get_template_message(target, time_str, reason)
|
||||
|
||||
if not has_permission:
|
||||
logger.warning(f"{self.log_prefix} 权限检查失败: {permission_error}")
|
||||
result_status, result_message = await generator_api.rewrite_reply(
|
||||
chat_stream=self.chat_stream,
|
||||
reply_data={
|
||||
"raw_reply": "我想禁言{target},但是我没有权限",
|
||||
"reason": "表达自己没有在这个群禁言的能力",
|
||||
},
|
||||
)
|
||||
|
||||
if result_status:
|
||||
for reply_seg in result_message:
|
||||
data = reply_seg[1]
|
||||
await self.send_text(data)
|
||||
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=True,
|
||||
action_prompt_display=f"尝试禁言了用户 {target},但是没有权限,无法禁言",
|
||||
action_done=True,
|
||||
)
|
||||
|
||||
# 不发送错误消息,静默拒绝
|
||||
return False, permission_error
|
||||
|
||||
result_status, result_message = await generator_api.rewrite_reply(
|
||||
chat_stream=self.chat_stream,
|
||||
reply_data={
|
||||
"raw_reply": message,
|
||||
"reason": reason,
|
||||
},
|
||||
)
|
||||
|
||||
if result_status:
|
||||
for reply_seg in result_message:
|
||||
data = reply_seg[1]
|
||||
await self.send_text(data)
|
||||
|
||||
# 发送群聊禁言命令
|
||||
success = await self.send_command(
|
||||
command_name="GROUP_BAN", args={"qq_id": str(user_id), "duration": str(duration_int)}, storage_message=False
|
||||
)
|
||||
|
||||
if success:
|
||||
logger.info(f"{self.log_prefix} 成功发送禁言命令,用户 {target}({user_id}),时长 {duration_int} 秒")
|
||||
# 存储动作信息
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=True,
|
||||
action_prompt_display=f"尝试禁言了用户 {target},时长 {time_str},原因:{reason}",
|
||||
action_done=True,
|
||||
)
|
||||
return True, f"成功禁言 {target},时长 {time_str}"
|
||||
else:
|
||||
error_msg = "发送禁言命令失败"
|
||||
logger.error(f"{self.log_prefix} {error_msg}")
|
||||
|
||||
await self.send_text("执行禁言动作失败")
|
||||
return False, error_msg
|
||||
|
||||
def _get_template_message(self, target: str, duration_str: str, reason: str) -> str:
|
||||
"""获取模板化的禁言消息"""
|
||||
templates = self.get_config("mute.templates")
|
||||
|
||||
template = random.choice(templates)
|
||||
return template.format(target=target, duration=duration_str, reason=reason)
|
||||
|
||||
def _format_duration(self, seconds: int) -> str:
|
||||
"""将秒数格式化为可读的时间字符串"""
|
||||
if seconds < 60:
|
||||
return f"{seconds}秒"
|
||||
elif seconds < 3600:
|
||||
minutes = seconds // 60
|
||||
remaining_seconds = seconds % 60
|
||||
if remaining_seconds > 0:
|
||||
return f"{minutes}分{remaining_seconds}秒"
|
||||
else:
|
||||
return f"{minutes}分钟"
|
||||
elif seconds < 86400:
|
||||
hours = seconds // 3600
|
||||
remaining_minutes = (seconds % 3600) // 60
|
||||
if remaining_minutes > 0:
|
||||
return f"{hours}小时{remaining_minutes}分钟"
|
||||
else:
|
||||
return f"{hours}小时"
|
||||
else:
|
||||
days = seconds // 86400
|
||||
remaining_hours = (seconds % 86400) // 3600
|
||||
if remaining_hours > 0:
|
||||
return f"{days}天{remaining_hours}小时"
|
||||
else:
|
||||
return f"{days}天"
|
||||
|
||||
|
||||
# ===== Command组件 =====


class MuteCommand(BaseCommand):
    """禁言命令 - 手动执行禁言操作"""

    # Command基本信息
    command_name = "mute_command"
    command_description = "禁言命令,手动执行禁言操作"

    command_pattern = r"^/mute\s+(?P<target>\S+)\s+(?P<duration>\d+)(?:\s+(?P<reason>.+))?$"
    command_help = "禁言指定用户,用法:/mute <用户名> <时长(秒)> [理由]"
    command_examples = ["/mute 用户名 300", "/mute 张三 600 刷屏", "/mute @某人 1800 违规内容"]
    intercept_message = True  # 拦截消息处理

    def _check_user_permission(self) -> Tuple[bool, Optional[str]]:
        """检查当前用户是否有禁言命令权限

        Returns:
            Tuple[bool, Optional[str]]: (是否有权限, 错误信息)
        """
        # 获取当前用户信息
        chat_stream = self.message.chat_stream
        if not chat_stream:
            return False, "无法获取聊天流信息"

        current_platform = chat_stream.platform
        current_user_id = str(chat_stream.user_info.user_id)

        # 获取权限配置
        allowed_users = self.get_config("permissions.allowed_users", [])

        # 如果配置为空,表示不启用权限控制
        if not allowed_users:
            logger.info(f"{self.log_prefix} 用户权限未配置,允许所有用户使用禁言命令")
            return True, None

        # 检查当前用户是否在允许列表中
        current_user_key = f"{current_platform}:{current_user_id}"
        for allowed_user in allowed_users:
            if allowed_user == current_user_key:
                logger.info(f"{self.log_prefix} 用户 {current_user_key} 有禁言命令权限")
                return True, None

        logger.warning(f"{self.log_prefix} 用户 {current_user_key} 没有禁言命令权限")
        return False, "你没有使用禁言命令的权限"

    async def execute(self) -> Tuple[bool, Optional[str]]:
        """执行禁言命令"""
        try:
            # 首先检查用户权限
            has_permission, permission_error = self._check_user_permission()
            if not has_permission:
                logger.error(f"{self.log_prefix} 权限检查失败: {permission_error}")
                await self.send_text(f"❌ {permission_error}")
                return False, permission_error

            target = self.matched_groups.get("target")
            duration = self.matched_groups.get("duration")
            reason = self.matched_groups.get("reason", "管理员操作")

            if not all([target, duration]):
                await self.send_text("❌ 命令参数不完整,请检查格式")
                return False, "参数不完整"

            # 获取时长限制配置
            min_duration = self.get_config("mute.min_duration", 60)
            max_duration = self.get_config("mute.max_duration", 2592000)

            # 验证时长
            try:
                duration_int = int(duration)
                if duration_int <= 0:
                    await self.send_text("❌ 禁言时长必须大于0")
                    return False, "时长无效"

                # 限制禁言时长范围
                if duration_int < min_duration:
                    duration_int = min_duration
                    await self.send_text(f"⚠️ 禁言时长过短,调整为{min_duration}秒")
                elif duration_int > max_duration:
                    duration_int = max_duration
                    await self.send_text(f"⚠️ 禁言时长过长,调整为{max_duration}秒")

            except ValueError:
                await self.send_text("❌ 禁言时长必须是数字")
                return False, "时长格式错误"

            # 获取用户ID
            person_id = person_api.get_person_id_by_name(target)
            user_id = person_api.get_person_value(person_id, "user_id")
            if not user_id:
                error_msg = f"未找到用户 {target} 的ID"
                await self.send_text(f"❌ 找不到用户: {target}")
                logger.error(f"{self.log_prefix} {error_msg}")
                return False, error_msg

            # 格式化时长显示
            enable_formatting = self.get_config("mute.enable_duration_formatting", True)
            time_str = self._format_duration(duration_int) if enable_formatting else f"{duration_int}秒"

            logger.info(f"{self.log_prefix} 执行禁言命令: {target}({user_id}) -> {time_str}")

            # 发送群聊禁言命令
            success = await self.send_command(
                command_name="GROUP_BAN",
                args={"qq_id": str(user_id), "duration": str(duration_int)},
                display_message=f"禁言了 {target} {time_str}",
            )

            if success:
                # 获取并发送模板化消息
                message = self._get_template_message(target, time_str, reason)
                await self.send_text(message)

                logger.info(f"{self.log_prefix} 成功禁言 {target}({user_id}),时长 {duration_int} 秒")
                return True, f"成功禁言 {target},时长 {time_str}"
            else:
                await self.send_text("❌ 发送禁言命令失败")
                return False, "发送禁言命令失败"

        except Exception as e:
            logger.error(f"{self.log_prefix} 禁言命令执行失败: {e}")
            await self.send_text(f"❌ 禁言命令错误: {str(e)}")
            return False, str(e)

    def _get_template_message(self, target: str, duration_str: str, reason: str) -> str:
        """获取模板化的禁言消息"""
        templates = self.get_config("mute.templates")

        template = random.choice(templates)
        return template.format(target=target, duration=duration_str, reason=reason)

    def _format_duration(self, seconds: int) -> str:
        """将秒数格式化为可读的时间字符串"""
        if seconds < 60:
            return f"{seconds}秒"
        elif seconds < 3600:
            minutes = seconds // 60
            remaining_seconds = seconds % 60
            if remaining_seconds > 0:
                return f"{minutes}分{remaining_seconds}秒"
            else:
                return f"{minutes}分钟"
        elif seconds < 86400:
            hours = seconds // 3600
            remaining_minutes = (seconds % 3600) // 60
            if remaining_minutes > 0:
                return f"{hours}小时{remaining_minutes}分钟"
            else:
                return f"{hours}小时"
        else:
            days = seconds // 86400
            remaining_hours = (seconds % 86400) // 3600
            if remaining_hours > 0:
                return f"{days}天{remaining_hours}小时"
            else:
                return f"{days}天"

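The command arguments are parsed entirely by `command_pattern`; its named groups are what `execute()` later reads via `self.matched_groups`. A minimal standalone check of that regex against the documented `command_examples` (editor's illustration only, using the standard library `re`):

```python
import re

# Same pattern as MuteCommand.command_pattern above
pattern = re.compile(r"^/mute\s+(?P<target>\S+)\s+(?P<duration>\d+)(?:\s+(?P<reason>.+))?$")

for text in ["/mute 用户名 300", "/mute 张三 600 刷屏", "/mute @某人 1800 违规内容"]:
    match = pattern.match(text)
    print(match.groupdict() if match else None)
# {'target': '用户名', 'duration': '300', 'reason': None}
# {'target': '张三', 'duration': '600', 'reason': '刷屏'}
# {'target': '@某人', 'duration': '1800', 'reason': '违规内容'}
```
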
# ===== 插件主类 =====


@register_plugin
class MutePlugin(BasePlugin):
    """禁言插件

    提供智能禁言功能:
    - 智能禁言Action:基于LLM判断是否需要禁言(支持群组权限控制)
    - 禁言命令Command:手动执行禁言操作(支持用户权限控制)
    """

    # 插件基本信息
    plugin_name = "mute_plugin"  # 内部标识符
    enable_plugin = True
    config_file_name = "config.toml"

    # 配置节描述
    config_section_descriptions = {
        "plugin": "插件基本信息配置",
        "components": "组件启用控制",
        "permissions": "权限管理配置",
        "mute": "核心禁言功能配置",
        "smart_mute": "智能禁言Action的专属配置",
        "mute_command": "禁言命令Command的专属配置",
        "logging": "日志记录相关配置",
    }

    # 配置Schema定义
    config_schema = {
        "plugin": {
            "enabled": ConfigField(type=bool, default=False, description="是否启用插件"),
            "config_version": ConfigField(type=str, default="0.0.2", description="配置文件版本"),
        },
        "components": {
            "enable_smart_mute": ConfigField(type=bool, default=True, description="是否启用智能禁言Action"),
            "enable_mute_command": ConfigField(type=bool, default=False, description="是否启用禁言命令Command"),
        },
        "permissions": {
            "allowed_users": ConfigField(
                type=list,
                default=[],
                description="允许使用禁言命令的用户列表,格式:['platform:user_id'],如['qq:123456789']。空列表表示不启用权限控制",
            ),
            "allowed_groups": ConfigField(
                type=list,
                default=[],
                description="允许使用禁言动作的群组列表,格式:['platform:group_id'],如['qq:987654321']。空列表表示不启用权限控制",
            ),
        },
        "mute": {
            "min_duration": ConfigField(type=int, default=60, description="最短禁言时长(秒)"),
            "max_duration": ConfigField(type=int, default=2592000, description="最长禁言时长(秒),默认30天"),
            "default_duration": ConfigField(type=int, default=300, description="默认禁言时长(秒),默认5分钟"),
            "enable_duration_formatting": ConfigField(
                type=bool, default=True, description="是否启用人性化的时长显示(如 '5分钟' 而非 '300秒')"
            ),
            "log_mute_history": ConfigField(type=bool, default=True, description="是否记录禁言历史(未来功能)"),
            "templates": ConfigField(
                type=list,
                default=[
                    "好的,禁言 {target} {duration},理由:{reason}",
                    "收到,对 {target} 执行禁言 {duration},因为{reason}",
                    "明白了,禁言 {target} {duration},原因是{reason}",
                    "哇哈哈哈哈哈,已禁言 {target} {duration},理由:{reason}",
                    "哎呦我去,对 {target} 执行禁言 {duration},因为{reason}",
                    "{target},你完蛋了,我要禁言你 {duration} 秒,原因:{reason}",
                ],
                description="成功禁言后发送的随机消息模板",
            ),
            "error_messages": ConfigField(
                type=list,
                default=[
                    "没有指定禁言对象呢~",
                    "没有指定禁言时长呢~",
                    "禁言时长必须是正数哦~",
                    "禁言时长必须是数字哦~",
                    "找不到 {target} 这个人呢~",
                    "查找用户信息时出现问题~",
                ],
                description="执行禁言过程中发生错误时发送的随机消息模板",
            ),
        },
        "smart_mute": {
            "strict_mode": ConfigField(type=bool, default=True, description="LLM判定的严格模式"),
            "keyword_sensitivity": ConfigField(
                type=str, default="normal", description="关键词激活的敏感度", choices=["low", "normal", "high"]
            ),
            "allow_parallel": ConfigField(type=bool, default=False, description="是否允许并行执行(暂未启用)"),
        },
        "mute_command": {
            "max_batch_size": ConfigField(type=int, default=5, description="最大批量禁言数量(未来功能)"),
            "cooldown_seconds": ConfigField(type=int, default=3, description="命令冷却时间(秒)"),
        },
        "logging": {
            "level": ConfigField(
                type=str, default="INFO", description="日志记录级别", choices=["DEBUG", "INFO", "WARNING", "ERROR"]
            ),
            "prefix": ConfigField(type=str, default="[MutePlugin]", description="日志记录前缀"),
            "include_user_info": ConfigField(type=bool, default=True, description="日志中是否包含用户信息"),
            "include_duration_info": ConfigField(type=bool, default=True, description="日志中是否包含禁言时长信息"),
        },
    }

    def get_plugin_components(self) -> List[Tuple[ComponentInfo, Type]]:
        """返回插件包含的组件列表"""

        # 从配置获取组件启用状态
        enable_smart_mute = self.get_config("components.enable_smart_mute", True)
        enable_mute_command = self.get_config("components.enable_mute_command", True)

        components = []

        # 添加智能禁言Action
        if enable_smart_mute:
            components.append((MuteAction.get_action_info(), MuteAction))

        # 添加禁言命令Command
        if enable_mute_command:
            components.append((MuteCommand.get_command_info(), MuteCommand))

        return components

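`_check_user_permission` keys users by a `platform:user_id` string, and the config schema's description gives `qq:123456789` as an example entry for `permissions.allowed_users`. A minimal sketch of that lookup convention (the allow-list value here is hypothetical, for illustration only):

```python
# Hypothetical allow-list, in the "platform:user_id" format described by the config schema
allowed_users = ["qq:123456789"]


def has_mute_permission(platform: str, user_id: str) -> bool:
    # An empty list means permission control is disabled (everyone may use the command)
    if not allowed_users:
        return True
    return f"{platform}:{user_id}" in allowed_users


print(has_mute_permission("qq", "123456789"))  # True
print(has_mute_permission("qq", "987654321"))  # False
```
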
@@ -36,6 +36,11 @@ class SearchKnowledgeFromLPMMTool(BaseTool):
        query = function_args.get("query")
        # threshold = function_args.get("threshold", 0.4)

        # 检查LPMM知识库是否启用
        if qa_manager is None:
            logger.debug("LPMM知识库已禁用,跳过知识获取")
            return {"type": "info", "id": query, "content": "LPMM知识库已禁用"}

        # 调用知识库搜索

        knowledge_info = qa_manager.get_knowledge(query)

@@ -6,6 +6,7 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.tools.tool_use import ToolUser
from src.chat.utils.json_utils import process_llm_tool_calls
from typing import List, Dict, Tuple, Optional
from src.chat.message_receive.chat_stream import get_chat_manager

logger = get_logger("tool_executor")

@@ -42,7 +43,9 @@ class ToolExecutor:
            cache_ttl: 缓存生存时间(周期数)
        """
        self.chat_id = chat_id
        self.log_prefix = f"[ToolExecutor:{self.chat_id}] "
        self.chat_stream = get_chat_manager().get_stream(self.chat_id)
        self.log_prefix = f"[{get_chat_manager().get_stream_name(self.chat_id) or self.chat_id}]"

        self.llm_model = LLMRequest(
            model=global_config.model.tool_use,
            request_type="tool_executor",
@@ -125,6 +128,7 @@ class ToolExecutor:
        if tool_results:
            self._set_cache(cache_key, tool_results)

        if used_tools:
            logger.info(f"{self.log_prefix}工具执行完成,共执行{len(used_tools)}个工具: {used_tools}")

        if return_details:

@@ -1,5 +1,5 @@
[inner]
version = "3.1.0"
version = "3.6.0"

#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件,请在修改后将version的值进行变更
@@ -61,12 +61,15 @@ enable_relationship = true # 是否启用关系系统
relation_frequency = 1 # 关系频率,麦麦构建关系的速度,仅在normal_chat模式下有效

[chat] #麦麦的聊天通用设置
chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,在普通模式和专注模式之间自动切换
# chat_mode = "focus"
# chat_mode = "auto"
chat_mode = "normal" # 聊天模式 —— 普通模式:normal,专注模式:focus,auto模式:在普通模式和专注模式之间自动切换
auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天
exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天
# 普通模式下,麦麦会针对感兴趣的消息进行回复,token消耗量较低
# 专注模式下,麦麦会进行主动的观察,并给出回复,token消耗量略高,但是回复时机更准确
# 自动模式下,麦麦会根据消息内容自动切换到专注模式或普通模式

max_context_size = 18 # 上下文长度

thinking_timeout = 20 # 麦麦一次回复最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢)
replyer_random_probability = 0.5 # 首要replyer模型被选择的概率

talk_frequency = 1 # 麦麦回复频率,越高,麦麦回复越频繁
@@ -96,11 +99,6 @@ talk_frequency_adjust = [
# - 时间支持跨天,例如 "00:10,0.3" 表示从凌晨0:10开始使用频率0.3
# - 系统会自动将 "platform:id:type" 转换为内部的哈希chat_id进行匹配

auto_focus_threshold = 1 # 自动切换到专注聊天的阈值,越低越容易进入专注聊天
exit_focus_threshold = 1 # 自动退出专注聊天的阈值,越低越容易退出专注聊天
# 普通模式下,麦麦会针对感兴趣的消息进行回复,token消耗量较低
# 专注模式下,麦麦会进行主动的观察和回复,并给出回复,token消耗量较高
# 自动模式下,麦麦会根据消息内容自动切换到专注模式或普通模式

[message_receive]
# 以下是消息过滤,可以根据规则过滤特定消息,将不会读取这些消息
@@ -116,33 +114,24 @@ ban_msgs_regex = [

[normal_chat] #普通聊天
#一般回复参数
replyer_random_probability = 0.5 # 麦麦回答时选择首要模型的概率(与之相对的,次要模型的概率为1 - replyer_random_probability)
emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率,设置为1让麦麦自己决定发不发
thinking_timeout = 120 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢)

willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,mxp模式:mxp,自定义模式:custom(需要你自己实现)

response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数

emoji_response_penalty = 0 # 对其他人发的表情包回复惩罚系数,设为0为不回复单个表情包,减少单独回复表情包的概率
mentioned_bot_inevitable_reply = true # 提及 bot 必然回复
at_bot_inevitable_reply = true # @bot 必然回复(包含提及)

enable_planner = false # 是否启用动作规划器(与focus_chat共享actions)

enable_planner = true # 是否启用动作规划器(与focus_chat共享actions)

[focus_chat] #专注聊天
think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
consecutive_replies = 1 # 连续回复能力,值越高,麦麦连续回复的概率越高
compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除
working_memory_processor = false # 是否启用工作记忆处理器,消耗量大

[tool]
enable_in_normal_chat = false # 是否在普通聊天中启用工具
enable_in_focus_chat = true # 是否在专注聊天中启用工具

[emoji]
emoji_chance = 0.6 # 麦麦激活表情包动作的概率
emoji_activate_type = "random" # 表情包激活类型,可选:random,llm ; random下,表情包动作随机启用,llm下,表情包动作根据llm判断是否启用

max_reg_num = 60 # 表情包最大注册数量
do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包
check_interval = 10 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
@@ -169,7 +158,7 @@ consolidation_check_percentage = 0.05 # 检查节点比例
#不希望记忆的词,已经记忆的不会受到影响,需要手动清理
memory_ban_words = [ "表情包", "图片", "回复", "聊天记录" ]

[mood] # 仅在 普通聊天 有效
[mood] # 暂时不再有效,请不要使用
enable_mood = false # 是否启用情绪系统
mood_update_interval = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate = 0.95 # 情绪衰减率
@@ -230,7 +219,7 @@ console_log_level = "INFO" # 控制台日志级别,可选: DEBUG, INFO, WARNIN
file_log_level = "DEBUG" # 文件日志级别,可选: DEBUG, INFO, WARNING, ERROR, CRITICAL

# 第三方库日志控制
suppress_libraries = ["faiss","httpx", "urllib3", "asyncio", "websockets", "httpcore", "requests", "peewee", "openai","uvicorn"] # 完全屏蔽的库
suppress_libraries = ["faiss","httpx", "urllib3", "asyncio", "websockets", "httpcore", "requests", "peewee", "openai","uvicorn","jieba"] # 完全屏蔽的库
library_log_levels = { "aiohttp" = "WARNING"} # 设置特定库的日志级别

#下面的模型若使用硅基流动则不需要更改,使用ds官方则改成.env自定义的宏,使用自定义模型则选择定位相似的模型自己填写
@@ -242,8 +231,13 @@ library_log_levels = { "aiohttp" = "WARNING"} # 设置特定库的日志级别
# enable_thinking = <true|false> : 用于指定模型是否启用思考
# thinking_budget = <int> : 用于指定模型思考最长长度

[debug]
show_prompt = false # 是否显示prompt
debug_show_chat_mode = false # 是否在回复后显示当前聊天模式


[model]
model_max_output_length = 800 # 模型单次返回的最大token数
model_max_output_length = 1000 # 模型单次返回的最大token数

#------------必填:组件模型------------

@@ -273,11 +267,12 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
temp = 0.2 #模型的温度,新V3建议0.1-0.3

[model.replyer_2] # 次要回复模型
name = "Pro/deepseek-ai/DeepSeek-R1"
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 4.0 #模型的输入价格(非必填,可以记录消耗)
pri_out = 16.0 #模型的输出价格(非必填,可以记录消耗)
temp = 0.7
pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
#默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数
temp = 0.2 #模型的温度,新V3建议0.1-0.3


[model.memory_summary] # 记忆的概括模型
@@ -376,7 +371,6 @@ key_file = "" # SSL密钥文件路径,仅在use_wss=true时有效
enable = true

[experimental] #实验性功能
debug_show_chat_mode = false # 是否在回复后显示当前聊天模式
enable_friend_chat = false # 是否启用好友聊天