Merge branch 'dev' into dev-api-ada to resolve conflicts

UnCLAS-Prommer committed 2025-07-29 10:22:43 +08:00
92 changed files with 3980 additions and 6492 deletions

View File

@@ -2,7 +2,7 @@ import asyncio
import time
import traceback
import random
from typing import List, Optional, Dict, Any
from typing import List, Optional, Dict, Any, Tuple
from rich.traceback import install
from src.config.config import global_config
@@ -18,11 +18,12 @@ from src.chat.chat_loop.hfc_utils import CycleDetail
from src.person_info.relationship_builder_manager import relationship_builder_manager
from src.person_info.person_info import get_person_info_manager
from src.plugin_system.base.component_types import ActionInfo, ChatMode
from src.plugin_system.apis import generator_api, send_api, message_api
from src.plugin_system.apis import generator_api, send_api, message_api, database_api
from src.chat.willing.willing_manager import get_willing_manager
from src.mais4u.mai_think import mai_thinking_manager
from maim_message.message_base import GroupInfo
from src.mais4u.constant_s4u import ENABLE_S4U
from src.plugins.built_in.core_actions.no_reply import NoReplyAction
from src.chat.chat_loop.hfc_utils import send_typing, stop_typing
ERROR_LOOP_INFO = {
"loop_plan_info": {
@@ -88,11 +89,6 @@ class HeartFChatting:
self.loop_mode = ChatMode.NORMAL # 初始循环模式为普通模式
# 新增:消息计数器和疲惫阈值
self._message_count = 0 # 发送的消息计数
self._message_threshold = max(10, int(30 * global_config.chat.focus_value))
self._fatigue_triggered = False # 是否已触发疲惫退出
self.action_manager = ActionManager()
self.action_planner = ActionPlanner(chat_id=self.stream_id, action_manager=self.action_manager)
self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id)
@@ -112,7 +108,6 @@ class HeartFChatting:
self.last_read_time = time.time() - 1
self.willing_amplifier = 1
self.willing_manager = get_willing_manager()
logger.info(f"{self.log_prefix} HeartFChatting 初始化完成")
@@ -182,6 +177,9 @@ class HeartFChatting:
if self.loop_mode == ChatMode.NORMAL:
self.energy_value -= 0.3
self.energy_value = max(self.energy_value, 0.3)
if self.loop_mode == ChatMode.FOCUS:
self.energy_value -= 0.6
self.energy_value = max(self.energy_value, 0.3)
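Read together, the two branches above drain energy twice as fast in FOCUS mode as in NORMAL mode, with a shared floor of 0.3. A minimal standalone sketch of that rule (function and names are hypothetical, not the project's API):

```python
# Hypothetical sketch of the per-tick energy decay shown above.
def decay_energy(energy: float, mode: str) -> float:
    drain = 0.3 if mode == "NORMAL" else 0.6  # FOCUS drains twice as fast
    return max(energy - drain, 0.3)           # never below the 0.3 floor
```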
def print_cycle_info(self, cycle_timers):
# 记录循环信息和计时器结果
@@ -200,9 +198,9 @@ class HeartFChatting:
async def _loopbody(self):
if self.loop_mode == ChatMode.FOCUS:
if await self._observe():
self.energy_value -= 1 * global_config.chat.focus_value
self.energy_value -= 1 / global_config.chat.focus_value
else:
self.energy_value -= 3 * global_config.chat.focus_value
self.energy_value -= 3 / global_config.chat.focus_value
if self.energy_value <= 1:
self.energy_value = 1
self.loop_mode = ChatMode.NORMAL
@@ -218,15 +216,17 @@ class HeartFChatting:
limit_mode="earliest",
filter_bot=True,
)
if global_config.chat.focus_value != 0:
if len(new_messages_data) > 3 / pow(global_config.chat.focus_value, 0.5):
self.loop_mode = ChatMode.FOCUS
self.energy_value = (
10 + (len(new_messages_data) / (3 / pow(global_config.chat.focus_value, 0.5))) * 10
)
return True
if len(new_messages_data) > 3 * global_config.chat.focus_value:
self.loop_mode = ChatMode.FOCUS
self.energy_value = 10 + (len(new_messages_data) / (3 * global_config.chat.focus_value)) * 10
return True
if self.energy_value >= 30 * global_config.chat.focus_value:
self.loop_mode = ChatMode.FOCUS
return True
if self.energy_value >= 30:
self.loop_mode = ChatMode.FOCUS
return True
if new_messages_data:
earliest_messages_data = new_messages_data[0]
@@ -235,10 +235,10 @@ class HeartFChatting:
if_think = await self.normal_response(earliest_messages_data)
if if_think:
factor = max(global_config.chat.focus_value, 0.1)
self.energy_value *= 1.1 / factor
self.energy_value *= 1.1 * factor
logger.info(f"{self.log_prefix} 进行了思考,能量值按倍数增加,当前能量值:{self.energy_value:.1f}")
else:
self.energy_value += 0.1 / global_config.chat.focus_value
self.energy_value += 0.1 * global_config.chat.focus_value
logger.debug(f"{self.log_prefix} 没有进行思考,能量值线性增加,当前能量值:{self.energy_value:.1f}")
logger.debug(f"{self.log_prefix} 当前能量值:{self.energy_value:.1f}")
@@ -257,44 +257,69 @@ class HeartFChatting:
person_name = await person_info_manager.get_value(person_id, "person_name")
return f"{person_name}:{message_data.get('processed_plain_text')}"
async def send_typing(self):
group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")
async def _send_and_store_reply(
self,
response_set,
reply_to_str,
loop_start_time,
action_message,
cycle_timers: Dict[str, float],
thinking_id,
plan_result,
) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
with Timer("回复发送", cycle_timers):
reply_text = await self._send_response(response_set, reply_to_str, loop_start_time, action_message)
chat = await get_chat_manager().get_or_create_stream(
platform="amaidesu_default",
user_info=None,
group_info=group_info,
# 存储reply action信息
person_info_manager = get_person_info_manager()
person_id = person_info_manager.get_person_id(
action_message.get("chat_info_platform", ""),
action_message.get("user_id", ""),
)
person_name = await person_info_manager.get_value(person_id, "person_name")
action_prompt_display = f"你对{person_name}进行了回复:{reply_text}"
await database_api.store_action_info(
chat_stream=self.chat_stream,
action_build_into_prompt=False,
action_prompt_display=action_prompt_display,
action_done=True,
thinking_id=thinking_id,
action_data={"reply_text": reply_text, "reply_to": reply_to_str},
action_name="reply",
)
await send_api.custom_to_stream(
message_type="state", content="typing", stream_id=chat.stream_id, storage_message=False
)
# 构建循环信息
loop_info: Dict[str, Any] = {
"loop_plan_info": {
"action_result": plan_result.get("action_result", {}),
},
"loop_action_info": {
"action_taken": True,
"reply_text": reply_text,
"command": "",
"taken_time": time.time(),
},
}
async def stop_typing(self):
group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")
chat = await get_chat_manager().get_or_create_stream(
platform="amaidesu_default",
user_info=None,
group_info=group_info,
)
await send_api.custom_to_stream(
message_type="state", content="stop_typing", stream_id=chat.stream_id, storage_message=False
)
return loop_info, reply_text, cycle_timers
async def _observe(self, message_data: Optional[Dict[str, Any]] = None):
# sourcery skip: hoist-statement-from-if, merge-comparisons, reintroduce-else
if not message_data:
message_data = {}
action_type = "no_action"
reply_text = "" # 初始化reply_text变量避免UnboundLocalError
gen_task = None # 初始化gen_task变量避免UnboundLocalError
reply_to_str = "" # 初始化reply_to_str变量
# 创建新的循环信息
cycle_timers, thinking_id = self.start_cycle()
logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考[模式:{self.loop_mode}]")
if ENABLE_S4U:
await self.send_typing()
await send_typing()
async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()):
loop_start_time = time.time()
@@ -310,95 +335,254 @@ class HeartFChatting:
except Exception as e:
logger.error(f"{self.log_prefix} 动作修改失败: {e}")
# 如果normal开始一个回复生成进程先准备好回复其实是和planner同时进行的
# 检查是否在normal模式下没有可用动作除了reply相关动作
skip_planner = False
if self.loop_mode == ChatMode.NORMAL:
reply_to_str = await self.build_reply_to_str(message_data)
gen_task = asyncio.create_task(self._generate_response(message_data, available_actions, reply_to_str))
# 过滤掉reply相关的动作检查是否还有其他动作
non_reply_actions = {
k: v for k, v in available_actions.items() if k not in ["reply", "no_reply", "no_action"]
}
with Timer("规划器", cycle_timers):
plan_result, target_message = await self.action_planner.plan(mode=self.loop_mode)
if not non_reply_actions:
skip_planner = True
logger.info(f"{self.log_prefix} Normal模式下没有可用动作直接回复")
action_result: dict = plan_result.get("action_result", {}) # type: ignore
action_type, action_data, reasoning, is_parallel = (
action_result.get("action_type", "error"),
action_result.get("action_data", {}),
action_result.get("reasoning", "未提供理由"),
action_result.get("is_parallel", True),
)
# 直接设置为reply动作
action_type = "reply"
reasoning = ""
action_data = {"loop_start_time": loop_start_time}
is_parallel = False
action_data["loop_start_time"] = loop_start_time
# 构建plan_result用于后续处理
plan_result = {
"action_result": {
"action_type": action_type,
"action_data": action_data,
"reasoning": reasoning,
"timestamp": time.time(),
"is_parallel": is_parallel,
},
"action_prompt": "",
}
target_message = message_data
if self.loop_mode == ChatMode.NORMAL:
if action_type == "no_action":
logger.info(f"[{self.log_prefix}] {global_config.bot.nickname} 决定进行回复")
elif is_parallel:
logger.info(
f"[{self.log_prefix}] {global_config.bot.nickname} 决定进行回复, 同时执行{action_type}动作"
# 如果normal模式且不跳过规划器开始一个回复生成进程先准备好回复其实是和planner同时进行的
if not skip_planner:
reply_to_str = await self.build_reply_to_str(message_data)
gen_task = asyncio.create_task(
self._generate_response(
message_data=message_data,
available_actions=available_actions,
reply_to=reply_to_str,
request_type="chat.replyer.normal",
)
)
else:
logger.info(f"[{self.log_prefix}] {global_config.bot.nickname} 决定执行{action_type}动作")
if action_type == "no_action":
if not skip_planner:
with Timer("规划器", cycle_timers):
plan_result, target_message = await self.action_planner.plan(mode=self.loop_mode)
action_result: Dict[str, Any] = plan_result.get("action_result", {}) # type: ignore
action_type, action_data, reasoning, is_parallel = (
action_result.get("action_type", "error"),
action_result.get("action_data", {}),
action_result.get("reasoning", "未提供理由"),
action_result.get("is_parallel", True),
)
action_data["loop_start_time"] = loop_start_time
if action_type == "reply":
logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复")
elif is_parallel:
logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复, 同时执行{action_type}动作")
else:
# 只有在gen_task存在时才进行相关操作
if gen_task:
if not gen_task.done():
gen_task.cancel()
logger.debug(f"{self.log_prefix} 已取消预生成的回复任务")
logger.info(
f"{self.log_prefix}{global_config.bot.nickname} 原本想要回复,但选择执行{action_type},不发表回复"
)
elif generation_result := gen_task.result():
content = " ".join([item[1] for item in generation_result if item[0] == "text"])
logger.debug(f"{self.log_prefix} 预生成的回复任务已完成")
logger.info(
f"{self.log_prefix}{global_config.bot.nickname} 原本想要回复:{content},但选择执行{action_type},不发表回复"
)
else:
logger.warning(f"{self.log_prefix} 预生成的回复任务未生成有效内容")
action_message: Dict[str, Any] = message_data or target_message # type: ignore
if action_type == "reply":
# 等待回复生成完毕
gather_timeout = global_config.chat.thinking_timeout
try:
response_set = await asyncio.wait_for(gen_task, timeout=gather_timeout)
except asyncio.TimeoutError:
response_set = None
if self.loop_mode == ChatMode.NORMAL:
# 只有在gen_task存在时才等待
if not gen_task:
reply_to_str = await self.build_reply_to_str(message_data)
gen_task = asyncio.create_task(
self._generate_response(
message_data=message_data,
available_actions=available_actions,
reply_to=reply_to_str,
request_type="chat.replyer.normal",
)
)
if response_set:
content = " ".join([item[1] for item in response_set if item[0] == "text"])
gather_timeout = global_config.chat.thinking_timeout
try:
response_set = await asyncio.wait_for(gen_task, timeout=gather_timeout)
except asyncio.TimeoutError:
logger.warning(f"{self.log_prefix} 回复生成超时>{global_config.chat.thinking_timeout}s已跳过")
response_set = None
# 模型炸了,没有回复内容生成
if not response_set:
logger.warning(f"[{self.log_prefix}] 模型未生成回复内容")
return False
elif action_type not in ["no_action"] and not is_parallel:
logger.info(
f"[{self.log_prefix}] {global_config.bot.nickname} 原本想要回复:{content},但选择执行{action_type},不发表回复"
)
return False
# 模型炸了或超时,没有回复内容生成
if not response_set:
logger.warning(f"{self.log_prefix}模型未生成回复内容")
return False
else:
logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复 (focus模式)")
logger.info(f"[{self.log_prefix}] {global_config.bot.nickname} 决定的回复内容: {content}")
# 构建reply_to字符串
reply_to_str = await self.build_reply_to_str(action_message)
# 发送回复 (不再需要传入 chat)
reply_text = await self._send_response(response_set, reply_to_str, loop_start_time, message_data)
if ENABLE_S4U:
await self.stop_typing()
await mai_thinking_manager.get_mai_think(self.stream_id).do_think_after_response(reply_text)
# 生成回复
with Timer("回复生成", cycle_timers):
response_set = await self._generate_response(
message_data=action_message,
available_actions=available_actions,
reply_to=reply_to_str,
request_type="chat.replyer.focus",
)
if not response_set:
logger.warning(f"{self.log_prefix}模型未生成回复内容")
return False
loop_info, reply_text, cycle_timers = await self._send_and_store_reply(
response_set, reply_to_str, loop_start_time, action_message, cycle_timers, thinking_id, plan_result
)
return True
else:
action_message: Dict[str, Any] = message_data or target_message # type: ignore
# 并行执行:同时进行回复发送和动作执行
# 先置空防止未定义错误
background_reply_task = None
background_action_task = None
# 如果是并行执行且在normal模式下需要等待预生成的回复任务完成并发送回复
if self.loop_mode == ChatMode.NORMAL and is_parallel and gen_task:
# 动作执行计时
with Timer("动作执行", cycle_timers):
success, reply_text, command = await self._handle_action(
action_type, reasoning, action_data, cycle_timers, thinking_id, action_message
async def handle_reply_task() -> Tuple[Optional[Dict[str, Any]], str, Dict[str, float]]:
# 等待预生成的回复任务完成
gather_timeout = global_config.chat.thinking_timeout
try:
response_set = await asyncio.wait_for(gen_task, timeout=gather_timeout)
except asyncio.TimeoutError:
logger.warning(
f"{self.log_prefix} 并行执行:回复生成超时>{global_config.chat.thinking_timeout}s已跳过"
)
return None, "", {}
except asyncio.CancelledError:
logger.debug(f"{self.log_prefix} 并行执行:回复生成任务已被取消")
return None, "", {}
if not response_set:
logger.warning(f"{self.log_prefix} 模型超时或生成回复内容为空")
return None, "", {}
reply_to_str = await self.build_reply_to_str(action_message)
loop_info, reply_text, cycle_timers_reply = await self._send_and_store_reply(
response_set,
reply_to_str,
loop_start_time,
action_message,
cycle_timers,
thinking_id,
plan_result,
)
return loop_info, reply_text, cycle_timers_reply
# 执行回复任务并赋值到变量
background_reply_task = asyncio.create_task(handle_reply_task())
# 动作执行任务
async def handle_action_task():
with Timer("动作执行", cycle_timers):
success, reply_text, command = await self._handle_action(
action_type, reasoning, action_data, cycle_timers, thinking_id, action_message
)
return success, reply_text, command
# 执行动作任务并赋值到变量
background_action_task = asyncio.create_task(handle_action_task())
reply_loop_info = None
reply_text_from_reply = ""
action_success = False
action_reply_text = ""
action_command = ""
# 并行执行所有任务
if background_reply_task:
results = await asyncio.gather(
background_reply_task, background_action_task, return_exceptions=True
)
# 处理回复任务结果
reply_result = results[0]
if isinstance(reply_result, BaseException):
logger.error(f"{self.log_prefix} 回复任务执行异常: {reply_result}")
elif reply_result and reply_result[0] is not None:
reply_loop_info, reply_text_from_reply, _ = reply_result
loop_info = {
"loop_plan_info": {
"action_result": plan_result.get("action_result", {}),
},
"loop_action_info": {
"action_taken": success,
"reply_text": reply_text,
"command": command,
"taken_time": time.time(),
},
}
# 处理动作任务结果
action_task_result = results[1]
if isinstance(action_task_result, BaseException):
logger.error(f"{self.log_prefix} 动作任务执行异常: {action_task_result}")
else:
action_success, action_reply_text, action_command = action_task_result
else:
results = await asyncio.gather(background_action_task, return_exceptions=True)
# 只有动作任务
action_task_result = results[0]
if isinstance(action_task_result, BaseException):
logger.error(f"{self.log_prefix} 动作任务执行异常: {action_task_result}")
else:
action_success, action_reply_text, action_command = action_task_result
if loop_info["loop_action_info"]["command"] == "stop_focus_chat":
logger.info(f"{self.log_prefix} 麦麦决定停止专注聊天")
return False
# 停止该聊天模式的循环
# 构建最终的循环信息
if reply_loop_info:
# 如果有回复信息使用回复的loop_info作为基础
loop_info = reply_loop_info
# 更新动作执行信息
loop_info["loop_action_info"].update(
{
"action_taken": action_success,
"command": action_command,
"taken_time": time.time(),
}
)
reply_text = reply_text_from_reply
else:
# 没有回复信息构建纯动作的loop_info
loop_info = {
"loop_plan_info": {
"action_result": plan_result.get("action_result", {}),
},
"loop_action_info": {
"action_taken": action_success,
"reply_text": action_reply_text,
"command": action_command,
"taken_time": time.time(),
},
}
reply_text = action_reply_text
if ENABLE_S4U:
await stop_typing()
await mai_thinking_manager.get_mai_think(self.stream_id).do_think_after_response(reply_text)
self.end_cycle(loop_info, cycle_timers)
self.print_cycle_info(cycle_timers)
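The parallel branch above hinges on `asyncio.gather(..., return_exceptions=True)`: exceptions come back as values in the result list, so each result must be type-checked before unpacking, exactly as the hunk does with `isinstance(result, BaseException)`. A minimal self-contained sketch of the same pattern:

```python
import asyncio

async def reply_task() -> tuple[str, str]:
    return "loop_info", "reply text"

async def action_task() -> tuple[bool, str, str]:
    raise RuntimeError("action failed")

async def main():
    # return_exceptions=True: failures are returned, not raised.
    results = await asyncio.gather(reply_task(), action_task(), return_exceptions=True)
    for result in results:
        if isinstance(result, BaseException):
            print(f"task raised: {result}")   # logged, not re-raised
        else:
            print(f"task returned: {result}")

asyncio.run(main())
```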
@@ -406,8 +590,16 @@ class HeartFChatting:
if self.loop_mode == ChatMode.NORMAL:
await self.willing_manager.after_generate_reply_handle(message_data.get("message_id", ""))
# 管理no_reply计数器当执行了非no_reply动作时重置计数器
if action_type != "no_reply" and action_type != "no_action":
# 导入NoReplyAction并重置计数器
NoReplyAction.reset_consecutive_count()
logger.info(f"{self.log_prefix} 执行了{action_type}动作重置no_reply计数器")
return True
elif action_type == "no_action":
# 当执行回复动作时也重置no_reply计数器
NoReplyAction.reset_consecutive_count()
logger.info(f"{self.log_prefix} 执行了回复动作重置no_reply计数器")
return True
@@ -435,7 +627,7 @@ class HeartFChatting:
action: str,
reasoning: str,
action_data: dict,
cycle_timers: dict,
cycle_timers: Dict[str, float],
thinking_id: str,
action_message: dict,
) -> tuple[bool, str, str]:
@@ -501,7 +693,7 @@ class HeartFChatting:
"兴趣"模式下,判断是否回复并生成内容。
"""
interested_rate = (message_data.get("interest_value") or 0.0) * self.willing_amplifier
interested_rate = (message_data.get("interest_value") or 0.0) * global_config.chat.willing_amplifier
self.willing_manager.setup(message_data, self.chat_stream)
@@ -515,8 +707,8 @@ class HeartFChatting:
reply_probability += additional_config["maimcore_reply_probability_gain"]
reply_probability = min(max(reply_probability, 0), 1) # 确保概率在 0-1 之间
talk_frequency = global_config.chat.get_current_talk_frequency(self.stream_id)
reply_probability = talk_frequency * reply_probability
talk_frequency = global_config.chat.get_current_talk_frequency(self.stream_id)
reply_probability = talk_frequency * reply_probability
# 处理表情包
if message_data.get("is_emoji") or message_data.get("is_picid"):
@@ -544,7 +736,11 @@ class HeartFChatting:
return False
async def _generate_response(
self, message_data: dict, available_actions: Optional[Dict[str, ActionInfo]], reply_to: str
self,
message_data: dict,
available_actions: Optional[Dict[str, ActionInfo]],
reply_to: str,
request_type: str = "chat.replyer.normal",
) -> Optional[list]:
"""生成普通回复"""
try:
@@ -552,8 +748,8 @@ class HeartFChatting:
chat_stream=self.chat_stream,
reply_to=reply_to,
available_actions=available_actions,
enable_tool=global_config.tool.enable_in_normal_chat,
request_type="chat.replyer.normal",
enable_tool=global_config.tool.enable_tool,
request_type=request_type,
)
if not success or not reply_set:
@@ -563,10 +759,10 @@ class HeartFChatting:
return reply_set
except Exception as e:
logger.error(f"[{self.log_prefix}] 回复生成出现错误:{str(e)} {traceback.format_exc()}")
logger.error(f"{self.log_prefix}回复生成出现错误:{str(e)} {traceback.format_exc()}")
return None
async def _send_response(self, reply_set, reply_to, thinking_start_time, message_data):
async def _send_response(self, reply_set, reply_to, thinking_start_time, message_data) -> str:
current_time = time.time()
new_message_count = message_api.count_new_messages(
chat_id=self.chat_stream.stream_id, start_time=thinking_start_time, end_time=current_time
@@ -578,13 +774,9 @@ class HeartFChatting:
need_reply = new_message_count >= random.randint(2, 4)
if need_reply:
logger.info(
f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,使用引用回复"
)
logger.info(f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,使用引用回复")
else:
logger.debug(
f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,不使用引用回复"
)
logger.info(f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,不使用引用回复")
reply_text = ""
first_replied = False
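`need_reply` above is a randomized heuristic: the more messages that arrived while the bot was thinking, the more likely the reply quotes its target. The decision in isolation (sketch):

```python
import random

def should_quote(new_message_count: int) -> bool:
    # Quote the target when 2-4 (randomly re-drawn each time) or more
    # messages arrived between thinking start and sending.
    return new_message_count >= random.randint(2, 4)
```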

View File

@@ -1,10 +1,13 @@
import time
from typing import Optional, Dict, Any
from src.config.config import global_config
from src.common.message_repository import count_messages
from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import get_chat_manager
from src.plugin_system.apis import send_api
from maim_message.message_base import GroupInfo
from src.common.message_repository import count_messages
logger = get_logger(__name__)
@@ -106,3 +109,30 @@ def get_recent_message_stats(minutes: float = 30, chat_id: Optional[str] = None)
bot_reply_count = count_messages(bot_filter)
return {"bot_reply_count": bot_reply_count, "total_message_count": total_message_count}
async def send_typing():
group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")
chat = await get_chat_manager().get_or_create_stream(
platform="amaidesu_default",
user_info=None,
group_info=group_info,
)
await send_api.custom_to_stream(
message_type="state", content="typing", stream_id=chat.stream_id, storage_message=False
)
async def stop_typing():
group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")
chat = await get_chat_manager().get_or_create_stream(
platform="amaidesu_default",
user_info=None,
group_info=group_info,
)
await send_api.custom_to_stream(
message_type="state", content="stop_typing", stream_id=chat.stream_id, storage_message=False
)
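Moved into hfc_utils, the two helpers pair naturally around response generation, mirroring how HeartFChatting now calls them. A hypothetical usage sketch (`generate` is a placeholder, not the real pipeline):

```python
# Hypothetical usage; generate() stands in for the real reply pipeline.
async def reply_with_typing_indicator(generate):
    await send_typing()            # state: "typing" on the dedicated stream
    try:
        return await generate()
    finally:
        await stop_typing()        # always clear the indicator
```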

View File

@@ -525,9 +525,9 @@ class EmojiManager:
如果文件已被删除,则执行对象的删除方法并从列表中移除
"""
try:
if not self.emoji_objects:
logger.warning("[检查] emoji_objects为空跳过完整性检查")
return
# if not self.emoji_objects:
# logger.warning("[检查] emoji_objects为空跳过完整性检查")
# return
total_count = len(self.emoji_objects)
self.emoji_num = total_count
@@ -707,6 +707,38 @@ class EmojiManager:
return emoji
return None # 如果循环结束还没找到,则返回 None
async def get_emoji_description_by_hash(self, emoji_hash: str) -> Optional[str]:
"""根据哈希值获取已注册表情包的描述
Args:
emoji_hash: 表情包的哈希值
Returns:
Optional[str]: 表情包描述如果未找到则返回None
"""
try:
# 先从内存中查找
emoji = await self.get_emoji_from_manager(emoji_hash)
if emoji and emoji.description:
logger.info(f"[缓存命中] 从内存获取表情包描述: {emoji.description[:50]}...")
return emoji.description
# 如果内存中没有,从数据库查找
self._ensure_db()
try:
emoji_record = Emoji.get_or_none(Emoji.emoji_hash == emoji_hash)
if emoji_record and emoji_record.description:
logger.info(f"[缓存命中] 从数据库获取表情包描述: {emoji_record.description[:50]}...")
return emoji_record.description
except Exception as e:
logger.error(f"从数据库查询表情包描述时出错: {e}")
return None
except Exception as e:
logger.error(f"获取表情包描述失败 (Hash: {emoji_hash}): {str(e)}")
return None
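A hypothetical caller of the new method, resolving a description for an incoming emoji image (the hash scheme here is an assumption for illustration):

```python
import hashlib

# Hypothetical caller; the project's actual hashing may differ.
async def describe_emoji(manager: "EmojiManager", image_bytes: bytes) -> str:
    emoji_hash = hashlib.md5(image_bytes).hexdigest()  # hash scheme assumed
    description = await manager.get_emoji_description_by_hash(emoji_hash)
    return description or "[未知表情包]"
```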
async def delete_emoji(self, emoji_hash: str) -> bool:
"""根据哈希值删除表情包

View File

@@ -51,7 +51,7 @@ def init_prompt() -> None:
"想说明某个具体的事实观点,但懒得明说,或者不便明说,或表达一种默契",使用"懂的都懂"
"当涉及游戏相关时,表示意外的夸赞,略带戏谑意味"时,使用"这么强!"
注意不要总结你自己SELF的发言
注意不要总结你自己SELF的发言
现在请你概括
"""
Prompt(learn_style_prompt, "learn_style_prompt")
@@ -330,48 +330,8 @@ class ExpressionLearner:
"""
current_time = time.time()
# 全局衰减所有已存储的表达方式
for type in ["style", "grammar"]:
base_dir = os.path.join("data", "expression", f"learnt_{type}")
if not os.path.exists(base_dir):
logger.debug(f"目录不存在,跳过衰减: {base_dir}")
continue
try:
chat_ids = os.listdir(base_dir)
logger.debug(f"{base_dir} 中找到 {len(chat_ids)} 个聊天ID目录进行衰减")
except Exception as e:
logger.error(f"读取目录失败 {base_dir}: {e}")
continue
for chat_id in chat_ids:
file_path = os.path.join(base_dir, chat_id, "expressions.json")
if not os.path.exists(file_path):
continue
try:
with open(file_path, "r", encoding="utf-8") as f:
expressions = json.load(f)
if not isinstance(expressions, list):
logger.warning(f"表达方式文件格式错误,跳过衰减: {file_path}")
continue
# 应用全局衰减
decayed_expressions = self.apply_decay_to_expressions(expressions, current_time)
# 保存衰减后的结果
with open(file_path, "w", encoding="utf-8") as f:
json.dump(decayed_expressions, f, ensure_ascii=False, indent=2)
logger.debug(f"已对 {file_path} 应用衰减,剩余 {len(decayed_expressions)} 个表达方式")
except json.JSONDecodeError as e:
logger.error(f"JSON解析失败跳过衰减 {file_path}: {e}")
except PermissionError as e:
logger.error(f"权限不足,无法更新 {file_path}: {e}")
except Exception as e:
logger.error(f"全局衰减{type}表达方式失败 {file_path}: {e}")
continue
# 全局衰减所有已存储的表达方式(直接操作数据库)
self._apply_global_decay_to_database(current_time)
learnt_style: Optional[List[Tuple[str, str, str]]] = []
learnt_grammar: Optional[List[Tuple[str, str, str]]] = []
@@ -388,6 +348,42 @@ class ExpressionLearner:
return learnt_style, learnt_grammar
def _apply_global_decay_to_database(self, current_time: float) -> None:
"""
对数据库中的所有表达方式应用全局衰减
"""
try:
# 获取所有表达方式
all_expressions = Expression.select()
updated_count = 0
deleted_count = 0
for expr in all_expressions:
# 计算时间差
last_active = expr.last_active_time
time_diff_days = (current_time - last_active) / (24 * 3600) # 转换为天
# 计算衰减值
decay_value = self.calculate_decay_factor(time_diff_days)
new_count = max(0.01, expr.count - decay_value)
if new_count <= 0.01:
# 如果count太小删除这个表达方式
expr.delete_instance()
deleted_count += 1
else:
# 更新count
expr.count = new_count
expr.save()
updated_count += 1
if updated_count > 0 or deleted_count > 0:
logger.info(f"全局衰减完成:更新了 {updated_count} 个表达方式,删除了 {deleted_count} 个表达方式")
except Exception as e:
logger.error(f"数据库全局衰减失败: {e}")
def calculate_decay_factor(self, time_diff_days: float) -> float:
"""
计算衰减值
@@ -410,30 +406,6 @@ class ExpressionLearner:
return min(0.01, decay)
def apply_decay_to_expressions(
self, expressions: List[Dict[str, Any]], current_time: float
) -> List[Dict[str, Any]]:
"""
对表达式列表应用衰减
返回衰减后的表达式列表移除count小于0的项
"""
result = []
for expr in expressions:
# 确保last_active_time存在如果不存在则使用current_time
if "last_active_time" not in expr:
expr["last_active_time"] = current_time
last_active = expr["last_active_time"]
time_diff_days = (current_time - last_active) / (24 * 3600) # 转换为天
decay_value = self.calculate_decay_factor(time_diff_days)
expr["count"] = max(0.01, expr.get("count", 1) - decay_value)
if expr["count"] > 0:
result.append(expr)
return result
async def learn_and_store(self, type: str, num: int = 10) -> List[Tuple[str, str, str]]:
# sourcery skip: use-join
"""

View File

@@ -2,7 +2,7 @@ import json
import time
import random
from typing import List, Dict, Tuple, Optional
from typing import List, Dict, Tuple, Optional, Any
from json_repair import repair_json
from src.llm_models.utils_model import LLMRequest
@@ -117,36 +117,42 @@ class ExpressionSelector:
def get_random_expressions(
self, chat_id: str, total_num: int, style_percentage: float, grammar_percentage: float
) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
# 支持多chat_id合并抽选
related_chat_ids = self.get_related_chat_ids(chat_id)
style_exprs = []
grammar_exprs = []
for cid in related_chat_ids:
style_query = Expression.select().where((Expression.chat_id == cid) & (Expression.type == "style"))
grammar_query = Expression.select().where((Expression.chat_id == cid) & (Expression.type == "grammar"))
style_exprs.extend([
{
"situation": expr.situation,
"style": expr.style,
"count": expr.count,
"last_active_time": expr.last_active_time,
"source_id": cid,
"type": "style",
"create_date": expr.create_date if expr.create_date is not None else expr.last_active_time,
} for expr in style_query
])
grammar_exprs.extend([
{
"situation": expr.situation,
"style": expr.style,
"count": expr.count,
"last_active_time": expr.last_active_time,
"source_id": cid,
"type": "grammar",
"create_date": expr.create_date if expr.create_date is not None else expr.last_active_time,
} for expr in grammar_query
])
# 优化一次性查询所有相关chat_id的表达方式
style_query = Expression.select().where(
(Expression.chat_id.in_(related_chat_ids)) & (Expression.type == "style")
)
grammar_query = Expression.select().where(
(Expression.chat_id.in_(related_chat_ids)) & (Expression.type == "grammar")
)
style_exprs = [
{
"situation": expr.situation,
"style": expr.style,
"count": expr.count,
"last_active_time": expr.last_active_time,
"source_id": expr.chat_id,
"type": "style",
"create_date": expr.create_date if expr.create_date is not None else expr.last_active_time,
} for expr in style_query
]
grammar_exprs = [
{
"situation": expr.situation,
"style": expr.style,
"count": expr.count,
"last_active_time": expr.last_active_time,
"source_id": expr.chat_id,
"type": "grammar",
"create_date": expr.create_date if expr.create_date is not None else expr.last_active_time,
} for expr in grammar_query
]
style_num = int(total_num * style_percentage)
grammar_num = int(total_num * grammar_percentage)
# 按权重抽样使用count作为权重
@@ -162,7 +168,7 @@ class ExpressionSelector:
selected_grammar = []
return selected_style, selected_grammar
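The sampling step referenced by the 按权重抽样 comment lies outside this hunk; a count-weighted draw consistent with it could look like this sketch (not the project's exact code; `random.choices` samples with replacement, so the real code may additionally dedupe):

```python
import random
from typing import Any, Dict, List

def weighted_sample(exprs: List[Dict[str, Any]], num: int) -> List[Dict[str, Any]]:
    if not exprs or num <= 0:
        return []
    weights = [expr["count"] for expr in exprs]   # higher count, higher odds
    k = min(num, len(exprs))
    return random.choices(exprs, weights=weights, k=k)  # with replacement
```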
def update_expressions_count_batch(self, expressions_to_update: List[Dict[str, str]], increment: float = 0.1):
def update_expressions_count_batch(self, expressions_to_update: List[Dict[str, Any]], increment: float = 0.1):
"""对一批表达方式更新count值按chat_id+type分组后一次性写入数据库"""
if not expressions_to_update:
return
@@ -203,7 +209,7 @@ class ExpressionSelector:
max_num: int = 10,
min_num: int = 5,
target_message: Optional[str] = None,
) -> List[Dict[str, str]]:
) -> List[Dict[str, Any]]:
# sourcery skip: inline-variable, list-comprehension
"""使用LLM选择适合的表达方式"""
@@ -273,6 +279,7 @@ class ExpressionSelector:
if not isinstance(result, dict) or "selected_situations" not in result:
logger.error("LLM返回格式错误")
logger.info(f"LLM返回结果: \n{content}")
return []
selected_indices = result["selected_situations"]

View File

@@ -12,6 +12,7 @@ from src.chat.message_receive.storage import MessageStorage
from src.chat.heart_flow.heartflow import heartflow
from src.chat.utils.utils import is_mentioned_bot_in_message
from src.chat.utils.timer_calculator import Timer
from src.chat.utils.chat_message_builder import replace_user_references_sync
from src.common.logger import get_logger
from src.person_info.relationship_manager import get_relationship_manager
from src.mood.mood_manager import mood_manager
@@ -56,16 +57,41 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
with Timer("记忆激活"):
interested_rate = await hippocampus_manager.get_activate_from_text(
message.processed_plain_text,
max_depth= 5,
fast_retrieval=False,
)
logger.debug(f"记忆激活率: {interested_rate:.2f}")
text_len = len(message.processed_plain_text)
# 根据文本长度调整兴趣度,长度越大兴趣度越高但增长率递减最低0.01最高0.05
# 采用对数函数实现递减增长
base_interest = 0.01 + (0.05 - 0.01) * (math.log10(text_len + 1) / math.log10(1000 + 1))
base_interest = min(max(base_interest, 0.01), 0.05)
# 根据文本长度分布调整兴趣度,采用分段函数实现更精确的兴趣度计算
# 基于实际分布0-5字符(26.57%), 6-10字符(27.18%), 11-20字符(22.76%), 21-30字符(10.33%), 31+字符(13.86%)
if text_len == 0:
base_interest = 0.01 # 空消息最低兴趣度
elif text_len <= 5:
# 1-5字符线性增长 0.01 -> 0.03
base_interest = 0.01 + (text_len - 1) * (0.03 - 0.01) / 4
elif text_len <= 10:
# 6-10字符线性增长 0.03 -> 0.06
base_interest = 0.03 + (text_len - 5) * (0.06 - 0.03) / 5
elif text_len <= 20:
# 11-20字符线性增长 0.06 -> 0.12
base_interest = 0.06 + (text_len - 10) * (0.12 - 0.06) / 10
elif text_len <= 30:
# 21-30字符线性增长 0.12 -> 0.18
base_interest = 0.12 + (text_len - 20) * (0.18 - 0.12) / 10
elif text_len <= 50:
# 31-50字符线性增长 0.18 -> 0.22
base_interest = 0.18 + (text_len - 30) * (0.22 - 0.18) / 20
elif text_len <= 100:
# 51-100字符线性增长 0.22 -> 0.26
base_interest = 0.22 + (text_len - 50) * (0.26 - 0.22) / 50
else:
# 100+字符:对数增长 0.26 -> 0.3,增长率递减
base_interest = 0.26 + (0.3 - 0.26) * (math.log10(text_len - 99) / math.log10(901)) # 1000-99=901
# 确保在范围内
base_interest = min(max(base_interest, 0.01), 0.3)
interested_rate += base_interest
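The piecewise schedule above replaces the old log curve with linear segments fitted to the observed length distribution; each segment interpolates between its endpoint interests, and only the 100+ tail stays logarithmic. Extracted as a standalone function for clarity:

```python
import math

def base_interest_for_length(text_len: int) -> float:
    # Piecewise-linear schedule from the hunk above; 100+ chars go logarithmic.
    if text_len == 0:
        interest = 0.01
    elif text_len <= 5:
        interest = 0.01 + (text_len - 1) * (0.03 - 0.01) / 4
    elif text_len <= 10:
        interest = 0.03 + (text_len - 5) * (0.06 - 0.03) / 5
    elif text_len <= 20:
        interest = 0.06 + (text_len - 10) * (0.12 - 0.06) / 10
    elif text_len <= 30:
        interest = 0.12 + (text_len - 20) * (0.18 - 0.12) / 10
    elif text_len <= 50:
        interest = 0.18 + (text_len - 30) * (0.22 - 0.18) / 20
    elif text_len <= 100:
        interest = 0.22 + (text_len - 50) * (0.26 - 0.22) / 50
    else:
        interest = 0.26 + (0.3 - 0.26) * (math.log10(text_len - 99) / math.log10(901))
    return min(max(interest, 0.01), 0.3)

assert abs(base_interest_for_length(10) - 0.06) < 1e-9   # segment boundary
assert base_interest_for_length(1000) <= 0.3             # capped at 0.3
```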
@@ -123,8 +149,15 @@ class HeartFCMessageReceiver:
# 如果消息中包含图片标识,则将 [picid:...] 替换为 [图片]
picid_pattern = r"\[picid:([^\]]+)\]"
processed_plain_text = re.sub(picid_pattern, "[图片]", message.processed_plain_text)
# 应用用户引用格式替换,将回复<aaa:bbb>和@<aaa:bbb>格式转换为可读格式
processed_plain_text = replace_user_references_sync(
processed_plain_text,
message.message_info.platform, # type: ignore
replace_bot_name=True
)
logger.info(f"[{mes_name}]{userinfo.user_nickname}:{processed_plain_text}") # type: ignore
logger.info(f"[{mes_name}]{userinfo.user_nickname}:{processed_plain_text}[兴趣度:{interested_rate:.2f}]") # type: ignore
logger.debug(f"[{mes_name}][当前时段回复频率: {current_talk_frequency}]")

View File

@@ -224,10 +224,16 @@ class Hippocampus:
return hash((source, target))
@staticmethod
def find_topic_llm(text, topic_num):
def find_topic_llm(text: str, topic_num: int | list[int]):
# sourcery skip: inline-immediately-returned-variable
topic_num_str = ""
if isinstance(topic_num, list):
topic_num_str = f"{topic_num[0]}-{topic_num[1]}"
else:
topic_num_str = str(topic_num)
prompt = (
f"这是一段文字:\n{text}\n\n请你从这段话中总结出最多{topic_num}个关键的概念,可以是名词,动词,或者特定人物,帮我列出来,"
f"这是一段文字:\n{text}\n\n请你从这段话中总结出最多{topic_num_str}个关键的概念,可以是名词,动词,或者特定人物,帮我列出来,"
f"将主题用逗号隔开,并加上<>,例如<主题1>,<主题2>......尽可能精简。只需要列举最多{topic_num}个话题就好,不要有序号,不要告诉我其他内容。"
f"如果确定找不出主题或者没有明显主题,返回<none>。"
)
@@ -300,6 +306,60 @@ class Hippocampus:
memories.sort(key=lambda x: x[2], reverse=True)
return memories
async def get_keywords_from_text(self, text: str) -> list:
"""从文本中提取关键词。
Args:
text (str): 输入文本
Returns:
list: 提取出的关键词列表
"""
if not text:
return []
# 使用LLM提取关键词 - 根据详细文本长度分布优化topic_num计算
text_length = len(text)
topic_num: int | list[int] = 0
if text_length <= 5:
words = jieba.cut(text)
keywords = [word for word in words if len(word) > 1]
keywords = list(set(keywords))[:3] # 限制最多3个关键词
if keywords:
logger.info(f"提取关键词: {keywords}")
return keywords
elif text_length <= 10:
topic_num = [1, 3] # 6-10字符: 1-3个关键词 (27.18%的文本)
elif text_length <= 20:
topic_num = [2, 4] # 11-20字符: 2-4个关键词 (22.76%的文本)
elif text_length <= 30:
topic_num = [3, 5] # 21-30字符: 3-5个关键词 (10.33%的文本)
elif text_length <= 50:
topic_num = [4, 5] # 31-50字符: 4-5个关键词 (9.79%的文本)
else:
topic_num = 5 # 51+字符: 5个关键词 (其余长文本)
topics_response, (reasoning_content, model_name) = await self.model_summary.generate_response_async(
self.find_topic_llm(text, topic_num)
)
# 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response)
if not keywords:
keywords = []
else:
keywords = [
keyword.strip()
for keyword in ",".join(keywords).replace("", ",").replace("", ",").replace(" ", ",").split(",")
if keyword.strip()
]
if keywords:
logger.info(f"提取关键词: {keywords}")
return keywords
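The `<topic>` wrapper makes the LLM output machine-parseable; the regex plus separator normalization then tolerates mixed delimiters. A quick illustration of that post-processing on a fabricated response:

```python
import re

# Fabricated LLM response, for illustration only.
topics_response = "<机器学习>,<神经网络> <数据>"
keywords = re.findall(r"<([^>]+)>", topics_response)
keywords = [
    keyword.strip()
    for keyword in ",".join(keywords).replace(",", ",").replace("、", ",").replace(" ", ",").split(",")
    if keyword.strip()
]
print(keywords)  # ['机器学习', '神经网络', '数据']
```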
async def get_memory_from_text(
self,
text: str,
@@ -325,39 +385,7 @@ class Hippocampus:
- memory_items: list, 该主题下的记忆项列表
- similarity: float, 与文本的相似度
"""
if not text:
return []
if fast_retrieval:
# 使用jieba分词提取关键词
words = jieba.cut(text)
# 过滤掉停用词和单字词
keywords = [word for word in words if len(word) > 1]
# 去重
keywords = list(set(keywords))
# 限制关键词数量
logger.debug(f"提取关键词: {keywords}")
else:
# 使用LLM提取关键词
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
# logger.info(f"提取关键词数量: {topic_num}")
topics_response, (reasoning_content, model_name) = await self.model_summary.generate_response_async(
self.find_topic_llm(text, topic_num)
)
# 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response)
if not keywords:
keywords = []
else:
keywords = [
keyword.strip()
for keyword in ",".join(keywords).replace("", ",").replace("", ",").replace(" ", ",").split(",")
if keyword.strip()
]
# logger.info(f"提取的关键词: {', '.join(keywords)}")
keywords = await self.get_keywords_from_text(text)
# 过滤掉不存在于记忆图中的关键词
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
@@ -679,38 +707,7 @@ class Hippocampus:
Returns:
float: 激活节点数与总节点数的比值
"""
if not text:
return 0
if fast_retrieval:
# 使用jieba分词提取关键词
words = jieba.cut(text)
# 过滤掉停用词和单字词
keywords = [word for word in words if len(word) > 1]
# 去重
keywords = list(set(keywords))
# 限制关键词数量
keywords = keywords[:5]
else:
# 使用LLM提取关键词
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
# logger.info(f"提取关键词数量: {topic_num}")
topics_response, (reasoning_content, model_name) = await self.model_summary.generate_response_async(
self.find_topic_llm(text, topic_num)
)
# 提取关键词
keywords = re.findall(r"<([^>]+)>", topics_response)
if not keywords:
keywords = []
else:
keywords = [
keyword.strip()
for keyword in ",".join(keywords).replace("", ",").replace("", ",").replace(" ", ",").split(",")
if keyword.strip()
]
# logger.info(f"提取的关键词: {', '.join(keywords)}")
keywords = await self.get_keywords_from_text(text)
# 过滤掉不存在于记忆图中的关键词
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
@@ -727,7 +724,7 @@ class Hippocampus:
for keyword in valid_keywords:
logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
# 初始化激活值
activation_values = {keyword: 1.0}
activation_values = {keyword: 1.5}
# 记录已访问的节点
visited_nodes = {keyword}
# 待处理的节点队列,每个元素是(节点, 激活值, 当前深度)
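The seed activation of 1.5 matches the hunk, but the per-hop propagation is outside it; a breadth-first spreading-activation sketch under an assumed halving decay per hop (the decay factor is an assumption, not the project's value):

```python
from collections import deque

import networkx as nx

# Sketch of BFS spreading activation as set up above; the 1.5 seed matches
# the hunk, but the 0.5 per-hop decay here is an assumption.
def spread_activation(G: nx.Graph, keyword: str, max_depth: int = 3) -> dict:
    activation_values = {keyword: 1.5}
    visited_nodes = {keyword}
    queue = deque([(keyword, 1.5, 0)])  # (node, activation, depth)
    while queue:
        node, activation, depth = queue.popleft()
        if depth >= max_depth:
            continue
        for neighbor in G.neighbors(node):
            if neighbor not in visited_nodes:
                next_activation = activation * 0.5  # assumed decay per hop
                visited_nodes.add(neighbor)
                activation_values[neighbor] = next_activation
                queue.append((neighbor, next_activation, depth + 1))
    return activation_values
```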
@@ -1315,6 +1312,7 @@ class ParahippocampalGyrus:
return compressed_memory, similar_topics_dict
async def operation_build_memory(self):
# sourcery skip: merge-list-appends-into-extend
logger.info("------------------------------------开始构建记忆--------------------------------------")
start_time = time.time()
memory_samples = self.hippocampus.entorhinal_cortex.get_memory_sample()

View File

@@ -444,7 +444,7 @@ class MessageSending(MessageProcessBase):
is_emoji: bool = False,
thinking_start_time: float = 0,
apply_set_reply_logic: bool = False,
reply_to: str = None, # type: ignore
reply_to: Optional[str] = None,
):
# 调用父类初始化
super().__init__(

View File

@@ -3,7 +3,7 @@ from src.plugin_system.base.base_action import BaseAction
from src.chat.message_receive.chat_stream import ChatStream
from src.common.logger import get_logger
from src.plugin_system.core.component_registry import component_registry
from src.plugin_system.base.component_types import ComponentType, ActionActivationType, ChatMode, ActionInfo
from src.plugin_system.base.component_types import ComponentType, ActionInfo
logger = get_logger("action_manager")
@@ -15,11 +15,6 @@ class ActionManager:
现在统一使用新插件系统,简化了原有的新旧兼容逻辑。
"""
# 类常量
DEFAULT_RANDOM_PROBABILITY = 0.3
DEFAULT_MODE = ChatMode.ALL
DEFAULT_ACTIVATION_TYPE = ActionActivationType.ALWAYS
def __init__(self):
"""初始化动作管理器"""

View File

@@ -174,7 +174,7 @@ class ActionModifier:
continue # 总是激活,无需处理
elif activation_type == ActionActivationType.RANDOM:
probability = action_info.random_activation_probability or ActionManager.DEFAULT_RANDOM_PROBABILITY
probability = action_info.random_activation_probability
if random.random() >= probability:
reason = f"RANDOM类型未触发概率{probability}"
deactivated_actions.append((action_name, reason))

View File

@@ -33,10 +33,11 @@ def init_prompt():
{time_block}
{identity_block}
你现在需要根据聊天内容选择的合适的action来参与聊天。
{chat_context_description},以下是具体的聊天内容
{chat_context_description},以下是具体的聊天内容
{chat_content_block}
{moderation_prompt}
现在请你根据{by_what}选择合适的action和触发action的消息:
@@ -45,7 +46,7 @@ def init_prompt():
{no_action_block}
{action_options_text}
你必须从上面列出的可用action中选择一个并说明触发action的消息id原因。
你必须从上面列出的可用action中选择一个,并说明触发action的消息id(不是消息原文)和选择该action的原因。
请根据动作示例,以严格的 JSON 格式输出,且仅包含 JSON 内容:
""",
@@ -128,20 +129,6 @@ class ActionPlanner:
else:
logger.warning(f"{self.log_prefix}使用中的动作 {action_name} 未在已注册动作中找到")
# 如果没有可用动作或只有no_reply动作直接返回no_reply
# 因为现在reply是永远激活所以不需要空跳判定
# if not current_available_actions:
# action = "no_reply" if mode == ChatMode.FOCUS else "no_action"
# reasoning = "没有可用的动作"
# logger.info(f"{self.log_prefix}{reasoning}")
# return {
# "action_result": {
# "action_type": action,
# "action_data": action_data,
# "reasoning": reasoning,
# },
# }, None
# --- 构建提示词 (调用修改后的 PromptBuilder 方法) ---
prompt, message_id_list = await self.build_planner_prompt(
is_group_chat=is_group_chat, # <-- Pass HFC state
@@ -224,7 +211,7 @@ class ActionPlanner:
reasoning = f"Planner 内部处理错误: {outer_e}"
is_parallel = False
if action in current_available_actions:
if mode == ChatMode.NORMAL and action in current_available_actions:
is_parallel = current_available_actions[action].parallel_action
action_result = {
@@ -268,7 +255,7 @@ class ActionPlanner:
actions_before_now = get_actions_by_timestamp_with_chat(
chat_id=self.chat_id,
timestamp_start=time.time()-3600,
timestamp_start=time.time() - 3600,
timestamp_end=time.time(),
limit=5,
)
@@ -276,7 +263,7 @@ class ActionPlanner:
actions_before_now_block = build_readable_actions(
actions=actions_before_now,
)
actions_before_now_block = f"你刚刚选择并执行过的action是\n{actions_before_now_block}"
self.last_obs_time_mark = time.time()
@@ -288,7 +275,6 @@ class ActionPlanner:
if global_config.chat.at_bot_inevitable_reply:
mentioned_bonus = "\n- 有人提到你或者at你"
by_what = "聊天内容"
target_prompt = '\n "target_message_id":"触发action的消息id"'
no_action_block = f"""重要说明:
@@ -311,7 +297,7 @@ class ActionPlanner:
by_what = "聊天内容和用户的最新消息"
target_prompt = ""
no_action_block = """重要说明:
- 'no_action' 表示只进行普通聊天回复,不执行任何额外动作
- 'reply' 表示只进行普通聊天回复,不执行任何额外动作
- 其他action表示在普通回复的基础上执行相应的额外动作"""
chat_context_description = "你现在正在一个群聊中"

View File

@@ -17,7 +17,11 @@ from src.chat.message_receive.uni_message_sender import HeartFCSender
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.utils.utils import get_chat_type_and_target_info
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
from src.chat.utils.chat_message_builder import (
build_readable_messages,
get_raw_msg_before_timestamp_with_chat,
replace_user_references_sync,
)
from src.chat.express.expression_selector import expression_selector
from src.chat.knowledge.knowledge_lib import qa_manager
from src.chat.memory_system.memory_activator import MemoryActivator
@@ -25,42 +29,16 @@ from src.chat.memory_system.instant_memory import InstantMemory
from src.mood.mood_manager import mood_manager
from src.person_info.relationship_fetcher import relationship_fetcher_manager
from src.person_info.person_info import get_person_info_manager
from src.tools.tool_executor import ToolExecutor
from src.plugin_system.base.component_types import ActionInfo
logger = get_logger("replyer")
def init_prompt():
Prompt("你正在qq群里聊天下面是群里在聊的内容", "chat_target_group1")
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
Prompt("在群里聊天", "chat_target_group2")
Prompt("{sender_name}聊天", "chat_target_private2")
Prompt("\n你有以下这些**知识**\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
Prompt(
"""
{expression_habits_block}
{tool_info_block}
{knowledge_prompt}
{memory_block}
{relation_info_block}
{extra_info_block}
{chat_target}
{time_block}
{chat_info}
{reply_target_block}
{identity}
{action_descriptions}
你正在{chat_target_2},你现在的心情是:{mood_state}
现在请你读读之前的聊天记录,并给出回复
{config_expression_style}。注意不要复读你说过的话
{keywords_reaction_prompt}
{moderation_prompt}
不要浮夸,不要夸张修辞,不要输出多余内容(包括前后缀,冒号和引号,括号()表情包at或 @等 )。只输出回复内容""",
"default_generator_prompt",
)
Prompt(
"""
@@ -109,7 +87,8 @@ def init_prompt():
{core_dialogue_prompt}
{reply_target_block}
对方最新发送的内容:{message_txt}
你现在的心情是:{mood_state}
{config_expression_style}
注意不要复读你说过的话
@@ -159,6 +138,8 @@ class DefaultReplyer:
self.heart_fc_sender = HeartFCSender()
self.memory_activator = MemoryActivator()
self.instant_memory = InstantMemory(chat_id=self.chat_stream.stream_id)
from src.plugin_system.core.tool_use import ToolExecutor # 延迟导入ToolExecutor不然会循环依赖
self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id, enable_cache=True, cache_ttl=3)
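The late import noted in the comment is the standard way to break an import cycle: the dependency is resolved at call time instead of module load time, by which point both modules are fully initialized. In miniature (same import as the diff, wrapped in a hypothetical factory):

```python
# Deferred import: resolving ToolExecutor inside the function avoids the
# circular dependency that a top-level import would create.
def make_executor(chat_id: str):
    from src.plugin_system.core.tool_use import ToolExecutor  # deferred import
    return ToolExecutor(chat_id=chat_id, enable_cache=True, cache_ttl=3)
```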
def _select_weighted_model_config(self) -> Dict[str, Any]:
@@ -171,67 +152,49 @@ class DefaultReplyer:
async def generate_reply_with_context(
self,
reply_data: Optional[Dict[str, Any]] = None,
reply_to: str = "",
extra_info: str = "",
available_actions: Optional[Dict[str, ActionInfo]] = None,
enable_tool: bool = True,
enable_timeout: bool = False,
) -> Tuple[bool, Optional[str], Optional[str]]:
"""
回复器 (Replier): 核心逻辑,负责生成回复文本。
(已整合原 HeartFCGenerator 的功能)
回复器 (Replier): 负责生成回复文本的核心逻辑
Args:
reply_to: 回复对象,格式为 "发送者:消息内容"
extra_info: 额外信息,用于补充上下文
available_actions: 可用的动作信息字典
enable_tool: 是否启用工具调用
Returns:
Tuple[bool, Optional[str], Optional[str]]: (是否成功, 生成的回复内容, 使用的prompt)
"""
prompt = None
if available_actions is None:
available_actions = {}
try:
if not reply_data:
reply_data = {
"reply_to": reply_to,
"extra_info": extra_info,
}
for key, value in reply_data.items():
if not value:
logger.debug(f"回复数据跳过{key},生成回复时将忽略。")
# 3. 构建 Prompt
with Timer("构建Prompt", {}): # 内部计时器,可选保留
prompt = await self.build_prompt_reply_context(
reply_data=reply_data, # 传递action_data
reply_to=reply_to,
extra_info=extra_info,
available_actions=available_actions,
enable_timeout=enable_timeout,
enable_tool=enable_tool,
)
if not prompt:
logger.warning("构建prompt失败跳过回复生成")
return False, None, None
# 4. 调用 LLM 生成回复
content = None
reasoning_content = None
model_name = "unknown_model"
# TODO: 复活这里
# reasoning_content = None
# model_name = "unknown_model"
try:
with Timer("LLM生成", {}): # 内部计时器,可选保留
# 加权随机选择一个模型配置
selected_model_config = self._select_weighted_model_config()
# 兼容新旧格式的模型名称获取
model_display_name = selected_model_config.get('model_name', selected_model_config.get('name', 'N/A'))
logger.info(
f"使用模型生成回复: {model_display_name} (选中概率: {selected_model_config.get('weight', 1.0)})"
)
express_model = LLMRequest(
model=selected_model_config,
request_type=self.request_type,
)
if global_config.debug.show_prompt:
logger.info(f"\n{prompt}\n")
else:
logger.debug(f"\n{prompt}\n")
content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt)
logger.debug(f"replyer生成内容: {content}")
content = await self.llm_generate_content(prompt)
logger.debug(f"replyer生成内容: {content}")
except Exception as llm_e:
# 精简报错信息
@@ -247,73 +210,62 @@ class DefaultReplyer:
async def rewrite_reply_with_context(
self,
reply_data: Dict[str, Any],
raw_reply: str = "",
reason: str = "",
reply_to: str = "",
relation_info: str = "",
) -> Tuple[bool, Optional[str]]:
return_prompt: bool = False,
) -> Tuple[bool, Optional[str], Optional[str]]:
"""
表达器 (Expressor): 核心逻辑,负责生成回复文本。
表达器 (Expressor): 负责重写和优化回复文本。
Args:
raw_reply: 原始回复内容
reason: 回复原因
reply_to: 回复对象,格式为 "发送者:消息内容"
relation_info: 关系信息
Returns:
Tuple[bool, Optional[str]]: (是否成功, 重写后的回复内容)
"""
try:
if not reply_data:
reply_data = {
"reply_to": reply_to,
"relation_info": relation_info,
}
with Timer("构建Prompt", {}): # 内部计时器,可选保留
prompt = await self.build_prompt_rewrite_context(
reply_data=reply_data,
raw_reply=raw_reply,
reason=reason,
reply_to=reply_to,
)
content = None
reasoning_content = None
model_name = "unknown_model"
# TODO: 复活这里
# reasoning_content = None
# model_name = "unknown_model"
if not prompt:
logger.error("Prompt 构建失败,无法生成回复。")
return False, None
return False, None, None
try:
with Timer("LLM生成", {}): # 内部计时器,可选保留
# 加权随机选择一个模型配置
selected_model_config = self._select_weighted_model_config()
# 兼容新旧格式的模型名称获取
model_display_name = selected_model_config.get('model_name', selected_model_config.get('name', 'N/A'))
logger.info(
f"使用模型重写回复: {model_display_name} (选中概率: {selected_model_config.get('weight', 1.0)})"
)
express_model = LLMRequest(
model=selected_model_config,
request_type=self.request_type,
)
content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt)
logger.info(f"想要表达:{raw_reply}||理由:{reason}||生成回复: {content}\n")
content = await self.llm_generate_content(prompt)
logger.info(f"想要表达:{raw_reply}||理由:{reason}||生成回复: {content}\n")
except Exception as llm_e:
# 精简报错信息
logger.error(f"LLM 生成失败: {llm_e}")
return False, None # LLM 调用失败则无法生成回复
return False, None, prompt if return_prompt else None # LLM 调用失败则无法生成回复
return True, content
return True, content, prompt if return_prompt else None
except Exception as e:
logger.error(f"回复生成意外失败: {e}")
traceback.print_exc()
return False, None
return False, None, prompt if return_prompt else None
async def build_relation_info(self, reply_data=None):
async def build_relation_info(self, reply_to: str = ""):
if not global_config.relationship.enable_relationship:
return ""
relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
if not reply_data:
if not reply_to:
return ""
reply_to = reply_data.get("reply_to", "")
sender, text = self._parse_reply_target(reply_to)
if not sender or not text:
return ""
@@ -327,7 +279,16 @@ class DefaultReplyer:
return await relationship_fetcher.build_relation_info(person_id, points_num=5)
async def build_expression_habits(self, chat_history, target):
async def build_expression_habits(self, chat_history: str, target: str) -> str:
"""构建表达习惯块
Args:
chat_history: 聊天历史记录
target: 目标消息内容
Returns:
str: 表达习惯信息字符串
"""
if not global_config.expression.enable_expression:
return ""
@@ -360,54 +321,65 @@ class DefaultReplyer:
expression_habits_block = ""
expression_habits_title = ""
if style_habits_str.strip():
expression_habits_title = "你可以参考以下的语言习惯,当情景合适就使用,但不要生硬使用,以合理的方式结合到你的回复中:"
expression_habits_title = (
"你可以参考以下的语言习惯,当情景合适就使用,但不要生硬使用,以合理的方式结合到你的回复中:"
)
expression_habits_block += f"{style_habits_str}\n"
if grammar_habits_str.strip():
expression_habits_title = "你可以选择下面的句法进行回复,如果情景合适就使用,不要盲目使用,不要生硬使用,以合理的方式使用:"
expression_habits_title = (
"你可以选择下面的句法进行回复,如果情景合适就使用,不要盲目使用,不要生硬使用,以合理的方式使用:"
)
expression_habits_block += f"{grammar_habits_str}\n"
if style_habits_str.strip() and grammar_habits_str.strip():
expression_habits_title = "你可以参考以下的语言习惯和句法,如果情景合适就使用,不要盲目使用,不要生硬使用,以合理的方式结合到你的回复中:"
expression_habits_block = f"{expression_habits_title}\n{expression_habits_block}"
return expression_habits_block
return f"{expression_habits_title}\n{expression_habits_block}"
async def build_memory_block(self, chat_history, target):
async def build_memory_block(self, chat_history: str, target: str) -> str:
"""构建记忆块
Args:
chat_history: 聊天历史记录
target: 目标消息内容
Returns:
str: 记忆信息字符串
"""
if not global_config.memory.enable_memory:
return ""
instant_memory = None
running_memories = await self.memory_activator.activate_memory_with_chat_history(
target_message=target, chat_history_prompt=chat_history
)
if global_config.memory.enable_instant_memory:
asyncio.create_task(self.instant_memory.create_and_store_memory(chat_history))
instant_memory = await self.instant_memory.get_memory(target)
logger.info(f"即时记忆:{instant_memory}")
if not running_memories:
return ""
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
for running_memory in running_memories:
memory_str += f"- {running_memory['content']}\n"
if instant_memory:
memory_str += f"- {instant_memory}\n"
return memory_str
async def build_tool_info(self, chat_history, reply_data: Optional[Dict], enable_tool: bool = True):
async def build_tool_info(self, chat_history: str, reply_to: str = "", enable_tool: bool = True) -> str:
"""构建工具信息块
Args:
reply_data: 回复数据,包含要回复的消息内容
chat_history: 聊天历史
chat_history: 聊天历史记录
reply_to: 回复对象,格式为 "发送者:消息内容"
enable_tool: 是否启用工具调用
Returns:
str: 工具信息字符串
@@ -416,10 +388,9 @@ class DefaultReplyer:
if not enable_tool:
return ""
if not reply_data:
if not reply_to:
return ""
reply_to = reply_data.get("reply_to", "")
sender, text = self._parse_reply_target(reply_to)
if not text:
@@ -442,7 +413,7 @@ class DefaultReplyer:
tool_info_str += "以上是你获取到的实时信息,请在回复时参考这些信息。"
logger.info(f"获取到 {len(tool_results)} 个工具结果")
return tool_info_str
else:
logger.debug("未获取到任何工具结果")
@@ -452,7 +423,15 @@ class DefaultReplyer:
logger.error(f"工具信息获取失败: {e}")
return ""
def _parse_reply_target(self, target_message: str) -> tuple:
def _parse_reply_target(self, target_message: str) -> Tuple[str, str]:
"""解析回复目标消息
Args:
target_message: 目标消息,格式为 "发送者:消息内容" 或 "发送者:消息内容"(全角或半角冒号)
Returns:
Tuple[str, str]: (发送者名称, 消息内容)
"""
sender = ""
target = ""
# 添加None检查防止NoneType错误
@@ -466,14 +445,22 @@ class DefaultReplyer:
target = parts[1].strip()
return sender, target
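Given the two accepted colon widths, the parser splits on the first separator only. A standalone re-implementation under the documented contract (the original's exact split logic is partly outside this hunk):

```python
def parse_reply_target(target_message: str) -> tuple[str, str]:
    # Mirrors _parse_reply_target: normalize the full-width colon,
    # then split on the first colon only.
    if target_message and (":" in target_message or ":" in target_message):
        parts = target_message.replace(":", ":").split(":", 1)
        if len(parts) == 2:
            return parts[0].strip(), parts[1].strip()
    return "", ""

assert parse_reply_target("张三:今天天气怎么样") == ("张三", "今天天气怎么样")
```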
async def build_keywords_reaction_prompt(self, target):
async def build_keywords_reaction_prompt(self, target: Optional[str]) -> str:
"""构建关键词反应提示
Args:
target: 目标消息内容
Returns:
str: 关键词反应提示字符串
"""
# 关键词检测与反应
keywords_reaction_prompt = ""
try:
# 添加None检查防止NoneType错误
if target is None:
return keywords_reaction_prompt
# 处理关键词规则
for rule in global_config.keyword_reaction.keyword_rules:
if any(keyword in target for keyword in rule.keywords):
@@ -500,15 +487,25 @@ class DefaultReplyer:
return keywords_reaction_prompt
async def _time_and_run_task(self, coroutine, name: str):
"""一个简单的帮助函数,用于计时运行异步任务,返回任务名、结果和耗时"""
async def _time_and_run_task(self, coroutine, name: str) -> Tuple[str, Any, float]:
"""计时运行异步任务的辅助函数
Args:
coroutine: 要执行的协程
name: 任务名称
Returns:
Tuple[str, Any, float]: (任务名称, 任务结果, 执行耗时)
"""
start_time = time.time()
result = await coroutine
end_time = time.time()
duration = end_time - start_time
return name, result, duration
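The helper exists so that several prompt-building subtasks can be gathered concurrently while keeping per-task names and timings, as the later `asyncio.gather` calls in this file do. A runnable sketch of that usage (task names borrowed from the diff; `slow` is a stand-in):

```python
import asyncio
import time

async def _time_and_run_task(coroutine, name: str):
    start_time = time.time()
    result = await coroutine
    return name, result, time.time() - start_time

async def slow(x):
    await asyncio.sleep(0.1)  # stand-in for a real build_* coroutine
    return x * 2

async def main():
    results = await asyncio.gather(
        _time_and_run_task(slow(1), "memory_block"),
        _time_and_run_task(slow(2), "relation_info"),
    )
    for name, result, duration in results:
        print(f"{name}: {result} ({duration:.2f}s)")

asyncio.run(main())
```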
def build_s4u_chat_history_prompts(self, message_list_before_now: list, target_user_id: str) -> tuple[str, str]:
def build_s4u_chat_history_prompts(
self, message_list_before_now: List[Dict[str, Any]], target_user_id: str
) -> Tuple[str, str]:
"""
构建 s4u 风格的分离对话 prompt
@@ -517,7 +514,7 @@ class DefaultReplyer:
target_user_id: 目标用户ID当前对话对象
Returns:
tuple: (核心对话prompt, 背景对话prompt)
Tuple[str, str]: (核心对话prompt, 背景对话prompt)
"""
core_dialogue_list = []
background_dialogue_list = []
@@ -536,7 +533,7 @@ class DefaultReplyer:
# 其他用户的对话
background_dialogue_list.append(msg_dict)
except Exception as e:
logger.error(f"![1753364551656](image/default_generator/1753364551656.png)记录: {msg_dict}, 错误: {e}")
logger.error(f"处理消息记录时出错: {msg_dict}, 错误: {e}")
# 构建背景对话 prompt
background_dialogue_prompt = ""
@@ -581,8 +578,25 @@ class DefaultReplyer:
sender: str,
target: str,
chat_info: str,
):
"""构建 mai_think 上下文信息"""
) -> Any:
"""构建 mai_think 上下文信息
Args:
chat_id: 聊天ID
memory_block: 记忆块内容
relation_info: 关系信息
time_block: 时间块内容
chat_target_1: 聊天目标1
chat_target_2: 聊天目标2
mood_prompt: 情绪提示
identity_block: 身份块内容
sender: 发送者名称
target: 目标消息内容
chat_info: 聊天信息
Returns:
Any: mai_think 实例
"""
mai_think = mai_thinking_manager.get_mai_think(chat_id)
mai_think.memory_block = memory_block
mai_think.relation_info_block = relation_info
@@ -598,21 +612,20 @@ class DefaultReplyer:
async def build_prompt_reply_context(
self,
reply_data: Dict[str, Any],
reply_to: str,
extra_info: str = "",
available_actions: Optional[Dict[str, ActionInfo]] = None,
enable_timeout: bool = False,
enable_tool: bool = True,
) -> str: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
"""
构建回复器上下文
Args:
reply_data: 回复数据
replay_data 包含以下字段:
structured_info: 结构化信息,一般是工具调用获得的信息
reply_to: 回复对象
extra_info/extra_info_block: 额外信息
reply_to: 回复对象,格式为 "发送者:消息内容"
extra_info: 额外信息,用于补充上下文
available_actions: 可用动作
enable_timeout: 是否启用超时处理
enable_tool: 是否启用工具调用
Returns:
str: 构建好的上下文
@@ -623,9 +636,7 @@ class DefaultReplyer:
chat_id = chat_stream.stream_id
person_info_manager = get_person_info_manager()
is_group_chat = bool(chat_stream.group_info)
reply_to = reply_data.get("reply_to", "none")
extra_info_block = reply_data.get("extra_info", "") or reply_data.get("extra_info_block", "")
if global_config.mood.enable_mood:
chat_mood = mood_manager.get_mood_by_chat_id(chat_id)
mood_prompt = chat_mood.mood_state
@@ -633,6 +644,15 @@ class DefaultReplyer:
mood_prompt = ""
sender, target = self._parse_reply_target(reply_to)
person_info_manager = get_person_info_manager()
person_id = person_info_manager.get_person_id_by_person_name(sender)
user_id = person_info_manager.get_value_sync(person_id, "user_id")
platform = chat_stream.platform
if user_id == global_config.bot.qq_account and platform == global_config.bot.platform:
logger.warning("选取了自身作为回复对象跳过构建prompt")
return ""
target = replace_user_references_sync(target, chat_stream.platform, replace_bot_name=True)
# 构建action描述 (如果启用planner)
action_descriptions = ""
@@ -649,21 +669,6 @@ class DefaultReplyer:
limit=global_config.chat.max_context_size * 2,
)
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_id,
timestamp=time.time(),
limit=global_config.chat.max_context_size,
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
show_actions=True,
)
message_list_before_short = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_id,
timestamp=time.time(),
@@ -683,25 +688,21 @@ class DefaultReplyer:
self._time_and_run_task(
self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
),
self._time_and_run_task(
self.build_relation_info(reply_data), "relation_info"
),
self._time_and_run_task(self.build_relation_info(reply_to), "relation_info"),
self._time_and_run_task(self.build_memory_block(chat_talking_prompt_short, target), "memory_block"),
self._time_and_run_task(
self.build_tool_info(chat_talking_prompt_short, reply_data, enable_tool=enable_tool), "tool_info"
),
self._time_and_run_task(
get_prompt_info(target, threshold=0.38), "prompt_info"
self.build_tool_info(chat_talking_prompt_short, reply_to, enable_tool=enable_tool), "tool_info"
),
self._time_and_run_task(get_prompt_info(target, threshold=0.38), "prompt_info"),
)
# 任务名称中英文映射
task_name_mapping = {
"expression_habits": "选取表达方式",
"relation_info": "感受关系",
"relation_info": "感受关系",
"memory_block": "回忆",
"tool_info": "使用工具",
"prompt_info": "获取知识"
"prompt_info": "获取知识",
}
# Process the results
@@ -723,8 +724,8 @@ class DefaultReplyer:
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
if extra_info_block:
extra_info_block = f"以下是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策"
if extra_info:
extra_info_block = f"以下是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策\n{extra_info}\n以上是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策"
else:
extra_info_block = ""
@@ -779,116 +780,74 @@ class DefaultReplyer:
# Look up person_id from sender via person_info_manager, then fetch the user_id
person_id = person_info_manager.get_person_id_by_person_name(sender)
# Choose which prompt construction mode to use based on the config
if global_config.chat.use_s4u_prompt_mode and person_id:
# Use the s4u dialogue construction mode: separate the current interlocutor from other conversations
try:
user_id_value = await person_info_manager.get_value(person_id, "user_id")
if user_id_value:
target_user_id = str(user_id_value)
except Exception as e:
logger.warning(f"无法从person_id {person_id} 获取user_id: {e}")
target_user_id = ""
# Use the s4u dialogue construction mode: separate the current interlocutor from other conversations
try:
user_id_value = await person_info_manager.get_value(person_id, "user_id")
if user_id_value:
target_user_id = str(user_id_value)
except Exception as e:
logger.warning(f"无法从person_id {person_id} 获取user_id: {e}")
target_user_id = ""
# Build the separated dialogue prompts
core_dialogue_prompt, background_dialogue_prompt = self.build_s4u_chat_history_prompts(
message_list_before_now_long, target_user_id
)
self.build_mai_think_context(
chat_id=chat_id,
memory_block=memory_block,
relation_info=relation_info,
time_block=time_block,
chat_target_1=chat_target_1,
chat_target_2=chat_target_2,
mood_prompt=mood_prompt,
identity_block=identity_block,
sender=sender,
target=target,
chat_info=f"""
# Build the separated dialogue prompts
core_dialogue_prompt, background_dialogue_prompt = self.build_s4u_chat_history_prompts(
message_list_before_now_long, target_user_id
)
self.build_mai_think_context(
chat_id=chat_id,
memory_block=memory_block,
relation_info=relation_info,
time_block=time_block,
chat_target_1=chat_target_1,
chat_target_2=chat_target_2,
mood_prompt=mood_prompt,
identity_block=identity_block,
sender=sender,
target=target,
chat_info=f"""
{background_dialogue_prompt}
--------------------------------
{time_block}
这是你和{sender}的对话,你们正在交流中:
{core_dialogue_prompt}"""
)
{core_dialogue_prompt}""",
)
# Use the s4u-style template
template_name = "s4u_style_prompt"
# Use the s4u-style template
template_name = "s4u_style_prompt"
return await global_prompt_manager.format_prompt(
template_name,
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
memory_block=memory_block,
relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=identity_block,
action_descriptions=action_descriptions,
sender_name=sender,
mood_state=mood_prompt,
background_dialogue_prompt=background_dialogue_prompt,
time_block=time_block,
core_dialogue_prompt=core_dialogue_prompt,
reply_target_block=reply_target_block,
message_txt=target,
config_expression_style=global_config.expression.expression_style,
keywords_reaction_prompt=keywords_reaction_prompt,
moderation_prompt=moderation_prompt_block,
)
else:
self.build_mai_think_context(
chat_id=chat_id,
memory_block=memory_block,
relation_info=relation_info,
time_block=time_block,
chat_target_1=chat_target_1,
chat_target_2=chat_target_2,
mood_prompt=mood_prompt,
identity_block=identity_block,
sender=sender,
target=target,
chat_info=chat_talking_prompt
)
# Use the original mode
return await global_prompt_manager.format_prompt(
template_name,
expression_habits_block=expression_habits_block,
chat_target=chat_target_1,
chat_info=chat_talking_prompt,
memory_block=memory_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
extra_info_block=extra_info_block,
relation_info_block=relation_info,
time_block=time_block,
reply_target_block=reply_target_block,
moderation_prompt=moderation_prompt_block,
keywords_reaction_prompt=keywords_reaction_prompt,
identity=identity_block,
target_message=target,
sender_name=sender,
config_expression_style=global_config.expression.expression_style,
action_descriptions=action_descriptions,
chat_target_2=chat_target_2,
mood_state=mood_prompt,
)
return await global_prompt_manager.format_prompt(
template_name,
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
memory_block=memory_block,
relation_info_block=relation_info,
extra_info_block=extra_info_block,
identity=identity_block,
action_descriptions=action_descriptions,
sender_name=sender,
mood_state=mood_prompt,
background_dialogue_prompt=background_dialogue_prompt,
time_block=time_block,
core_dialogue_prompt=core_dialogue_prompt,
reply_target_block=reply_target_block,
message_txt=target,
config_expression_style=global_config.expression.expression_style,
keywords_reaction_prompt=keywords_reaction_prompt,
moderation_prompt=moderation_prompt_block,
)
async def build_prompt_rewrite_context(
self,
reply_data: Dict[str, Any],
raw_reply: str,
reason: str,
reply_to: str,
) -> str:
chat_stream = self.chat_stream
chat_id = chat_stream.stream_id
is_group_chat = bool(chat_stream.group_info)
reply_to = reply_data.get("reply_to", "none")
raw_reply = reply_data.get("raw_reply", "")
reason = reply_data.get("reason", "")
sender, target = self._parse_reply_target(reply_to)
# Add mood-state retrieval
@@ -915,7 +874,7 @@ class DefaultReplyer:
# Run the 2 build tasks in parallel
expression_habits_block, relation_info = await asyncio.gather(
self.build_expression_habits(chat_talking_prompt_half, target),
self.build_relation_info(reply_data),
self.build_relation_info(reply_to),
)
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
@@ -1018,6 +977,31 @@ class DefaultReplyer:
display_message=display_message,
)
async def llm_generate_content(self, prompt: str) -> str:
with Timer("LLM生成", {}): # 内部计时器,可选保留
# 加权随机选择一个模型配置
selected_model_config = self._select_weighted_model_config()
model_display_name = selected_model_config.get('model_name') or selected_model_config.get('name', 'N/A')
logger.info(
f"使用模型生成回复: {model_display_name} (选中概率: {selected_model_config.get('weight', 1.0)})"
)
express_model = LLMRequest(
model=selected_model_config,
request_type=self.request_type,
)
if global_config.debug.show_prompt:
logger.info(f"\n{prompt}\n")
else:
logger.debug(f"\n{prompt}\n")
# TODO: the discarded `_` return value here should be handled instead of ignored
content, _ = await express_model.generate_response_async(prompt)
logger.debug(f"replyer生成内容: {content}")
return content
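`_select_weighted_model_config` itself sits outside this hunk; a plausible sketch under the assumption that each candidate config dict carries an optional "weight" field (the function name mirrors the call site above, but the body is conjecture):
import random

def select_weighted_model_config(model_configs: list) -> dict:
    # One weighted draw (with replacement): each config is picked with
    # probability proportional to its optional "weight", defaulting to 1.0.
    weights = [cfg.get("weight", 1.0) for cfg in model_configs]
    return random.choices(model_configs, weights=weights, k=1)[0]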
def weighted_sample_no_replacement(items, weights, k) -> list:
"""
@@ -1075,10 +1059,8 @@ async def get_prompt_info(message: str, threshold: float):
related_info += found_knowledge_from_lpmm
logger.debug(f"获取知识库内容耗时: {(end_time - start_time):.3f}")
logger.debug(f"获取知识库内容,相关信息:{related_info[:100]}...,信息长度: {len(related_info)}")
# Format the knowledge info
formatted_prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=related_info)
return formatted_prompt_info
return f"你有以下这些**知识**\n{related_info}\n请你**记住上面的知识**,之后可能会用到。\n"
else:
logger.debug("从LPMM知识库获取知识失败可能是从未导入过知识返回空知识...")
return ""

View File

@@ -2,7 +2,7 @@ import time  # import the time module to get the current time
import random
import re
from typing import List, Dict, Any, Tuple, Optional
from typing import List, Dict, Any, Tuple, Optional, Callable
from rich.traceback import install
from src.config.config import global_config
@@ -10,11 +10,161 @@ from src.common.message_repository import find_messages, count_messages
from src.common.database.database_model import ActionRecords
from src.common.database.database_model import Images
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
from src.chat.utils.utils import translate_timestamp_to_human_readable,assign_message_ids
from src.chat.utils.utils import translate_timestamp_to_human_readable, assign_message_ids
install(extra_lines=3)
def replace_user_references_sync(
content: str,
platform: str,
name_resolver: Optional[Callable[[str, str], str]] = None,
replace_bot_name: bool = True,
) -> str:
"""
替换内容中的用户引用格式,包括回复<aaa:bbb>和@<aaa:bbb>格式
Args:
content: 要处理的内容字符串
platform: 平台标识
name_resolver: 名称解析函数,接收(platform, user_id)参数,返回用户名称
如果为None则使用默认的person_info_manager
replace_bot_name: 是否将机器人的user_id替换为"机器人昵称(你)"
Returns:
str: 处理后的内容字符串
"""
if name_resolver is None:
person_info_manager = get_person_info_manager()
def default_resolver(platform: str, user_id: str) -> str:
# Check whether this is the bot itself
if replace_bot_name and user_id == global_config.bot.qq_account:
return f"{global_config.bot.nickname}(你)"
person_id = PersonInfoManager.get_person_id(platform, user_id)
return person_info_manager.get_value_sync(person_id, "person_name") or user_id # type: ignore
name_resolver = default_resolver
# Handle the 回复<aaa:bbb> (reply) format
reply_pattern = r"回复<([^:<>]+):([^:<>]+)>"
match = re.search(reply_pattern, content)
if match:
aaa = match[1]
bbb = match[2]
try:
# Check whether this is the bot itself
if replace_bot_name and bbb == global_config.bot.qq_account:
reply_person_name = f"{global_config.bot.nickname}(你)"
else:
reply_person_name = name_resolver(platform, bbb) or aaa
content = re.sub(reply_pattern, f"回复 {reply_person_name}", content, count=1)
except Exception:
# If resolution fails, fall back to the original nickname
content = re.sub(reply_pattern, f"回复 {aaa}", content, count=1)
# Handle the @<aaa:bbb> format
at_pattern = r"@<([^:<>]+):([^:<>]+)>"
at_matches = list(re.finditer(at_pattern, content))
if at_matches:
new_content = ""
last_end = 0
for m in at_matches:
new_content += content[last_end : m.start()]
aaa = m.group(1)
bbb = m.group(2)
try:
# Check whether this is the bot itself
if replace_bot_name and bbb == global_config.bot.qq_account:
at_person_name = f"{global_config.bot.nickname}(你)"
else:
at_person_name = name_resolver(platform, bbb) or aaa
new_content += f"@{at_person_name}"
except Exception:
# If resolution fails, fall back to the original nickname
new_content += f"@{aaa}"
last_end = m.end()
new_content += content[last_end:]
content = new_content
return content
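A hedged usage example for the sync variant; the IDs and nicknames are made up, and the commented output assumes a live person-info store with no stored names for these IDs (the default resolver then falls back to the raw user_id):
raw = "回复<小明:10001> @<小红:10002> 你们好"
# Default resolver, unknown IDs -> falls back to the raw user_id:
#   "回复 10001 @10002 你们好"
print(replace_user_references_sync(raw, platform="qq"))

# A custom resolver can map IDs to display names instead:
print(replace_user_references_sync(raw, "qq", name_resolver=lambda p, uid: f"用户{uid}"))
#   "回复 用户10001 @用户10002 你们好"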
async def replace_user_references_async(
content: str,
platform: str,
name_resolver: Optional[Callable[[str, str], Any]] = None,
replace_bot_name: bool = True,
) -> str:
"""
替换内容中的用户引用格式,包括回复<aaa:bbb>和@<aaa:bbb>格式
Args:
content: 要处理的内容字符串
platform: 平台标识
name_resolver: 名称解析函数,接收(platform, user_id)参数,返回用户名称
如果为None则使用默认的person_info_manager
replace_bot_name: 是否将机器人的user_id替换为"机器人昵称(你)"
Returns:
str: 处理后的内容字符串
"""
if name_resolver is None:
person_info_manager = get_person_info_manager()
async def default_resolver(platform: str, user_id: str) -> str:
# Check whether this is the bot itself
if replace_bot_name and user_id == global_config.bot.qq_account:
return f"{global_config.bot.nickname}(你)"
person_id = PersonInfoManager.get_person_id(platform, user_id)
return await person_info_manager.get_value(person_id, "person_name") or user_id # type: ignore
name_resolver = default_resolver
# Handle the 回复<aaa:bbb> (reply) format
reply_pattern = r"回复<([^:<>]+):([^:<>]+)>"
match = re.search(reply_pattern, content)
if match:
aaa = match.group(1)
bbb = match.group(2)
try:
# Check whether this is the bot itself
if replace_bot_name and bbb == global_config.bot.qq_account:
reply_person_name = f"{global_config.bot.nickname}(你)"
else:
reply_person_name = await name_resolver(platform, bbb) or aaa
content = re.sub(reply_pattern, f"回复 {reply_person_name}", content, count=1)
except Exception:
# If resolution fails, fall back to the original nickname
content = re.sub(reply_pattern, f"回复 {aaa}", content, count=1)
# Handle the @<aaa:bbb> format
at_pattern = r"@<([^:<>]+):([^:<>]+)>"
at_matches = list(re.finditer(at_pattern, content))
if at_matches:
new_content = ""
last_end = 0
for m in at_matches:
new_content += content[last_end : m.start()]
aaa = m.group(1)
bbb = m.group(2)
try:
# Check whether this is the bot itself
if replace_bot_name and bbb == global_config.bot.qq_account:
at_person_name = f"{global_config.bot.nickname}(你)"
else:
at_person_name = await name_resolver(platform, bbb) or aaa
new_content += f"@{at_person_name}"
except Exception:
# If resolution fails, fall back to the original nickname
new_content += f"@{aaa}"
last_end = m.end()
new_content += content[last_end:]
content = new_content
return content
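And a sketch for the async variant, whose resolver is awaited; the resolver below is a hypothetical stand-in for a real person-info lookup:
import asyncio

async def demo():
    async def resolver(platform: str, user_id: str) -> str:
        # Hypothetical lookup table standing in for the person-info store.
        return {"10001": "小明"}.get(user_id, user_id)

    print(await replace_user_references_async("@<小明:10001> 在吗", "qq", resolver))
    # -> "@小明 在吗"

asyncio.run(demo())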
def get_raw_msg_by_timestamp(
timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest"
) -> List[Dict[str, Any]]:
@@ -374,33 +524,8 @@ def _build_readable_messages_internal(
else:
person_name = "某人"
# Check for a 回复<aaa:bbb> (reply) field
reply_pattern = r"回复<([^:<>]+):([^:<>]+)>"
match = re.search(reply_pattern, content)
if match:
aaa: str = match[1]
bbb: str = match[2]
reply_person_id = PersonInfoManager.get_person_id(platform, bbb)
reply_person_name = person_info_manager.get_value_sync(reply_person_id, "person_name") or aaa
# Prepend the reply info to the content
content = re.sub(reply_pattern, lambda m, name=reply_person_name: f"回复 {name}", content, count=1)
# Check for an @<aaa:bbb> field, e.g. @<{member_info.get('nickname')}:{member_info.get('user_id')}>
at_pattern = r"@<([^:<>]+):([^:<>]+)>"
at_matches = list(re.finditer(at_pattern, content))
if at_matches:
new_content = ""
last_end = 0
for m in at_matches:
new_content += content[last_end : m.start()]
aaa = m.group(1)
bbb = m.group(2)
at_person_id = PersonInfoManager.get_person_id(platform, bbb)
at_person_name = person_info_manager.get_value_sync(at_person_id, "person_name") or aaa
new_content += f"@{at_person_name}"
last_end = m.end()
new_content += content[last_end:]
content = new_content
# Use the standalone function to handle user-reference formats
content = replace_user_references_sync(content, platform, replace_bot_name=replace_bot_name)
target_str = "这是QQ的一个功能用于提及某人但没那么明显"
if target_str in content and random.random() < 0.6:
@@ -654,6 +779,7 @@ async def build_readable_messages_with_list(
return formatted_string, details_list
def build_readable_messages_with_id(
messages: List[Dict[str, Any]],
replace_bot_name: bool = True,
@@ -669,9 +795,9 @@ def build_readable_messages_with_id(
Formatting behavior can be controlled via the parameters.
"""
message_id_list = assign_message_ids(messages)
formatted_string = build_readable_messages(
messages = messages,
messages=messages,
replace_bot_name=replace_bot_name,
merge_messages=merge_messages,
timestamp_mode=timestamp_mode,
@@ -682,10 +808,7 @@ def build_readable_messages_with_id(
message_id_list=message_id_list,
)
return formatted_string , message_id_list
return formatted_string, message_id_list
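A sketch of the intended call pattern, with `messages` assumed to be raw message dicts from the query helpers above; the keyword parameters mirror the ones this wrapper forwards to `build_readable_messages`:
readable, id_list = build_readable_messages_with_id(
    messages,
    replace_bot_name=True,
    merge_messages=False,
    timestamp_mode="normal_no_YMD",
)
# id_list pairs each assigned short id with its message (per assign_message_ids),
# so downstream prompts can reference individual messages by id.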
def build_readable_messages(
@@ -770,7 +893,13 @@ def build_readable_messages(
if read_mark <= 0:
# No valid read_mark; format all messages directly
formatted_string, _, pic_id_mapping, _ = _build_readable_messages_internal(
copy_messages, replace_bot_name, merge_messages, timestamp_mode, truncate, show_pic=show_pic, message_id_list=message_id_list
copy_messages,
replace_bot_name,
merge_messages,
timestamp_mode,
truncate,
show_pic=show_pic,
message_id_list=message_id_list,
)
# Generate the image-mapping info and prepend it
@@ -893,7 +1022,7 @@ async def build_anonymous_messages(messages: List[Dict[str, Any]]) -> str:
for msg in messages:
try:
platform = msg.get("chat_info_platform")
platform: str = msg.get("chat_info_platform") # type: ignore
user_id = msg.get("user_id")
_timestamp = msg.get("time")
content: str = ""
@@ -916,38 +1045,14 @@ async def build_anonymous_messages(messages: List[Dict[str, Any]]) -> str:
anon_name = get_anon_name(platform, user_id)
# print(f"anon_name:{anon_name}")
# Handle 回复<aaa:bbb> (reply)
reply_pattern = r"回复<([^:<>]+):([^:<>]+)>"
match = re.search(reply_pattern, content)
if match:
# print(f"发现回复match:{match}")
bbb = match.group(2)
# Use the standalone function to handle user references, passing a custom anonymous-name resolver
def anon_name_resolver(platform: str, user_id: str) -> str:
try:
anon_reply = get_anon_name(platform, bbb)
# print(f"anon_reply:{anon_reply}")
return get_anon_name(platform, user_id)
except Exception:
anon_reply = "?"
content = re.sub(reply_pattern, f"回复 {anon_reply}", content, count=1)
return "?"
# Handle @<aaa:bbb> (no nested def)
at_pattern = r"@<([^:<>]+):([^:<>]+)>"
at_matches = list(re.finditer(at_pattern, content))
if at_matches:
# print(f"发现@match:{at_matches}")
new_content = ""
last_end = 0
for m in at_matches:
new_content += content[last_end : m.start()]
bbb = m.group(2)
try:
anon_at = get_anon_name(platform, bbb)
# print(f"anon_at:{anon_at}")
except Exception:
anon_at = "?"
new_content += f"@{anon_at}"
last_end = m.end()
new_content += content[last_end:]
content = new_content
content = replace_user_references_sync(content, platform, anon_name_resolver, replace_bot_name=False)
header = f"{anon_name}"
output_lines.append(header)

View File

@@ -37,7 +37,7 @@ class ImageManager:
self._ensure_image_dir()
self._initialized = True
self._llm = LLMRequest(model=global_config.model.vlm, temperature=0.4, max_tokens=300, request_type="image")
self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.4, max_tokens=300, request_type="image")
try:
db.connect(reuse_if_open=True)
@@ -94,7 +94,7 @@ class ImageManager:
logger.error(f"保存描述到数据库失败 (Peewee): {str(e)}")
async def get_emoji_description(self, image_base64: str) -> str:
"""获取表情包描述,使用二步走识别并带缓存优化"""
"""获取表情包描述,优先使用Emoji表中的缓存数据"""
try:
# Compute the image hash
# Make sure the base64 string contains only ASCII characters
@@ -104,9 +104,21 @@ class ImageManager:
image_hash = hashlib.md5(image_bytes).hexdigest()
image_format = Image.open(io.BytesIO(image_bytes)).format.lower() # type: ignore
# Look up the cached description
# Prefer EmojiManager for descriptions of already-registered emoji stickers
try:
from src.chat.emoji_system.emoji_manager import get_emoji_manager
emoji_manager = get_emoji_manager()
cached_emoji_description = await emoji_manager.get_emoji_description_by_hash(image_hash)
if cached_emoji_description:
logger.info(f"[缓存命中] 使用已注册表情包描述: {cached_emoji_description[:50]}...")
return cached_emoji_description
except Exception as e:
logger.debug(f"查询EmojiManager时出错: {e}")
# Look up the cached description in the ImageDescriptions table
cached_description = self._get_description_from_db(image_hash, "emoji")
if cached_description:
logger.info(f"[缓存命中] 使用ImageDescriptions表中的描述: {cached_description[:50]}...")
return f"[表情包:{cached_description}]"
# === Two-step recognition flow ===
@@ -118,10 +130,10 @@ class ImageManager:
logger.warning("GIF转换失败无法获取描述")
return "[表情包(GIF处理失败)]"
vlm_prompt = "这是一个动态图表情包,每一张图代表了动态图的某一帧,黑色背景代表透明,描述一下表情包表达的情感和内容,描述细节,从互联网梗,meme的角度去分析"
detailed_description, _ = await self._llm.generate_response_for_image(vlm_prompt, image_base64_processed, "jpg")
detailed_description, _ = await self.vlm.generate_response_for_image(vlm_prompt, image_base64_processed, "jpg")
else:
vlm_prompt = "这是一个表情包,请详细描述一下表情包所表达的情感和内容,描述细节,从互联网梗,meme的角度去分析"
detailed_description, _ = await self._llm.generate_response_for_image(vlm_prompt, image_base64, image_format)
detailed_description, _ = await self.vlm.generate_response_for_image(vlm_prompt, image_base64, image_format)
if detailed_description is None:
logger.warning("VLM未能生成表情包详细描述")
@@ -158,7 +170,7 @@ class ImageManager:
if len(emotions) > 1 and emotions[1] != emotions[0]:
final_emotion = f"{emotions[0]}{emotions[1]}"
logger.info(f"[二步走识别] 详细描述: {detailed_description[:50]}... -> 情感标签: {final_emotion}")
logger.info(f"[emoji识别] 详细描述: {detailed_description[:50]}... -> 情感标签: {final_emotion}")
# Check the cache again to avoid duplicate generation on concurrent writes
cached_description = self._get_description_from_db(image_hash, "emoji")
@@ -201,13 +213,13 @@ class ImageManager:
self._save_description_to_db(image_hash, final_emotion, "emoji")
return f"[表情包:{final_emotion}]"
except Exception as e:
logger.error(f"获取表情包描述失败: {str(e)}")
return "[表情包]"
return "[表情包(处理失败)]"
async def get_image_description(self, image_base64: str) -> str:
"""获取普通图片描述,带查重和保存功能"""
"""获取普通图片描述,优先使用Images表中的缓存数据"""
try:
# Compute the image hash
if isinstance(image_base64, str):
@@ -215,7 +227,7 @@ class ImageManager:
image_bytes = base64.b64decode(image_base64)
image_hash = hashlib.md5(image_bytes).hexdigest()
# Check whether the image already exists
# First check whether the Images table already has a complete description
existing_image = Images.get_or_none(Images.emoji_hash == image_hash)
if existing_image:
# Update the count
@@ -227,18 +239,20 @@ class ImageManager:
# If a description already exists, return it directly
if existing_image.description:
logger.debug(f"[缓存命中] 使用Images表中的图片描述: {existing_image.description[:50]}...")
return f"[图片:{existing_image.description}]"
# Look up the cached description
# Look up the cached description in the ImageDescriptions table
cached_description = self._get_description_from_db(image_hash, "image")
if cached_description:
logger.debug(f"图片描述缓存中 {cached_description}")
logger.debug(f"[缓存命中] 使用ImageDescriptions表中的描述: {cached_description[:50]}...")
return f"[图片:{cached_description}]"
# Call the AI to get a description
image_format = Image.open(io.BytesIO(image_bytes)).format.lower() # type: ignore
prompt = global_config.custom_prompt.image_prompt
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
logger.info(f"[VLM调用] 为图片生成新描述 (Hash: {image_hash[:8]}...)")
description, _ = await self.vlm.generate_response_for_image(prompt, image_base64, image_format)
if description is None:
logger.warning("AI未能生成图片描述")
@@ -266,6 +280,7 @@ class ImageManager:
if not hasattr(existing_image, "vlm_processed") or existing_image.vlm_processed is None:
existing_image.vlm_processed = True
existing_image.save()
logger.debug(f"[数据库] 更新已有图片记录: {image_hash[:8]}...")
else:
Images.create(
image_id=str(uuid.uuid4()),
@@ -277,16 +292,18 @@ class ImageManager:
vlm_processed=True,
count=1,
)
logger.debug(f"[数据库] 创建新图片记录: {image_hash[:8]}...")
except Exception as e:
logger.error(f"保存图片文件或元数据失败: {str(e)}")
# Save the description to the ImageDescriptions table
# Save the description to the ImageDescriptions table as a fallback cache
self._save_description_to_db(image_hash, description, "image")
logger.info(f"[VLM完成] 图片描述生成: {description[:50]}...")
return f"[图片:{description}]"
except Exception as e:
logger.error(f"获取图片描述失败: {str(e)}")
return "[图片]"
return "[图片(处理失败)]"
@staticmethod
def transform_gif(gif_base64: str, similarity_threshold: float = 1000.0, max_frames: int = 15) -> Optional[str]:
@@ -502,12 +519,28 @@ class ImageManager:
image_bytes = base64.b64decode(image_base64)
image_hash = hashlib.md5(image_bytes).hexdigest()
# First check the cached description
# Get the current image record
image = Images.get(Images.image_id == image_id)
# First check whether another record with the same hash already has a description
existing_with_description = Images.get_or_none(
(Images.emoji_hash == image_hash) &
(Images.description.is_null(False)) &
(Images.description != "")
)
if existing_with_description and existing_with_description.id != image.id:
logger.debug(f"[缓存复用] 从其他相同图片记录复用描述: {existing_with_description.description[:50]}...")
image.description = existing_with_description.description
image.vlm_processed = True
image.save()
# Also save to the ImageDescriptions table as a fallback cache
self._save_description_to_db(image_hash, existing_with_description.description, "image")
return
# Check the cached description in the ImageDescriptions table
cached_description = self._get_description_from_db(image_hash, "image")
if cached_description:
logger.debug(f"VLM处理时发现缓存描述: {cached_description}")
# Update the database
image = Images.get(Images.image_id == image_id)
logger.debug(f"[缓存复用] 从ImageDescriptions表复用描述: {cached_description[:50]}...")
image.description = cached_description
image.vlm_processed = True
image.save()
@@ -520,7 +553,8 @@ class ImageManager:
prompt = global_config.custom_prompt.image_prompt
# Get the VLM description
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
logger.info(f"[VLM异步调用] 为图片生成描述 (ID: {image_id}, Hash: {image_hash[:8]}...)")
description, _ = await self.vlm.generate_response_for_image(prompt, image_base64, image_format)
if description is None:
logger.warning("VLM未能生成图片描述")
@@ -533,14 +567,15 @@ class ImageManager:
description = cached_description
# Update the database
image = Images.get(Images.image_id == image_id)
image.description = description
image.vlm_processed = True
image.save()
# Save the description to the ImageDescriptions table
# Save the description to the ImageDescriptions table as a fallback cache
self._save_description_to_db(image_hash, description, "image")
logger.info(f"[VLM异步完成] 图片描述生成: {description[:50]}...")
except Exception as e:
logger.error(f"VLM处理图片失败: {str(e)}")

View File

@@ -28,7 +28,7 @@ class ClassicalWillingManager(BaseWillingManager):
# print(f"[{chat_id}] 回复意愿: {current_willing}")
interested_rate = willing_info.interested_rate * global_config.normal_chat.response_interested_rate_amplifier
interested_rate = willing_info.interested_rate
# print(f"[{chat_id}] 兴趣值: {interested_rate}")
@@ -36,20 +36,18 @@ class ClassicalWillingManager(BaseWillingManager):
current_willing += interested_rate - 0.2
if willing_info.is_mentioned_bot and global_config.chat.mentioned_bot_inevitable_reply and current_willing < 2:
current_willing += 1 if current_willing < 1.0 else 0.05
current_willing += 1 if current_willing < 1.0 else 0.2
self.chat_reply_willing[chat_id] = min(current_willing, 1.0)
reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1)
reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1.5)
# print(f"[{chat_id}] 回复概率: {reply_probability}")
return reply_probability
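For a feel of the adjusted curve: the probability rises linearly above a willing value of 0.5, is floored near zero, and is now capped at 1.5 instead of 1. A quick check of a few points against the formula above:
for w in (0.4, 0.7, 1.0, 1.3):
    p = min(max(w - 0.5, 0.01) * 2, 1.5)
    print(w, p)  # 0.4 -> 0.02, 0.7 -> 0.4, 1.0 -> 1.0, 1.3 -> 1.5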
async def before_generate_reply_handle(self, message_id):
chat_id = self.ongoing_messages[message_id].chat_id
current_willing = self.chat_reply_willing.get(chat_id, 0)
self.chat_reply_willing[chat_id] = max(0.0, current_willing - 1.8)
pass
async def after_generate_reply_handle(self, message_id):
if message_id not in self.ongoing_messages:
@@ -58,7 +56,7 @@ class ClassicalWillingManager(BaseWillingManager):
chat_id = self.ongoing_messages[message_id].chat_id
current_willing = self.chat_reply_willing.get(chat_id, 0)
if current_willing < 1:
self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.4)
self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.3)
async def not_reply_handle(self, message_id):
return await super().not_reply_handle(message_id)