Merge branch 'dev' of https://github.com/Windpicker-owo/MaiBot into dev
@@ -18,11 +18,12 @@ from src.chat.chat_loop.hfc_utils import CycleDetail
from src.person_info.relationship_builder_manager import relationship_builder_manager
from src.person_info.person_info import get_person_info_manager
from src.plugin_system.base.component_types import ActionInfo, ChatMode
from src.plugin_system.apis import generator_api, send_api, message_api
from src.plugin_system.apis import generator_api, send_api, message_api, database_api
from src.chat.willing.willing_manager import get_willing_manager
from src.mais4u.mai_think import mai_thinking_manager
from maim_message.message_base import GroupInfo
from src.mais4u.constant_s4u import ENABLE_S4U
from src.plugins.built_in.core_actions.no_reply import NoReplyAction
from src.chat.chat_loop.hfc_utils import send_typing, stop_typing

ERROR_LOOP_INFO = {
    "loop_plan_info": {
@@ -254,44 +255,19 @@ class HeartFChatting:
        person_name = await person_info_manager.get_value(person_id, "person_name")
        return f"{person_name}:{message_data.get('processed_plain_text')}"

    async def send_typing(self):
        group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")

        chat = await get_chat_manager().get_or_create_stream(
            platform="amaidesu_default",
            user_info=None,
            group_info=group_info,
        )

        await send_api.custom_to_stream(
            message_type="state", content="typing", stream_id=chat.stream_id, storage_message=False
        )

    async def stop_typing(self):
        group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")

        chat = await get_chat_manager().get_or_create_stream(
            platform="amaidesu_default",
            user_info=None,
            group_info=group_info,
        )

        await send_api.custom_to_stream(
            message_type="state", content="stop_typing", stream_id=chat.stream_id, storage_message=False
        )

    async def _observe(self, message_data: Optional[Dict[str, Any]] = None):
        # sourcery skip: hoist-statement-from-if, merge-comparisons, reintroduce-else
        if not message_data:
            message_data = {}
        action_type = "no_action"
        reply_text = ""  # 初始化reply_text变量,避免UnboundLocalError
        # 创建新的循环信息
        cycle_timers, thinking_id = self.start_cycle()

        logger.info(f"{self.log_prefix} 开始第{self._cycle_counter}次思考[模式:{self.loop_mode}]")

        if ENABLE_S4U:
            await self.send_typing()
            await send_typing()

        async with global_prompt_manager.async_message_scope(self.chat_stream.context.get_template_name()):
            loop_start_time = time.time()
@@ -308,9 +284,11 @@ class HeartFChatting:
                logger.error(f"{self.log_prefix} 动作修改失败: {e}")

            # 如果normal,开始一个回复生成进程,先准备好回复(其实是和planer同时进行的)
            gen_task = None
            reply_to_str = ""
            if self.loop_mode == ChatMode.NORMAL:
                reply_to_str = await self.build_reply_to_str(message_data)
                gen_task = asyncio.create_task(self._generate_response(message_data, available_actions, reply_to_str))
                gen_task = asyncio.create_task(self._generate_response(message_data, available_actions, reply_to_str, "chat.replyer.normal"))

            with Timer("规划器", cycle_timers):
                plan_result, target_message = await self.action_planner.plan(mode=self.loop_mode)
@@ -325,55 +303,118 @@ class HeartFChatting:
            action_data["loop_start_time"] = loop_start_time

            if self.loop_mode == ChatMode.NORMAL:
                if action_type == "no_action":
                    logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复")
                elif is_parallel:
                if action_type == "reply":
                    logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复")
                elif is_parallel:
                    logger.info(
                        f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复, 同时执行{action_type}动作"
                    )
                else:
                    if not gen_task.done():
                        gen_task.cancel()
                        logger.debug(f"{self.log_prefix} 已取消预生成的回复任务")
                    logger.info(
                        f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复, 同时执行{action_type}动作"
                        f"{self.log_prefix}{global_config.bot.nickname} 原本想要回复,但选择执行{action_type},不发表回复"
                    )
            else:
                logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定执行{action_type}动作")

            if action_type == "no_action":
                # 等待回复生成完毕
                gather_timeout = global_config.chat.thinking_timeout
                try:
                    response_set = await asyncio.wait_for(gen_task, timeout=gather_timeout)
                except asyncio.TimeoutError:
                    response_set = None

                if response_set:
                    content = " ".join([item[1] for item in response_set if item[0] == "text"])

                # 模型炸了,没有回复内容生成
                if not response_set:
                    logger.warning(f"{self.log_prefix}模型未生成回复内容")
                    return False
            elif action_type not in ["no_action"] and not is_parallel:
                content = " ".join([item[1] for item in gen_task.result() if item[0] == "text"])
                logger.debug(f"{self.log_prefix} 预生成的回复任务已完成")
                logger.info(
                    f"{self.log_prefix}{global_config.bot.nickname} 原本想要回复:{content},但选择执行{action_type},不发表回复"
                )
                return False

            logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定的回复内容: {content}")

            # 发送回复 (不再需要传入 chat)
            reply_text = await self._send_response(response_set, reply_to_str, loop_start_time, message_data)

            if ENABLE_S4U:
                await self.stop_typing()
                await mai_thinking_manager.get_mai_think(self.stream_id).do_think_after_response(reply_text)
            action_message: Dict[str, Any] = message_data or target_message  # type: ignore
            if action_type == "reply" or is_parallel:
                # 等待回复生成完毕
                if self.loop_mode == ChatMode.NORMAL:
                    gather_timeout = global_config.chat.thinking_timeout
                    try:
                        response_set = await asyncio.wait_for(gen_task, timeout=gather_timeout)
                    except asyncio.TimeoutError:
                        logger.warning(f"{self.log_prefix} 回复生成超时>{global_config.chat.thinking_timeout}s,已跳过")
                        response_set = None

                    # 模型炸了或超时,没有回复内容生成
                    if not response_set:
                        logger.warning(f"{self.log_prefix}模型未生成回复内容")
                        return False
                else:
                    logger.info(f"{self.log_prefix}{global_config.bot.nickname} 决定进行回复 (focus模式)")

                    # 构建reply_to字符串
                    reply_to_str = await self.build_reply_to_str(action_message)

                    # 生成回复
                    with Timer("回复生成", cycle_timers):
                        response_set = await self._generate_response(action_message, available_actions, reply_to_str, request_type="chat.replyer.focus")

                    if not response_set:
                        logger.warning(f"{self.log_prefix}模型未生成回复内容")
                        return False

                # 发送回复
                with Timer("回复发送", cycle_timers):
                    reply_text = await self._send_response(response_set, reply_to_str, loop_start_time, action_message)

                # 存储reply action信息 (focus模式)
                person_info_manager = get_person_info_manager()
                person_id = person_info_manager.get_person_id(
                    action_message.get("chat_info_platform", ""),
                    action_message.get("user_id", ""),
                )
                person_name = await person_info_manager.get_value(person_id, "person_name")
                action_prompt_display = f"你对{person_name}进行了回复:{reply_text}"

                await database_api.store_action_info(
                    chat_stream=self.chat_stream,
                    action_build_into_prompt=False,
                    action_prompt_display=action_prompt_display,
                    action_done=True,
                    thinking_id=thinking_id,
                    action_data={"reply_text": reply_text, "reply_to": reply_to_str},
                    action_name="reply",
                )

                # 构建循环信息
                loop_info = {
                    "loop_plan_info": {
                        "action_result": plan_result.get("action_result", {}),
                    },
                    "loop_action_info": {
                        "action_taken": True,
                        "reply_text": reply_text,
                        "command": "",
                        "taken_time": time.time(),
                    },
                }

                success = True
                command = ""

                return True

            else:
                action_message: Dict[str, Any] = message_data or target_message  # type: ignore

                # 如果是并行执行且在normal模式下,需要等待预生成的回复任务完成
                # if self.loop_mode == ChatMode.NORMAL and is_parallel and gen_task:
                #     # 等待预生成的回复任务完成
                #     gather_timeout = global_config.chat.thinking_timeout
                #     try:
                #         response_set = await asyncio.wait_for(gen_task, timeout=gather_timeout)
                #         if response_set:
                #             # 发送回复
                #             with Timer("回复发送", cycle_timers):
                #                 reply_text_parallel = await self._send_response(response_set, reply_to_str, loop_start_time, action_message)
                #             logger.info(f"{self.log_prefix} 并行执行:已发送回复内容")
                #         else:
                #             logger.warning(f"{self.log_prefix} 并行执行:预生成回复内容为空")
                #     except asyncio.TimeoutError:
                #         logger.warning(f"{self.log_prefix} 并行执行:回复生成超时>{global_config.chat.thinking_timeout}s,已跳过")
                #     except asyncio.CancelledError:
                #         logger.debug(f"{self.log_prefix} 并行执行:回复生成任务已被取消")

                # 动作执行计时
                with Timer("动作执行", cycle_timers):
                    success, reply_text, command = await self._handle_action(
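Note: the normal-mode flow above pre-generates the reply while the planner runs, then either awaits it under the thinking timeout or cancels it when a non-parallel action wins. A minimal, self-contained sketch of that asyncio pattern; the two coroutines are illustrative stand-ins, not the project's actual methods:

```python
import asyncio


async def generate_reply() -> str:
    # Illustrative stand-in for the pre-started _generate_response task.
    await asyncio.sleep(0.2)
    return "hello"


async def plan_action() -> str:
    # Illustrative stand-in for action_planner.plan(); returns an action type.
    await asyncio.sleep(0.1)
    return "reply"


async def observe_once(thinking_timeout: float = 1.0) -> None:
    # Start reply generation early so it overlaps with planning.
    gen_task = asyncio.create_task(generate_reply())
    action_type = await plan_action()

    if action_type == "reply":
        try:
            # Wait at most the configured thinking timeout for the reply.
            reply = await asyncio.wait_for(gen_task, timeout=thinking_timeout)
            print("send:", reply)
        except asyncio.TimeoutError:
            print("generation timed out, skip reply")
    else:
        # A non-parallel action won: drop the pre-generated reply.
        if not gen_task.done():
            gen_task.cancel()
        print("execute action:", action_type)


asyncio.run(observe_once())
```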
@@ -392,11 +433,11 @@ class HeartFChatting:
                    },
                }

                if loop_info["loop_action_info"]["command"] == "stop_focus_chat":
                    logger.info(f"{self.log_prefix} 麦麦决定停止专注聊天")
                    return False
                    # 停止该聊天模式的循环

        if ENABLE_S4U:
            await stop_typing()
            await mai_thinking_manager.get_mai_think(self.stream_id).do_think_after_response(reply_text)

        self.end_cycle(loop_info, cycle_timers)
        self.print_cycle_info(cycle_timers)
@@ -406,13 +447,11 @@ class HeartFChatting:
        # 管理no_reply计数器:当执行了非no_reply动作时,重置计数器
        if action_type != "no_reply" and action_type != "no_action":
            # 导入NoReplyAction并重置计数器
            from src.plugins.built_in.core_actions.no_reply import NoReplyAction
            NoReplyAction.reset_consecutive_count()
            logger.info(f"{self.log_prefix} 执行了{action_type}动作,重置no_reply计数器")
            return True
        elif action_type == "no_action":
            # 当执行回复动作时,也重置no_reply计数器
            from src.plugins.built_in.core_actions.no_reply import NoReplyAction
            # 当执行回复动作时,也重置no_reply计数器s
            NoReplyAction.reset_consecutive_count()
            logger.info(f"{self.log_prefix} 执行了回复动作,重置no_reply计数器")
@@ -551,7 +590,7 @@ class HeartFChatting:
            return False

    async def _generate_response(
        self, message_data: dict, available_actions: Optional[Dict[str, ActionInfo]], reply_to: str
        self, message_data: dict, available_actions: Optional[Dict[str, ActionInfo]], reply_to: str, request_type: str = "chat.replyer.normal"
    ) -> Optional[list]:
        """生成普通回复"""
        try:
@@ -559,8 +598,8 @@ class HeartFChatting:
                chat_stream=self.chat_stream,
                reply_to=reply_to,
                available_actions=available_actions,
                enable_tool=global_config.tool.enable_in_normal_chat,
                request_type="chat.replyer.normal",
                enable_tool=global_config.tool.enable_tool,
                request_type=request_type,
            )

            if not success or not reply_set:
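The hunk above stops hardcoding the request type: `_generate_response` now forwards its `request_type` argument (default "chat.replyer.normal"), and the focus branch passes "chat.replyer.focus". A hedged, self-contained sketch of that plumbing; `call_generator` is a hypothetical stand-in for the real generator API call:

```python
import asyncio
from typing import Any, Dict, List, Optional, Tuple


async def call_generator(prompt: str, request_type: str) -> List[Tuple[str, str]]:
    # Hypothetical stand-in for the underlying generator API call.
    return [("text", f"[{request_type}] reply to: {prompt}")]


async def generate_response(
    message_data: Dict[str, Any],
    reply_to: str,
    request_type: str = "chat.replyer.normal",
) -> Optional[List[Tuple[str, str]]]:
    # The caller now picks the request type instead of it being hardcoded:
    # the normal loop keeps the default, focus mode passes "chat.replyer.focus".
    prompt = f"{reply_to} | {message_data.get('processed_plain_text', '')}"
    return await call_generator(prompt, request_type)


async def main() -> None:
    msg = {"processed_plain_text": "hi"}
    print(await generate_response(msg, reply_to="Alice:hi"))
    print(await generate_response(msg, reply_to="Alice:hi", request_type="chat.replyer.focus"))


asyncio.run(main())
```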
@@ -589,7 +628,7 @@ class HeartFChatting:
                    f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,使用引用回复"
                )
            else:
                logger.debug(
                logger.info(
                    f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,不使用引用回复"
                )
@@ -1,10 +1,13 @@
import time

from typing import Optional, Dict, Any

from src.config.config import global_config
from src.common.message_repository import count_messages
from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import get_chat_manager
from src.plugin_system.apis import send_api
from maim_message.message_base import GroupInfo

from src.common.message_repository import count_messages

logger = get_logger(__name__)
@@ -106,3 +109,30 @@ def get_recent_message_stats(minutes: float = 30, chat_id: Optional[str] = None)
    bot_reply_count = count_messages(bot_filter)

    return {"bot_reply_count": bot_reply_count, "total_message_count": total_message_count}


async def send_typing():
    group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")

    chat = await get_chat_manager().get_or_create_stream(
        platform="amaidesu_default",
        user_info=None,
        group_info=group_info,
    )

    await send_api.custom_to_stream(
        message_type="state", content="typing", stream_id=chat.stream_id, storage_message=False
    )


async def stop_typing():
    group_info = GroupInfo(platform="amaidesu_default", group_id="114514", group_name="内心")

    chat = await get_chat_manager().get_or_create_stream(
        platform="amaidesu_default",
        user_info=None,
        group_info=group_info,
    )

    await send_api.custom_to_stream(
        message_type="state", content="stop_typing", stream_id=chat.stream_id, storage_message=False
    )
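These module-level helpers replace the former HeartFChatting.send_typing / stop_typing methods; the loop now imports them from hfc_utils and calls them around a reply (see the `await send_typing()` / `await stop_typing()` calls earlier in this diff). A hypothetical call-site sketch — only the import paths, the ENABLE_S4U flag, and the helper names come from this commit, the wrapper itself is illustrative:

```python
from typing import Awaitable, Callable

from src.chat.chat_loop.hfc_utils import send_typing, stop_typing
from src.mais4u.constant_s4u import ENABLE_S4U


async def run_one_reply(generate_and_send: Callable[[], Awaitable[None]]) -> None:
    if ENABLE_S4U:
        await send_typing()      # publish the "typing" state on the s4u stream

    await generate_and_send()    # plan, generate and send the actual reply

    if ENABLE_S4U:
        await stop_typing()      # clear the state once the reply has gone out
```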
@@ -33,10 +33,11 @@ def init_prompt():
{time_block}
{identity_block}
你现在需要根据聊天内容,选择的合适的action来参与聊天。
{chat_context_description},以下是具体的聊天内容:
{chat_context_description},以下是具体的聊天内容
{chat_content_block}

{moderation_prompt}

现在请你根据{by_what}选择合适的action和触发action的消息:
@@ -224,8 +225,9 @@ class ActionPlanner:
            reasoning = f"Planner 内部处理错误: {outer_e}"

        is_parallel = False
        if action in current_available_actions:
            is_parallel = current_available_actions[action].parallel_action
        if mode == ChatMode.NORMAL:
            if action in current_available_actions:
                is_parallel = current_available_actions[action].parallel_action

        action_result = {
            "action_type": action,
@@ -311,7 +313,7 @@ class ActionPlanner:
        by_what = "聊天内容和用户的最新消息"
        target_prompt = ""
        no_action_block = """重要说明:
- 'no_action' 表示只进行普通聊天回复,不执行任何额外动作
- 'reply' 表示只进行普通聊天回复,不执行任何额外动作
- 其他action表示在普通回复的基础上,执行相应的额外动作"""

        chat_context_description = "你现在正在一个群聊中"
@@ -39,33 +39,7 @@ def init_prompt():
    Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
    Prompt("在群里聊天", "chat_target_group2")
    Prompt("和{sender_name}聊天", "chat_target_private2")
    Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")

    Prompt(
        """
{expression_habits_block}
{tool_info_block}
{knowledge_prompt}
{memory_block}
{relation_info_block}
{extra_info_block}

{chat_target}
{time_block}
{chat_info}
{reply_target_block}
{identity}

{action_descriptions}
你正在{chat_target_2},你现在的心情是:{mood_state}
现在请你读读之前的聊天记录,并给出回复
{config_expression_style}。注意不要复读你说过的话
{keywords_reaction_prompt}
{moderation_prompt}
不要浮夸,不要夸张修辞,不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容""",
        "default_generator_prompt",
    )


    Prompt(
        """
{expression_habits_block}
@@ -113,7 +87,8 @@ def init_prompt():
{core_dialogue_prompt}

{reply_target_block}
对方最新发送的内容:{message_txt}

你现在的心情是:{mood_state}
{config_expression_style}
注意不要复读你说过的话
@@ -177,7 +152,6 @@ class DefaultReplyer:

    async def generate_reply_with_context(
        self,
        reply_data: Optional[Dict[str, Any]] = None,
        reply_to: str = "",
        extra_info: str = "",
        available_actions: Optional[Dict[str, ActionInfo]] = None,
@@ -186,29 +160,24 @@ class DefaultReplyer:
    ) -> Tuple[bool, Optional[str], Optional[str]]:
        """
        回复器 (Replier): 核心逻辑,负责生成回复文本。
        (已整合原 HeartFCGenerator 的功能)
        """
        prompt = None
        if available_actions is None:
            available_actions = {}
        try:
            if not reply_data:
                reply_data = {
                    "reply_to": reply_to,
                    "extra_info": extra_info,
                }
            for key, value in reply_data.items():
                if not value:
                    logger.debug(f"回复数据跳过{key},生成回复时将忽略。")

            # 3. 构建 Prompt
            with Timer("构建Prompt", {}):  # 内部计时器,可选保留
                prompt = await self.build_prompt_reply_context(
                    reply_data=reply_data,  # 传递action_data
                    reply_to=reply_to,
                    extra_info=extra_info,
                    available_actions=available_actions,
                    enable_timeout=enable_timeout,
                    enable_tool=enable_tool,
                )

            if not prompt:
                logger.warning("构建prompt失败,跳过回复生成")
                return False, None, None

            # 4. 调用 LLM 生成回复
            content = None
@@ -308,14 +277,13 @@ class DefaultReplyer:
            traceback.print_exc()
            return False, None

    async def build_relation_info(self, reply_data=None):
    async def build_relation_info(self, reply_to: str = ""):
        if not global_config.relationship.enable_relationship:
            return ""

        relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
        if not reply_data:
        if not reply_to:
            return ""
        reply_to = reply_data.get("reply_to", "")
        sender, text = self._parse_reply_target(reply_to)
        if not sender or not text:
            return ""
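`build_relation_info` (and `build_tool_info` below) now receive the raw `reply_to` string rather than a `reply_data` dict. Judging from `build_reply_to_str` earlier in the diff, which returns `f"{person_name}:{processed_plain_text}"`, that string is a "sender:text" pair; `_parse_reply_target` itself is not shown in this diff, so the following parser is only an assumed sketch of its behaviour:

```python
import re
from typing import Tuple


def parse_reply_target(reply_to: str) -> Tuple[str, str]:
    # Assumed format: "<person_name>:<message text>" (build_reply_to_str above
    # joins them with a fullwidth colon). Split on the first half- or
    # full-width colon only, so the message text may itself contain colons.
    parts = re.split(r"[::]", reply_to, maxsplit=1)
    if len(parts) == 2:
        return parts[0].strip(), parts[1].strip()
    return "", ""


sender, text = parse_reply_target("Alice:明天一起打球吗?")
assert (sender, text) == ("Alice", "明天一起打球吗?")
```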
@@ -407,7 +375,7 @@ class DefaultReplyer:

        return memory_str

    async def build_tool_info(self, chat_history, reply_data: Optional[Dict], enable_tool: bool = True):
    async def build_tool_info(self, chat_history, reply_to: str = "", enable_tool: bool = True):
        """构建工具信息块

        Args:
@@ -421,10 +389,9 @@ class DefaultReplyer:
        if not enable_tool:
            return ""

        if not reply_data:
        if not reply_to:
            return ""

        reply_to = reply_data.get("reply_to", "")
        sender, text = self._parse_reply_target(reply_to)

        if not text:
@@ -603,7 +570,8 @@ class DefaultReplyer:

    async def build_prompt_reply_context(
        self,
        reply_data: Dict[str, Any],
        reply_to: str,
        extra_info: str = "",
        available_actions: Optional[Dict[str, ActionInfo]] = None,
        enable_timeout: bool = False,
        enable_tool: bool = True,
@@ -628,8 +596,6 @@ class DefaultReplyer:
        chat_id = chat_stream.stream_id
        person_info_manager = get_person_info_manager()
        is_group_chat = bool(chat_stream.group_info)
        reply_to = reply_data.get("reply_to", "none")
        extra_info_block = reply_data.get("extra_info", "") or reply_data.get("extra_info_block", "")

        if global_config.mood.enable_mood:
            chat_mood = mood_manager.get_mood_by_chat_id(chat_id)
@@ -638,6 +604,13 @@ class DefaultReplyer:
            mood_prompt = ""

        sender, target = self._parse_reply_target(reply_to)
        person_info_manager = get_person_info_manager()
        person_id = person_info_manager.get_person_id_by_person_name(sender)
        user_id = person_info_manager.get_value_sync(person_id, "user_id")
        platform = chat_stream.platform
        if user_id == global_config.bot.qq_account and platform == global_config.bot.platform:
            logger.warning("选取了自身作为回复对象,跳过构建prompt")
            return ""

        target = replace_user_references_sync(target, chat_stream.platform, replace_bot_name=True)
@@ -656,21 +629,6 @@ class DefaultReplyer:
            limit=global_config.chat.max_context_size * 2,
        )

        message_list_before_now = get_raw_msg_before_timestamp_with_chat(
            chat_id=chat_id,
            timestamp=time.time(),
            limit=global_config.chat.max_context_size,
        )
        chat_talking_prompt = build_readable_messages(
            message_list_before_now,
            replace_bot_name=True,
            merge_messages=False,
            timestamp_mode="normal_no_YMD",
            read_mark=0.0,
            truncate=True,
            show_actions=True,
        )

        message_list_before_short = get_raw_msg_before_timestamp_with_chat(
            chat_id=chat_id,
            timestamp=time.time(),
@@ -690,10 +648,10 @@ class DefaultReplyer:
            self._time_and_run_task(
                self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
            ),
            self._time_and_run_task(self.build_relation_info(reply_data), "relation_info"),
            self._time_and_run_task(self.build_relation_info(reply_to), "relation_info"),
            self._time_and_run_task(self.build_memory_block(chat_talking_prompt_short, target), "memory_block"),
            self._time_and_run_task(
                self.build_tool_info(chat_talking_prompt_short, reply_data, enable_tool=enable_tool), "tool_info"
                self.build_tool_info(chat_talking_prompt_short, reply_to, enable_tool=enable_tool), "tool_info"
            ),
            self._time_and_run_task(get_prompt_info(target, threshold=0.38), "prompt_info"),
        )
@@ -726,8 +684,8 @@ class DefaultReplyer:

        keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)

        if extra_info_block:
            extra_info_block = f"以下是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策"
        if extra_info:
            extra_info_block = f"以下是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策\n{extra_info}\n以上是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策"
        else:
            extra_info_block = ""
@@ -782,103 +740,63 @@ class DefaultReplyer:
        # 根据sender通过person_info_manager反向查找person_id,再获取user_id
        person_id = person_info_manager.get_person_id_by_person_name(sender)

        # 根据配置选择使用哪种 prompt 构建模式
        if global_config.chat.use_s4u_prompt_mode and person_id:
            # 使用 s4u 对话构建模式:分离当前对话对象和其他对话
            try:
                user_id_value = await person_info_manager.get_value(person_id, "user_id")
                if user_id_value:
                    target_user_id = str(user_id_value)
            except Exception as e:
                logger.warning(f"无法从person_id {person_id} 获取user_id: {e}")
                target_user_id = ""
            # 使用 s4u 对话构建模式:分离当前对话对象和其他对话
            try:
                user_id_value = await person_info_manager.get_value(person_id, "user_id")
                if user_id_value:
                    target_user_id = str(user_id_value)
            except Exception as e:
                logger.warning(f"无法从person_id {person_id} 获取user_id: {e}")
                target_user_id = ""

            # 构建分离的对话 prompt
            core_dialogue_prompt, background_dialogue_prompt = self.build_s4u_chat_history_prompts(
                message_list_before_now_long, target_user_id
            )
            # 构建分离的对话 prompt
            core_dialogue_prompt, background_dialogue_prompt = self.build_s4u_chat_history_prompts(
                message_list_before_now_long, target_user_id
            )

            self.build_mai_think_context(
                chat_id=chat_id,
                memory_block=memory_block,
                relation_info=relation_info,
                time_block=time_block,
                chat_target_1=chat_target_1,
                chat_target_2=chat_target_2,
                mood_prompt=mood_prompt,
                identity_block=identity_block,
                sender=sender,
                target=target,
                chat_info=f"""
            self.build_mai_think_context(
                chat_id=chat_id,
                memory_block=memory_block,
                relation_info=relation_info,
                time_block=time_block,
                chat_target_1=chat_target_1,
                chat_target_2=chat_target_2,
                mood_prompt=mood_prompt,
                identity_block=identity_block,
                sender=sender,
                target=target,
                chat_info=f"""
{background_dialogue_prompt}
--------------------------------
{time_block}
这是你和{sender}的对话,你们正在交流中:
{core_dialogue_prompt}""",
            )
            )

            # 使用 s4u 风格的模板
            template_name = "s4u_style_prompt"
            # 使用 s4u 风格的模板
            template_name = "s4u_style_prompt"

            return await global_prompt_manager.format_prompt(
                template_name,
                expression_habits_block=expression_habits_block,
                tool_info_block=tool_info,
                knowledge_prompt=prompt_info,
                memory_block=memory_block,
                relation_info_block=relation_info,
                extra_info_block=extra_info_block,
                identity=identity_block,
                action_descriptions=action_descriptions,
                sender_name=sender,
                mood_state=mood_prompt,
                background_dialogue_prompt=background_dialogue_prompt,
                time_block=time_block,
                core_dialogue_prompt=core_dialogue_prompt,
                reply_target_block=reply_target_block,
                message_txt=target,
                config_expression_style=global_config.expression.expression_style,
                keywords_reaction_prompt=keywords_reaction_prompt,
                moderation_prompt=moderation_prompt_block,
            )
        else:
            self.build_mai_think_context(
                chat_id=chat_id,
                memory_block=memory_block,
                relation_info=relation_info,
                time_block=time_block,
                chat_target_1=chat_target_1,
                chat_target_2=chat_target_2,
                mood_prompt=mood_prompt,
                identity_block=identity_block,
                sender=sender,
                target=target,
                chat_info=chat_talking_prompt,
            )

            # 使用原有的模式
            return await global_prompt_manager.format_prompt(
                template_name,
                expression_habits_block=expression_habits_block,
                chat_target=chat_target_1,
                chat_info=chat_talking_prompt,
                memory_block=memory_block,
                tool_info_block=tool_info,
                knowledge_prompt=prompt_info,
                extra_info_block=extra_info_block,
                relation_info_block=relation_info,
                time_block=time_block,
                reply_target_block=reply_target_block,
                moderation_prompt=moderation_prompt_block,
                keywords_reaction_prompt=keywords_reaction_prompt,
                identity=identity_block,
                target_message=target,
                sender_name=sender,
                config_expression_style=global_config.expression.expression_style,
                action_descriptions=action_descriptions,
                chat_target_2=chat_target_2,
                mood_state=mood_prompt,
            )
        return await global_prompt_manager.format_prompt(
            template_name,
            expression_habits_block=expression_habits_block,
            tool_info_block=tool_info,
            knowledge_prompt=prompt_info,
            memory_block=memory_block,
            relation_info_block=relation_info,
            extra_info_block=extra_info_block,
            identity=identity_block,
            action_descriptions=action_descriptions,
            sender_name=sender,
            mood_state=mood_prompt,
            background_dialogue_prompt=background_dialogue_prompt,
            time_block=time_block,
            core_dialogue_prompt=core_dialogue_prompt,
            reply_target_block=reply_target_block,
            message_txt=target,
            config_expression_style=global_config.expression.expression_style,
            keywords_reaction_prompt=keywords_reaction_prompt,
            moderation_prompt=moderation_prompt_block,
        )

    async def build_prompt_rewrite_context(
        self,
@@ -1079,9 +997,7 @@ async def get_prompt_info(message: str, threshold: float):
        logger.debug(f"获取知识库内容,相关信息:{related_info[:100]}...,信息长度: {len(related_info)}")

        # 格式化知识信息
        formatted_prompt_info = await global_prompt_manager.format_prompt(
            "knowledge_prompt", prompt_info=related_info
        )
        formatted_prompt_info = f"你有以下这些**知识**:\n{related_info}\n请你**记住上面的知识**,之后可能会用到。\n"
        return formatted_prompt_info
    else:
        logger.debug("从LPMM知识库获取知识失败,可能是从未导入过知识,返回空知识...")