refc: refactor the plugin API, complete the documentation, merge expressor and replyer, split reply from sender, new log viewer
@@ -1,534 +0,0 @@
import traceback
from typing import List, Optional, Dict, Any, Tuple

from src.chat.focus_chat.expressors.exprssion_learner import get_expression_learner
from src.chat.message_receive.message import MessageRecv, MessageThinking, MessageSending
from src.chat.message_receive.message import Seg  # Local import needed after move
from src.chat.message_receive.message import UserInfo
from src.chat.message_receive.chat_stream import get_chat_manager
from src.common.logger import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.utils_image import image_path_to_base64  # Local import needed after move
from src.chat.utils.timer_calculator import Timer  # <--- Import Timer
from src.chat.emoji_system.emoji_manager import get_emoji_manager
from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
import time
import random

logger = get_logger("expressor")


def init_prompt():
    Prompt(
        """
你可以参考你的以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
{style_habbits}

你现在正在群里聊天,以下是群里正在进行的聊天内容:
{chat_info}

以上是聊天内容,你需要了解聊天记录中的内容

{chat_target}
你的名字是{bot_name},{prompt_personality},在这聊天中,"{target_message}"引起了你的注意,对这句话,你想表达:{in_mind_reply},原因是:{reason}。你现在要思考怎么回复
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。请你修改你想表达的原句,符合你的表达风格和语言习惯
请你根据情景使用以下句法:
{grammar_habbits}
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。
现在,你说:
""",
        "default_expressor_prompt",
    )

    Prompt(
        """
你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
{style_habbits}

你现在正在群里聊天,以下是群里正在进行的聊天内容:
{chat_info}

以上是聊天内容,你需要了解聊天记录中的内容

{chat_target}
你的名字是{bot_name},{prompt_personality},在这聊天中,"{target_message}"引起了你的注意,对这句话,你想表达:{in_mind_reply},原因是:{reason}。你现在要思考怎么回复
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。
请你根据情景使用以下句法:
{grammar_habbits}
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。
现在,你说:
""",
        "default_expressor_private_prompt",  # New template for private FOCUSED chat
    )


class DefaultExpressor:
    def __init__(self, chat_stream: ChatStream):
        self.log_prefix = "expressor"
        # TODO: API-Adapter修改标记
        self.express_model = LLMRequest(
            model=global_config.model.replyer_1,
            request_type="focus.expressor",
        )
        self.heart_fc_sender = HeartFCSender()

        self.chat_id = chat_stream.stream_id
        self.chat_stream = chat_stream
        self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_id)

async def _create_thinking_message(self, anchor_message: Optional[MessageRecv], thinking_id: str):
|
||||
"""创建思考消息 (尝试锚定到 anchor_message)"""
|
||||
if not anchor_message or not anchor_message.chat_stream:
|
||||
logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。")
|
||||
return None
|
||||
|
||||
chat = anchor_message.chat_stream
|
||||
messageinfo = anchor_message.message_info
|
||||
thinking_time_point = parse_thinking_id_to_timestamp(thinking_id)
|
||||
bot_user_info = UserInfo(
|
||||
user_id=global_config.bot.qq_account,
|
||||
user_nickname=global_config.bot.nickname,
|
||||
platform=messageinfo.platform,
|
||||
)
|
||||
|
||||
thinking_message = MessageThinking(
|
||||
message_id=thinking_id,
|
||||
chat_stream=chat,
|
||||
bot_user_info=bot_user_info,
|
||||
reply=anchor_message, # 回复的是锚点消息
|
||||
thinking_start_time=thinking_time_point,
|
||||
)
|
||||
# logger.debug(f"创建思考消息thinking_message:{thinking_message}")
|
||||
|
||||
await self.heart_fc_sender.register_thinking(thinking_message)
|
||||
return None
|
||||
|
||||
async def deal_reply(
|
||||
self,
|
||||
cycle_timers: dict,
|
||||
action_data: Dict[str, Any],
|
||||
reasoning: str,
|
||||
anchor_message: MessageRecv,
|
||||
thinking_id: str,
|
||||
) -> tuple[bool, Optional[List[Tuple[str, str]]]]:
|
||||
# 创建思考消息
|
||||
await self._create_thinking_message(anchor_message, thinking_id)
|
||||
|
||||
reply = [] # 初始化 reply,防止未定义
|
||||
try:
|
||||
has_sent_something = False
|
||||
|
||||
# 处理文本部分
|
||||
text_part = action_data.get("text", [])
|
||||
if text_part:
|
||||
with Timer("生成回复", cycle_timers):
|
||||
# 可以保留原有的文本处理逻辑或进行适当调整
|
||||
reply = await self.express(
|
||||
in_mind_reply=text_part,
|
||||
anchor_message=anchor_message,
|
||||
thinking_id=thinking_id,
|
||||
reason=reasoning,
|
||||
action_data=action_data,
|
||||
)
|
||||
|
||||
with Timer("选择表情", cycle_timers):
|
||||
emoji_keyword = action_data.get("emojis", [])
|
||||
emoji_base64 = await self._choose_emoji(emoji_keyword)
|
||||
if emoji_base64:
|
||||
reply.append(("emoji", emoji_base64))
|
||||
|
||||
if reply:
|
||||
with Timer("发送消息", cycle_timers):
|
||||
sent_msg_list = await self.send_response_messages(
|
||||
anchor_message=anchor_message,
|
||||
thinking_id=thinking_id,
|
||||
response_set=reply,
|
||||
)
|
||||
has_sent_something = True
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 文本回复生成失败")
|
||||
|
||||
if not has_sent_something:
|
||||
logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")
|
||||
|
||||
return has_sent_something, sent_msg_list
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"回复失败: {e}")
|
||||
traceback.print_exc()
|
||||
return False, None
|
||||
|
||||
# --- 回复器 (Replier) 的定义 --- #
|
||||
|
||||
async def express(
|
||||
self,
|
||||
in_mind_reply: str,
|
||||
reason: str,
|
||||
anchor_message: MessageRecv,
|
||||
thinking_id: str,
|
||||
action_data: Dict[str, Any],
|
||||
) -> Optional[List[str]]:
|
||||
"""
|
||||
回复器 (Replier): 核心逻辑,负责生成回复文本。
|
||||
(已整合原 HeartFCGenerator 的功能)
|
||||
"""
|
||||
try:
|
||||
# --- Determine sender_name for private chat ---
|
||||
sender_name_for_prompt = "某人" # Default for group or if info unavailable
|
||||
if not self.is_group_chat and self.chat_target_info:
|
||||
# Prioritize person_name, then nickname
|
||||
sender_name_for_prompt = (
|
||||
self.chat_target_info.get("person_name")
|
||||
or self.chat_target_info.get("user_nickname")
|
||||
or sender_name_for_prompt
|
||||
)
|
||||
# --- End determining sender_name ---
|
||||
|
||||
target_message = action_data.get("target", "")
|
||||
|
||||
# 3. 构建 Prompt
|
||||
with Timer("构建Prompt", {}): # 内部计时器,可选保留
|
||||
prompt = await self.build_prompt_focus(
|
||||
chat_stream=self.chat_stream, # Pass the stream object
|
||||
in_mind_reply=in_mind_reply,
|
||||
reason=reason,
|
||||
sender_name=sender_name_for_prompt, # Pass determined name
|
||||
target_message=target_message,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
)
|
||||
|
||||
# 4. 调用 LLM 生成回复
|
||||
content = None
|
||||
reasoning_content = None
|
||||
model_name = "unknown_model"
|
||||
if not prompt:
|
||||
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。")
|
||||
return None
|
||||
|
||||
try:
|
||||
with Timer("LLM生成", {}): # 内部计时器,可选保留
|
||||
# TODO: API-Adapter修改标记
|
||||
# logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n")
|
||||
content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt)
|
||||
|
||||
logger.info(f"想要表达:{in_mind_reply}||理由:{reason}")
|
||||
logger.info(f"最终回复: {content}\n")
|
||||
|
||||
except Exception as llm_e:
|
||||
# 精简报错信息
|
||||
logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}")
|
||||
return None # LLM 调用失败则无法生成回复
|
||||
|
||||
processed_response = process_llm_response(content)
|
||||
|
||||
# 5. 处理 LLM 响应
|
||||
if not content:
|
||||
logger.warning(f"{self.log_prefix}LLM 生成了空内容。")
|
||||
return None
|
||||
if not processed_response:
|
||||
logger.warning(f"{self.log_prefix}处理后的回复为空。")
|
||||
return None
|
||||
|
||||
reply_set = []
|
||||
for str in processed_response:
|
||||
reply_seg = ("text", str)
|
||||
reply_set.append(reply_seg)
|
||||
|
||||
return reply_set
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}回复生成意外失败: {e}")
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
async def build_prompt_focus(
|
||||
self,
|
||||
reason,
|
||||
chat_stream,
|
||||
sender_name,
|
||||
in_mind_reply,
|
||||
target_message,
|
||||
config_expression_style,
|
||||
) -> str:
|
||||
is_group_chat = bool(chat_stream.group_info)
|
||||
|
||||
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=chat_stream.stream_id,
|
||||
timestamp=time.time(),
|
||||
limit=global_config.focus_chat.observation_context_size,
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=True,
|
||||
timestamp_mode="relative",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
)
|
||||
|
||||
expression_learner = get_expression_learner()
|
||||
(
|
||||
learnt_style_expressions,
|
||||
learnt_grammar_expressions,
|
||||
personality_expressions,
|
||||
) = await expression_learner.get_expression_by_chat_id(chat_stream.stream_id)
|
||||
|
||||
style_habbits = []
|
||||
grammar_habbits = []
|
||||
# 1. learnt_expressions加权随机选3条
|
||||
if learnt_style_expressions:
|
||||
weights = [expr["count"] for expr in learnt_style_expressions]
|
||||
selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3)
|
||||
for expr in selected_learnt:
|
||||
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
|
||||
style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
|
||||
# 2. learnt_grammar_expressions加权随机选3条
|
||||
if learnt_grammar_expressions:
|
||||
weights = [expr["count"] for expr in learnt_grammar_expressions]
|
||||
selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3)
|
||||
for expr in selected_learnt:
|
||||
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
|
||||
grammar_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
|
||||
# 3. personality_expressions随机选1条
|
||||
if personality_expressions:
|
||||
expr = random.choice(personality_expressions)
|
||||
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
|
||||
style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
|
||||
|
||||
style_habbits_str = "\n".join(style_habbits)
|
||||
grammar_habbits_str = "\n".join(grammar_habbits)
|
||||
|
||||
logger.debug("开始构建 focus prompt")
|
||||
|
||||
# --- Choose template based on chat type ---
|
||||
if is_group_chat:
|
||||
template_name = "default_expressor_prompt"
|
||||
# Group specific formatting variables (already fetched or default)
|
||||
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
|
||||
# chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
style_habbits=style_habbits_str,
|
||||
grammar_habbits=grammar_habbits_str,
|
||||
chat_target=chat_target_1,
|
||||
chat_info=chat_talking_prompt,
|
||||
bot_name=global_config.bot.nickname,
|
||||
prompt_personality="",
|
||||
reason=reason,
|
||||
in_mind_reply=in_mind_reply,
|
||||
target_message=target_message,
|
||||
config_expression_style=config_expression_style,
|
||||
)
|
||||
else: # Private chat
|
||||
template_name = "default_expressor_private_prompt"
|
||||
chat_target_1 = "你正在和人私聊"
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
style_habbits=style_habbits_str,
|
||||
grammar_habbits=grammar_habbits_str,
|
||||
chat_target=chat_target_1,
|
||||
chat_info=chat_talking_prompt,
|
||||
bot_name=global_config.bot.nickname,
|
||||
prompt_personality="",
|
||||
reason=reason,
|
||||
in_mind_reply=in_mind_reply,
|
||||
target_message=target_message,
|
||||
config_expression_style=config_expression_style,
|
||||
)
|
||||
|
||||
return prompt
|
||||
|
||||
# --- 发送器 (Sender) --- #
|
||||
|
||||
async def send_response_messages(
|
||||
self,
|
||||
anchor_message: Optional[MessageRecv],
|
||||
response_set: List[Tuple[str, str]],
|
||||
thinking_id: str = "",
|
||||
display_message: str = "",
|
||||
) -> Optional[MessageSending]:
|
||||
"""发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender"""
|
||||
chat = self.chat_stream
|
||||
chat_id = self.chat_id
|
||||
if chat is None:
|
||||
logger.error(f"{self.log_prefix} 无法发送回复,chat_stream 为空。")
|
||||
return None
|
||||
if not anchor_message:
|
||||
logger.error(f"{self.log_prefix} 无法发送回复,anchor_message 为空。")
|
||||
return None
|
||||
|
||||
stream_name = get_chat_manager().get_stream_name(chat_id) or chat_id # 获取流名称用于日志
|
||||
|
||||
# 检查思考过程是否仍在进行,并获取开始时间
|
||||
if thinking_id:
|
||||
thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id)
|
||||
else:
|
||||
thinking_id = "ds" + str(round(time.time(), 2))
|
||||
thinking_start_time = time.time()
|
||||
|
||||
if thinking_start_time is None:
|
||||
logger.error(f"[{stream_name}]expressor思考过程未找到或已结束,无法发送回复。")
|
||||
return None
|
||||
|
||||
mark_head = False
|
||||
# first_bot_msg: Optional[MessageSending] = None
|
||||
reply_message_ids = [] # 记录实际发送的消息ID
|
||||
|
||||
sent_msg_list = []
|
||||
|
||||
for i, msg_text in enumerate(response_set):
|
||||
# 为每个消息片段生成唯一ID
|
||||
type = msg_text[0]
|
||||
data = msg_text[1]
|
||||
|
||||
if global_config.experimental.debug_show_chat_mode and type == "text":
|
||||
data += "ᶠ"
|
||||
|
||||
part_message_id = f"{thinking_id}_{i}"
|
||||
message_segment = Seg(type=type, data=data)
|
||||
|
||||
if type == "emoji":
|
||||
is_emoji = True
|
||||
else:
|
||||
is_emoji = False
|
||||
reply_to = not mark_head
|
||||
|
||||
bot_message = await self._build_single_sending_message(
|
||||
anchor_message=anchor_message,
|
||||
message_id=part_message_id,
|
||||
message_segment=message_segment,
|
||||
display_message=display_message,
|
||||
reply_to=reply_to,
|
||||
is_emoji=is_emoji,
|
||||
thinking_id=thinking_id,
|
||||
thinking_start_time=thinking_start_time,
|
||||
)
|
||||
|
||||
try:
|
||||
if not mark_head:
|
||||
mark_head = True
|
||||
# first_bot_msg = bot_message # 保存第一个成功发送的消息对象
|
||||
typing = False
|
||||
else:
|
||||
typing = True
|
||||
|
||||
if type == "emoji":
|
||||
typing = False
|
||||
|
||||
if anchor_message.raw_message:
|
||||
set_reply = True
|
||||
else:
|
||||
set_reply = False
|
||||
sent_msg = await self.heart_fc_sender.send_message(
|
||||
bot_message, has_thinking=True, typing=typing, set_reply=set_reply
|
||||
)
|
||||
|
||||
reply_message_ids.append(part_message_id) # 记录我们生成的ID
|
||||
|
||||
sent_msg_list.append((type, sent_msg))
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}发送回复片段 {i} ({part_message_id}) 时失败: {e}")
|
||||
traceback.print_exc()
|
||||
# 这里可以选择是继续发送下一个片段还是中止
|
||||
|
||||
# 在尝试发送完所有片段后,完成原始的 thinking_id 状态
|
||||
try:
|
||||
await self.heart_fc_sender.complete_thinking(chat_id, thinking_id)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}完成思考状态 {thinking_id} 时出错: {e}")
|
||||
|
||||
return sent_msg_list
|
||||
|
||||
async def _choose_emoji(self, send_emoji: str):
|
||||
"""
|
||||
选择表情,根据send_emoji文本选择表情,返回表情base64
|
||||
"""
|
||||
emoji_base64 = ""
|
||||
emoji_raw = await get_emoji_manager().get_emoji_for_text(send_emoji)
|
||||
if emoji_raw:
|
||||
emoji_path, _description, _emotion = emoji_raw
|
||||
emoji_base64 = image_path_to_base64(emoji_path)
|
||||
return emoji_base64
|
||||
|
||||
async def _build_single_sending_message(
|
||||
self,
|
||||
anchor_message: MessageRecv,
|
||||
message_id: str,
|
||||
message_segment: Seg,
|
||||
reply_to: bool,
|
||||
is_emoji: bool,
|
||||
thinking_id: str,
|
||||
thinking_start_time: float,
|
||||
display_message: str,
|
||||
) -> MessageSending:
|
||||
"""构建单个发送消息"""
|
||||
|
||||
bot_user_info = UserInfo(
|
||||
user_id=global_config.bot.qq_account,
|
||||
user_nickname=global_config.bot.nickname,
|
||||
platform=self.chat_stream.platform,
|
||||
)
|
||||
|
||||
bot_message = MessageSending(
|
||||
message_id=message_id, # 使用片段的唯一ID
|
||||
chat_stream=self.chat_stream,
|
||||
bot_user_info=bot_user_info,
|
||||
sender_info=anchor_message.message_info.user_info,
|
||||
message_segment=message_segment,
|
||||
reply=anchor_message, # 回复原始锚点
|
||||
is_head=reply_to,
|
||||
is_emoji=is_emoji,
|
||||
thinking_start_time=thinking_start_time, # 传递原始思考开始时间
|
||||
display_message=display_message,
|
||||
)
|
||||
|
||||
return bot_message


def weighted_sample_no_replacement(items, weights, k) -> list:
    """
    加权且不放回地随机抽取k个元素。

    参数:
        items: 待抽取的元素列表
        weights: 每个元素对应的权重(与items等长,且为正数)
        k: 需要抽取的元素个数
    返回:
        selected: 按权重加权且不重复抽取的k个元素组成的列表

    如果 items 中的元素不足 k 个,就只会返回所有可用的元素

    实现思路:
        每次从当前池中按权重加权随机选出一个元素,选中后将其从池中移除,重复k次。
        这样保证了:
        1. count越大被选中概率越高
        2. 不会重复选中同一个元素
    """
    selected = []
    pool = list(zip(items, weights))
    for _ in range(min(k, len(pool))):
        total = sum(w for _, w in pool)
        r = random.uniform(0, total)
        upto = 0
        for idx, (item, weight) in enumerate(pool):
            upto += weight
            if upto >= r:
                selected.append(item)
                pool.pop(idx)
                break
    return selected
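
For reference, a minimal usage sketch of the helper above (editorial illustration, not part of this commit; the expression dicts are made-up sample data in the same shape that build_prompt_focus passes in):

# Illustrative only: sample learnt expressions weighted by their "count" field.
learnt_style_expressions = [
    {"situation": "有人开玩笑", "style": "顺着梗回一句", "count": 5},
    {"situation": "有人提问", "style": "简短直接地回答", "count": 2},
    {"situation": "群里冷场", "style": "抛出一个新话题", "count": 1},
]
weights = [expr["count"] for expr in learnt_style_expressions]

# Draw up to 3 distinct entries; a higher count means a higher chance of being picked.
for expr in weighted_sample_no_replacement(learnt_style_expressions, weights, 3):
    print(f"当{expr['situation']}时,使用 {expr['style']}")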


init_prompt()
@@ -21,8 +21,6 @@ from src.chat.heart_flow.observation.chatting_observation import ChattingObserva
from src.chat.heart_flow.observation.structure_observation import StructureObservation
from src.chat.heart_flow.observation.actions_observation import ActionObservation
from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.chat.focus_chat.memory_activator import MemoryActivator
from src.chat.focus_chat.info_processors.base_processor import BaseProcessor
from src.chat.focus_chat.info_processors.self_processor import SelfProcessor
@@ -125,9 +123,6 @@ class HeartFChatting:
        self.processors: List[BaseProcessor] = []
        self._register_default_processors()

        self.expressor = DefaultExpressor(chat_stream=self.chat_stream)
        self.replyer = DefaultReplyer(chat_stream=self.chat_stream)

        self.action_manager = ActionManager()
        self.action_planner = PlannerFactory.create_planner(
            log_prefix=self.log_prefix, action_manager=self.action_manager
@@ -543,6 +538,7 @@ class HeartFChatting:

    async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
        try:
            loop_start_time = time.time()
            with Timer("观察", cycle_timers):
                # 执行所有观察器的观察
                for observation in self.observations:
@@ -583,7 +579,7 @@ class HeartFChatting:
            }

            with Timer("规划器", cycle_timers):
                plan_result = await self.action_planner.plan(all_plan_info, running_memorys)
                plan_result = await self.action_planner.plan(all_plan_info, running_memorys, loop_start_time)

            loop_plan_info = {
                "action_result": plan_result.get("action_result", {}),
@@ -607,7 +603,7 @@ class HeartFChatting:
            logger.debug(f"{self.log_prefix} 麦麦想要:'{action_str}'")

            success, reply_text, command = await self._handle_action(
                action_type, reasoning, action_data, cycle_timers, thinking_id, self.observations
                action_type, reasoning, action_data, cycle_timers, thinking_id
            )

            loop_action_info = {
@@ -646,7 +642,6 @@ class HeartFChatting:
        action_data: dict,
        cycle_timers: dict,
        thinking_id: str,
        observations: List[Observation],
    ) -> tuple[bool, str, str]:
        """
        处理规划动作,使用动作工厂创建相应的动作处理器
@@ -670,9 +665,6 @@ class HeartFChatting:
                reasoning=reasoning,
                cycle_timers=cycle_timers,
                thinking_id=thinking_id,
                observations=observations,
                expressor=self.expressor,
                replyer=self.replyer,
                chat_stream=self.chat_stream,
                log_prefix=self.log_prefix,
                shutting_down=self._shutting_down,
@@ -15,7 +15,7 @@ install(extra_lines=3)
logger = get_logger("sender")


async def send_message(message: MessageSending) -> str:
async def send_message(message: MessageSending) -> bool:
    """合并后的消息发送函数,包含WS发送和日志记录"""
    message_preview = truncate_message(message.processed_plain_text, max_length=40)

@@ -23,7 +23,7 @@ async def send_message(message: MessageSending) -> str:
        # 直接调用API发送消息
        await get_global_api().send_message(message)
        logger.info(f"已将消息 '{message_preview}' 发往平台'{message.message_info.platform}'")
        return message.processed_plain_text
        return True

    except Exception as e:
        logger.error(f"发送消息 '{message_preview}' 发往平台'{message.message_info.platform}' 失败: {str(e)}")
@@ -73,17 +73,15 @@ class HeartFCSender:
        thinking_message = self.thinking_messages.get(chat_id, {}).get(message_id)
        return thinking_message.thinking_start_time if thinking_message else None

    async def send_message(self, message: MessageSending, has_thinking=False, typing=False, set_reply=False):
    async def send_message(self, message: MessageSending, typing=False, set_reply=False, storage_message=True):
        """
        处理、发送并存储一条消息。

        参数:
            message: MessageSending 对象,待发送的消息。
            has_thinking: 是否管理思考状态,表情包无思考状态(如需调用 register_thinking/complete_thinking)。
            typing: 是否模拟打字等待(根据 has_thinking 控制等待时长)。
            typing: 是否模拟打字等待。

        用法:
        - has_thinking=True 时,自动处理思考消息的时间和清理。
        - typing=True 时,发送前会有打字等待。
        """
        if not message.chat_stream:
@@ -98,40 +96,29 @@ class HeartFCSender:

        try:
            if set_reply:
                _ = message.update_thinking_time()
                message.build_reply()
                logger.debug(f"[{chat_id}] 选择回复引用消息: {message.processed_plain_text[:20]}...")

            # --- 条件应用 set_reply 逻辑 ---
            if (
                message.is_head
                and not message.is_private_message()
                and message.reply.processed_plain_text != "[System Trigger Context]"
            ):
                # message.set_reply(message.reply)
                message.set_reply()
                logger.debug(f"[{chat_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}...")

            # print(f"message.display_message: {message.display_message}")
            await message.process()
            # print(f"message.display_message: {message.display_message}")

            if typing:
                if has_thinking:
                    typing_time = calculate_typing_time(
                        input_string=message.processed_plain_text,
                        thinking_start_time=message.thinking_start_time,
                        is_emoji=message.is_emoji,
                    )
                    await asyncio.sleep(typing_time)
                else:
                    await asyncio.sleep(0.5)
                typing_time = calculate_typing_time(
                    input_string=message.processed_plain_text,
                    thinking_start_time=message.thinking_start_time,
                    is_emoji=message.is_emoji,
                )
                await asyncio.sleep(typing_time)


            sent_msg = await send_message(message)
            await self.storage.store_message(message, message.chat_stream)
            if not sent_msg:
                return False

            if sent_msg:
                return sent_msg
            else:
                return "发送失败"
            if storage_message:
                await self.storage.store_message(message, message.chat_stream)

            return sent_msg

        except Exception as e:
            logger.error(f"[{chat_id}] 处理或存储消息 {message_id} 时出错: {e}")
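
For orientation, a hedged sketch of calling the reworked HeartFCSender.send_message shown above (editorial illustration, not part of the diff; bot_message stands for a prepared MessageSending such as the one built in send_response_messages):

# Illustrative only: the has_thinking flag is gone, the typing delay is always derived
# from the message itself, and storage can now be skipped via storage_message.
sender = HeartFCSender()

async def deliver(bot_message: MessageSending) -> bool:
    sent = await sender.send_message(
        bot_message,
        typing=True,            # simulate a typing pause before sending
        set_reply=True,         # quote the anchored message when appropriate
        storage_message=True,   # persist the sent message through self.storage
    )
    # send_message returns a truthy value on success and False/None on failure.
    return bool(sent)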
@@ -172,6 +172,7 @@ class HeartFCMessageReceiver:
                return

            # 5. 消息存储
            print(f"message: {message.message_info.time}")
            await self.storage.store_message(message, chat)

            # 6. 兴趣度计算与更新

@@ -11,7 +11,7 @@ from src.chat.message_receive.chat_stream import get_chat_manager
from .base_processor import BaseProcessor
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.expression_selection_info import ExpressionSelectionInfo
from src.chat.focus_chat.expressors.exprssion_learner import get_expression_learner
from src.chat.express.exprssion_learner import get_expression_learner
from json_repair import repair_json
import json

@@ -1,13 +1,9 @@
from typing import Dict, List, Optional, Type, Any
from src.plugin_system.base.base_action import BaseAction
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.message_receive.chat_stream import ChatStream
from src.common.logger import get_logger

# 不再需要导入动作类,因为已经在main.py中导入
# import src.chat.actions.default_actions  # noqa
from src.plugin_system.core.component_registry import component_registry
from src.plugin_system.base.component_types import ComponentType

logger = get_logger("action_manager")

@@ -15,87 +11,11 @@ logger = get_logger("action_manager")
|
||||
ActionInfo = Dict[str, Any]
|
||||
|
||||
|
||||
class PluginActionWrapper(BaseAction):
|
||||
"""
|
||||
新插件系统Action组件的兼容性包装器
|
||||
|
||||
将新插件系统的Action组件包装为旧系统兼容的BaseAction接口
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, plugin_action, action_name: str, action_data: dict, reasoning: str, cycle_timers: dict, thinking_id: str
|
||||
):
|
||||
"""初始化包装器"""
|
||||
# 调用旧系统BaseAction初始化,只传递它能接受的参数
|
||||
super().__init__(
|
||||
action_data=action_data, reasoning=reasoning, cycle_timers=cycle_timers, thinking_id=thinking_id
|
||||
)
|
||||
|
||||
# 存储插件Action实例(它已经包含了所有必要的服务对象)
|
||||
self.plugin_action = plugin_action
|
||||
self.action_name = action_name
|
||||
|
||||
# 从插件Action实例复制属性到包装器
|
||||
self._sync_attributes_from_plugin_action()
|
||||
|
||||
def _sync_attributes_from_plugin_action(self):
|
||||
"""从插件Action实例同步属性到包装器"""
|
||||
# 基本属性
|
||||
self.action_name = getattr(self.plugin_action, "action_name", self.action_name)
|
||||
|
||||
# 设置兼容的默认值
|
||||
self.action_description = f"插件Action: {self.action_name}"
|
||||
self.action_parameters = {}
|
||||
self.action_require = []
|
||||
|
||||
# 激活类型属性(从新插件系统转换)
|
||||
plugin_focus_type = getattr(self.plugin_action, "focus_activation_type", None)
|
||||
plugin_normal_type = getattr(self.plugin_action, "normal_activation_type", None)
|
||||
|
||||
if plugin_focus_type:
|
||||
self.focus_activation_type = (
|
||||
plugin_focus_type.value if hasattr(plugin_focus_type, "value") else str(plugin_focus_type)
|
||||
)
|
||||
if plugin_normal_type:
|
||||
self.normal_activation_type = (
|
||||
plugin_normal_type.value if hasattr(plugin_normal_type, "value") else str(plugin_normal_type)
|
||||
)
|
||||
|
||||
# 其他属性
|
||||
self.random_activation_probability = getattr(self.plugin_action, "random_activation_probability", 0.0)
|
||||
self.llm_judge_prompt = getattr(self.plugin_action, "llm_judge_prompt", "")
|
||||
self.activation_keywords = getattr(self.plugin_action, "activation_keywords", [])
|
||||
self.keyword_case_sensitive = getattr(self.plugin_action, "keyword_case_sensitive", False)
|
||||
|
||||
# 模式和并行设置
|
||||
plugin_mode = getattr(self.plugin_action, "mode_enable", None)
|
||||
if plugin_mode:
|
||||
self.mode_enable = plugin_mode.value if hasattr(plugin_mode, "value") else str(plugin_mode)
|
||||
|
||||
self.parallel_action = getattr(self.plugin_action, "parallel_action", True)
|
||||
self.enable_plugin = True
|
||||
|
||||
async def execute(self) -> tuple[bool, str]:
|
||||
"""实现抽象方法execute,委托给插件Action的execute方法"""
|
||||
try:
|
||||
# 调用插件Action的execute方法
|
||||
success, response = await self.plugin_action.execute()
|
||||
|
||||
logger.debug(f"插件Action {self.action_name} 执行{'成功' if success else '失败'}: {response}")
|
||||
return success, response
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"插件Action {self.action_name} 执行异常: {e}")
|
||||
return False, f"插件Action执行失败: {str(e)}"
|
||||
|
||||
async def handle_action(self) -> tuple[bool, str]:
|
||||
"""兼容旧系统的动作处理接口,委托给execute方法"""
|
||||
return await self.execute()
|
||||
|
||||
|
||||
class ActionManager:
|
||||
"""
|
||||
动作管理器,用于管理各种类型的动作
|
||||
|
||||
现在统一使用新插件系统,简化了原有的新旧兼容逻辑。
|
||||
"""
|
||||
|
||||
# 类常量
|
||||
@@ -119,23 +39,20 @@ class ActionManager:
|
||||
# 初始化时将默认动作加载到使用中的动作
|
||||
self._using_actions = self._default_actions.copy()
|
||||
|
||||
# 添加系统核心动作
|
||||
# self._add_system_core_actions()
|
||||
|
||||
def _load_plugin_actions(self) -> None:
|
||||
"""
|
||||
加载所有插件目录中的动作
|
||||
加载所有插件系统中的动作
|
||||
"""
|
||||
try:
|
||||
# 从新插件系统获取Action组件
|
||||
self._load_plugin_system_actions()
|
||||
logger.debug("从新插件系统加载Action组件成功")
|
||||
logger.debug("从插件系统加载Action组件成功")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"加载插件动作失败: {e}")
|
||||
|
||||
def _load_plugin_system_actions(self) -> None:
|
||||
"""从新插件系统的component_registry加载Action组件"""
|
||||
"""从插件系统的component_registry加载Action组件"""
|
||||
try:
|
||||
from src.plugin_system.core.component_registry import component_registry
|
||||
from src.plugin_system.base.component_types import ComponentType
|
||||
@@ -148,7 +65,7 @@ class ActionManager:
|
||||
logger.debug(f"Action组件 {action_name} 已存在,跳过")
|
||||
continue
|
||||
|
||||
# 将新插件系统的ActionInfo转换为旧系统格式
|
||||
# 将插件系统的ActionInfo转换为ActionManager格式
|
||||
converted_action_info = {
|
||||
"description": action_info.description,
|
||||
"parameters": getattr(action_info, "action_parameters", {}),
|
||||
@@ -165,8 +82,7 @@ class ActionManager:
|
||||
# 模式和并行设置
|
||||
"mode_enable": action_info.mode_enable.value,
|
||||
"parallel_action": action_info.parallel_action,
|
||||
# 标记这是来自新插件系统的组件
|
||||
"_plugin_system_component": True,
|
||||
# 插件信息
|
||||
"_plugin_name": getattr(action_info, "plugin_name", ""),
|
||||
}
|
||||
|
||||
@@ -180,7 +96,7 @@ class ActionManager:
|
||||
f"从插件系统加载Action组件: {action_name} (插件: {getattr(action_info, 'plugin_name', 'unknown')})"
|
||||
)
|
||||
|
||||
logger.info(f"从新插件系统加载了 {len(action_components)} 个Action组件")
|
||||
logger.info(f"从插件系统加载了 {len(action_components)} 个Action组件")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"从插件系统加载Action组件失败: {e}")
|
||||
@@ -195,12 +111,9 @@ class ActionManager:
|
||||
reasoning: str,
|
||||
cycle_timers: dict,
|
||||
thinking_id: str,
|
||||
observations: List[Observation],
|
||||
chat_stream: ChatStream,
|
||||
log_prefix: str,
|
||||
shutting_down: bool = False,
|
||||
expressor: DefaultExpressor = None,
|
||||
replyer: DefaultReplyer = None,
|
||||
) -> Optional[BaseAction]:
|
||||
"""
|
||||
创建动作处理器实例
|
||||
@@ -211,9 +124,6 @@ class ActionManager:
|
||||
reasoning: 执行理由
|
||||
cycle_timers: 计时器字典
|
||||
thinking_id: 思考ID
|
||||
observations: 观察列表
|
||||
expressor: 表达器
|
||||
replyer: 回复器
|
||||
chat_stream: 聊天流
|
||||
log_prefix: 日志前缀
|
||||
shutting_down: 是否正在关闭
|
||||
@@ -221,122 +131,39 @@ class ActionManager:
|
||||
Returns:
|
||||
Optional[BaseAction]: 创建的动作处理器实例,如果动作名称未注册则返回None
|
||||
"""
|
||||
# 检查动作是否在当前使用的动作集中
|
||||
# if action_name not in self._using_actions:
|
||||
# logger.warning(f"当前不可用的动作类型: {action_name}")
|
||||
# return None
|
||||
|
||||
# 检查是否是新插件系统的Action组件
|
||||
action_info = self._registered_actions.get(action_name)
|
||||
if action_info and action_info.get("_plugin_system_component", False):
|
||||
return self._create_plugin_system_action(
|
||||
action_name,
|
||||
action_data,
|
||||
reasoning,
|
||||
cycle_timers,
|
||||
thinking_id,
|
||||
observations,
|
||||
chat_stream,
|
||||
log_prefix,
|
||||
shutting_down,
|
||||
expressor,
|
||||
replyer,
|
||||
)
|
||||
|
||||
# 旧系统的动作创建逻辑
|
||||
from src.plugin_system.core.component_registry import component_registry
|
||||
|
||||
action_registry = component_registry.get_action_registry()
|
||||
handler_class = action_registry.get(action_name)
|
||||
if not handler_class:
|
||||
logger.warning(f"未注册的动作类型: {action_name}")
|
||||
return None
|
||||
|
||||
try:
|
||||
# 创建动作实例
|
||||
instance = handler_class(
|
||||
action_data=action_data,
|
||||
reasoning=reasoning,
|
||||
cycle_timers=cycle_timers,
|
||||
thinking_id=thinking_id,
|
||||
observations=observations,
|
||||
expressor=expressor,
|
||||
replyer=replyer,
|
||||
chat_stream=chat_stream,
|
||||
log_prefix=log_prefix,
|
||||
shutting_down=shutting_down,
|
||||
)
|
||||
|
||||
return instance
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"创建动作处理器实例失败: {e}")
|
||||
return None
|
||||
|
||||
def _create_plugin_system_action(
|
||||
self,
|
||||
action_name: str,
|
||||
action_data: dict,
|
||||
reasoning: str,
|
||||
cycle_timers: dict,
|
||||
thinking_id: str,
|
||||
observations: List[Observation],
|
||||
chat_stream: ChatStream,
|
||||
log_prefix: str,
|
||||
shutting_down: bool = False,
|
||||
expressor: DefaultExpressor = None,
|
||||
replyer: DefaultReplyer = None,
|
||||
) -> Optional["PluginActionWrapper"]:
|
||||
"""
|
||||
创建新插件系统的Action组件实例,并包装为兼容旧系统的接口
|
||||
|
||||
Returns:
|
||||
Optional[PluginActionWrapper]: 包装后的Action实例
|
||||
"""
|
||||
try:
|
||||
from src.plugin_system.core.component_registry import component_registry
|
||||
|
||||
# 获取组件类
|
||||
component_class = component_registry.get_component_class(action_name)
|
||||
# 获取组件类 - 明确指定查询Action类型
|
||||
component_class = component_registry.get_component_class(action_name, ComponentType.ACTION)
|
||||
if not component_class:
|
||||
logger.error(f"未找到插件Action组件类: {action_name}")
|
||||
logger.warning(f"{log_prefix} 未找到Action组件: {action_name}")
|
||||
return None
|
||||
|
||||
# 获取组件信息
|
||||
component_info = component_registry.get_component_info(action_name, ComponentType.ACTION)
|
||||
if not component_info:
|
||||
logger.warning(f"{log_prefix} 未找到Action组件信息: {action_name}")
|
||||
return None
|
||||
|
||||
# 获取插件配置
|
||||
component_info = component_registry.get_component_info(action_name)
|
||||
plugin_config = None
|
||||
if component_info and component_info.plugin_name:
|
||||
plugin_config = component_registry.get_plugin_config(component_info.plugin_name)
|
||||
plugin_config = component_registry.get_plugin_config(component_info.plugin_name)
|
||||
|
||||
# 创建插件Action实例
|
||||
plugin_action_instance = component_class(
|
||||
# 创建动作实例
|
||||
instance = component_class(
|
||||
action_data=action_data,
|
||||
reasoning=reasoning,
|
||||
cycle_timers=cycle_timers,
|
||||
thinking_id=thinking_id,
|
||||
chat_stream=chat_stream,
|
||||
expressor=expressor,
|
||||
replyer=replyer,
|
||||
observations=observations,
|
||||
log_prefix=log_prefix,
|
||||
shutting_down=shutting_down,
|
||||
plugin_config=plugin_config,
|
||||
)
|
||||
|
||||
# 创建兼容性包装器
|
||||
wrapper = PluginActionWrapper(
|
||||
plugin_action=plugin_action_instance,
|
||||
action_name=action_name,
|
||||
action_data=action_data,
|
||||
reasoning=reasoning,
|
||||
cycle_timers=cycle_timers,
|
||||
thinking_id=thinking_id,
|
||||
)
|
||||
|
||||
logger.debug(f"创建插件Action实例成功: {action_name}")
|
||||
return wrapper
|
||||
logger.debug(f"创建Action实例成功: {action_name}")
|
||||
return instance
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"创建插件Action实例失败 {action_name}: {e}")
|
||||
logger.error(f"创建Action实例失败 {action_name}: {e}")
|
||||
import traceback
|
||||
|
||||
logger.error(traceback.format_exc())
|
||||
@@ -366,19 +193,13 @@ class ActionManager:
|
||||
"""
|
||||
filtered_actions = {}
|
||||
|
||||
# print(self._using_actions)
|
||||
|
||||
for action_name, action_info in self._using_actions.items():
|
||||
# print(f"action_info: {action_info}")
|
||||
# print(f"action_name: {action_name}")
|
||||
action_mode = action_info.get("mode_enable", "all")
|
||||
|
||||
# 检查动作是否在当前模式下启用
|
||||
if action_mode == "all" or action_mode == mode:
|
||||
filtered_actions[action_name] = action_info
|
||||
logger.debug(f"动作 {action_name} 在模式 {mode} 下可用 (mode_enable: {action_mode})")
|
||||
# else:
|
||||
# logger.debug(f"动作 {action_name} 在模式 {mode} 下不可用 (mode_enable: {action_mode})")
|
||||
|
||||
logger.debug(f"模式 {mode} 下可用动作: {list(filtered_actions.keys())}")
|
||||
return filtered_actions
|
||||
@@ -474,20 +295,6 @@ class ActionManager:
|
||||
def restore_default_actions(self) -> None:
|
||||
"""恢复默认动作集到使用集"""
|
||||
self._using_actions = self._default_actions.copy()
|
||||
# 添加系统核心动作(即使enable_plugin为False的系统动作)
|
||||
# self._add_system_core_actions()
|
||||
|
||||
# def _add_system_core_actions(self) -> None:
|
||||
# """
|
||||
# 添加系统核心动作到使用集
|
||||
# 系统核心动作是那些enable_plugin为False但是系统必需的动作
|
||||
# """
|
||||
# system_core_actions = ["exit_focus_chat"] # 可以根据需要扩展
|
||||
|
||||
# for action_name in system_core_actions:
|
||||
# if action_name in self._registered_actions and action_name not in self._using_actions:
|
||||
# self._using_actions[action_name] = self._registered_actions[action_name]
|
||||
# logger.debug(f"添加系统核心动作到使用集: {action_name}")
|
||||
|
||||
def add_system_action_if_needed(self, action_name: str) -> bool:
|
||||
"""
|
||||
@@ -517,5 +324,4 @@ class ActionManager:
|
||||
"""
|
||||
from src.plugin_system.core.component_registry import component_registry
|
||||
|
||||
action_registry = component_registry.get_action_registry()
|
||||
return action_registry.get(action_name)
|
||||
return component_registry.get_component_class(action_name)
|
||||
|
||||
@@ -12,14 +12,14 @@ class BasePlanner(ABC):
        self.action_manager = action_manager

    @abstractmethod
    async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]]) -> Dict[str, Any]:
    async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]], loop_start_time: float) -> Dict[str, Any]:
        """
        规划下一步行动

        Args:
            all_plan_info: 所有计划信息
            running_memorys: 回忆信息

            loop_start_time: 循环开始时间
        Returns:
            Dict[str, Any]: 规划结果
        """
@@ -242,6 +242,8 @@ class ActionModifier:

        for action_name, action_info in actions_with_info.items():
            activation_type = action_info.get("focus_activation_type", "always")

            print(f"action_name: {action_name}, activation_type: {activation_type}")

            # 现在统一是字符串格式的激活类型值
            if activation_type == "always":

@@ -32,11 +32,7 @@ def init_prompt():
|
||||
{self_info_block}
|
||||
请记住你的性格,身份和特点。
|
||||
|
||||
{extra_info_block}
|
||||
{memory_str}
|
||||
|
||||
{time_block}
|
||||
|
||||
你是群内的一员,你现在正在参与群内的闲聊,以下是群内的聊天内容:
|
||||
|
||||
{chat_content_block}
|
||||
@@ -86,13 +82,14 @@ class ActionPlanner(BasePlanner):
|
||||
request_type="focus.planner", # 用于动作规划
|
||||
)
|
||||
|
||||
async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]], loop_start_time: float) -> Dict[str, Any]:
|
||||
"""
|
||||
规划器 (Planner): 使用LLM根据上下文决定做出什么动作。
|
||||
|
||||
参数:
|
||||
all_plan_info: 所有计划信息
|
||||
running_memorys: 回忆信息
|
||||
loop_start_time: 循环开始时间
|
||||
"""
|
||||
|
||||
action = "no_reply" # 默认动作
|
||||
@@ -246,6 +243,8 @@ class ActionPlanner(BasePlanner):
|
||||
if selected_expressions:
|
||||
action_data["selected_expressions"] = selected_expressions
|
||||
logger.debug(f"{self.log_prefix} 传递{len(selected_expressions)}个选中的表达方式到action_data")
|
||||
|
||||
action_data["loop_start_time"] = loop_start_time
|
||||
|
||||
# 对于reply动作不需要额外处理,因为相关字段已经在上面的循环中添加到action_data
|
||||
|
||||
@@ -326,7 +325,7 @@ class ActionPlanner(BasePlanner):
|
||||
|
||||
chat_content_block = ""
|
||||
if observed_messages_str:
|
||||
chat_content_block = f"聊天记录:\n{observed_messages_str}"
|
||||
chat_content_block = f"\n{observed_messages_str}"
|
||||
else:
|
||||
chat_content_block = "你还未开始聊天"
|
||||
|
||||
@@ -387,7 +386,7 @@ class ActionPlanner(BasePlanner):
|
||||
prompt = planner_prompt_template.format(
|
||||
relation_info_block=relation_info_block,
|
||||
self_info_block=self_info_block,
|
||||
memory_str=memory_str,
|
||||
# memory_str=memory_str,
|
||||
time_block=time_block,
|
||||
# bot_name=global_config.bot.nickname,
|
||||
prompt_personality=personality_block,
|
||||
@@ -397,7 +396,7 @@ class ActionPlanner(BasePlanner):
|
||||
cycle_info_block=cycle_info,
|
||||
action_options_text=action_options_block,
|
||||
# action_available_block=action_available_block,
|
||||
extra_info_block=extra_info_block,
|
||||
# extra_info_block=extra_info_block,
|
||||
moderation_prompt=moderation_prompt_block,
|
||||
)
|
||||
return prompt
|
||||
|
||||
@@ -8,9 +8,7 @@ from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.common.logger import get_logger
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.chat.utils.utils_image import image_path_to_base64 # Local import needed after move
|
||||
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
|
||||
from src.chat.emoji_system.emoji_manager import get_emoji_manager
|
||||
from src.chat.focus_chat.heartFC_sender import HeartFCSender
|
||||
from src.chat.utils.utils import process_llm_response
|
||||
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
|
||||
@@ -18,6 +16,7 @@ from src.chat.message_receive.chat_stream import ChatStream
|
||||
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
|
||||
from src.chat.express.exprssion_learner import get_expression_learner
|
||||
import time
|
||||
import random
|
||||
from datetime import datetime
|
||||
@@ -50,7 +49,7 @@ def init_prompt():
|
||||
不要浮夸,不要夸张修辞,只输出一条回复就好。
|
||||
现在,你说:
|
||||
""",
|
||||
"default_replyer_prompt",
|
||||
"default_generator_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
@@ -70,7 +69,51 @@ def init_prompt():
|
||||
不要浮夸,不要夸张修辞,只输出一条回复就好。
|
||||
现在,你说:
|
||||
""",
|
||||
"default_replyer_private_prompt",
|
||||
"default_generator_private_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
你可以参考你的以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
|
||||
{style_habbits}
|
||||
|
||||
你现在正在群里聊天,以下是群里正在进行的聊天内容:
|
||||
{chat_info}
|
||||
|
||||
以上是聊天内容,你需要了解聊天记录中的内容
|
||||
|
||||
{chat_target}
|
||||
你的名字是{bot_name},{prompt_personality},在这聊天中,"{sender_name}"说的"{target_message}"引起了你的注意,对这句话,你想表达:{raw_reply},原因是:{reason}。你现在要思考怎么回复
|
||||
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。请你修改你想表达的原句,符合你的表达风格和语言习惯
|
||||
请你根据情景使用以下句法:
|
||||
{grammar_habbits}
|
||||
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
|
||||
不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。
|
||||
现在,你说:
|
||||
""",
|
||||
"default_expressor_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
|
||||
{style_habbits}
|
||||
|
||||
你现在正在群里聊天,以下是群里正在进行的聊天内容:
|
||||
{chat_info}
|
||||
|
||||
以上是聊天内容,你需要了解聊天记录中的内容
|
||||
|
||||
{chat_target}
|
||||
你的名字是{bot_name},{prompt_personality},在这聊天中,"{sender_name}"说的"{target_message}"引起了你的注意,对这句话,你想表达:{raw_reply},原因是:{reason}。你现在要思考怎么回复
|
||||
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。
|
||||
请你根据情景使用以下句法:
|
||||
{grammar_habbits}
|
||||
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
|
||||
不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。
|
||||
现在,你说:
|
||||
""",
|
||||
"default_expressor_private_prompt", # New template for private FOCUSED chat
|
||||
)
|
||||
|
||||
|
||||
@@ -84,9 +127,8 @@ class DefaultReplyer:
|
||||
)
|
||||
self.heart_fc_sender = HeartFCSender()
|
||||
|
||||
self.chat_id = chat_stream.stream_id
|
||||
self.chat_stream = chat_stream
|
||||
self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_id)
|
||||
self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
|
||||
|
||||
async def _create_thinking_message(self, anchor_message: Optional[MessageRecv], thinking_id: str):
|
||||
"""创建思考消息 (尝试锚定到 anchor_message)"""
|
||||
@@ -114,213 +156,152 @@ class DefaultReplyer:
|
||||
|
||||
await self.heart_fc_sender.register_thinking(thinking_message)
|
||||
return None
|
||||
|
||||
async def deal_reply(
|
||||
|
||||
async def generate_reply_with_context(
|
||||
self,
|
||||
cycle_timers: dict,
|
||||
action_data: Dict[str, Any],
|
||||
reasoning: str,
|
||||
anchor_message: MessageRecv,
|
||||
thinking_id: str,
|
||||
) -> tuple[bool, Optional[List[Tuple[str, str]]]]:
|
||||
# 创建思考消息
|
||||
await self._create_thinking_message(anchor_message, thinking_id)
|
||||
|
||||
reply = [] # 初始化 reply,防止未定义
|
||||
try:
|
||||
has_sent_something = False
|
||||
|
||||
# 处理文本部分
|
||||
# text_part = action_data.get("text", [])
|
||||
# if text_part:
|
||||
sent_msg_list = []
|
||||
|
||||
with Timer("生成回复", cycle_timers):
|
||||
# 可以保留原有的文本处理逻辑或进行适当调整
|
||||
reply = await self.reply(
|
||||
# in_mind_reply=text_part,
|
||||
anchor_message=anchor_message,
|
||||
thinking_id=thinking_id,
|
||||
reason=reasoning,
|
||||
action_data=action_data,
|
||||
)
|
||||
|
||||
if reply:
|
||||
with Timer("发送消息", cycle_timers):
|
||||
sent_msg_list = await self.send_response_messages(
|
||||
anchor_message=anchor_message,
|
||||
thinking_id=thinking_id,
|
||||
response_set=reply,
|
||||
)
|
||||
has_sent_something = True
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 文本回复生成失败")
|
||||
|
||||
if not has_sent_something:
|
||||
logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")
|
||||
|
||||
return has_sent_something, sent_msg_list
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"回复失败: {e}")
|
||||
traceback.print_exc()
|
||||
return False, None
|
||||
|
||||
# --- 回复器 (Replier) 的定义 --- #
|
||||
|
||||
async def deal_emoji(
|
||||
self,
|
||||
anchor_message: MessageRecv,
|
||||
thinking_id: str,
|
||||
action_data: Dict[str, Any],
|
||||
cycle_timers: dict,
|
||||
) -> Optional[List[str]]:
|
||||
"""
|
||||
表情动作处理类
|
||||
"""
|
||||
|
||||
await self._create_thinking_message(anchor_message, thinking_id)
|
||||
|
||||
try:
|
||||
has_sent_something = False
|
||||
sent_msg_list = []
|
||||
reply = []
|
||||
with Timer("选择表情", cycle_timers):
|
||||
emoji_keyword = action_data.get("description", [])
|
||||
emoji_base64, _description, emotion = await self._choose_emoji(emoji_keyword)
|
||||
if emoji_base64:
|
||||
# logger.info(f"选择表情: {_description}")
|
||||
reply.append(("emoji", emoji_base64))
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 没有找到合适表情")
|
||||
|
||||
if reply:
|
||||
with Timer("发送表情", cycle_timers):
|
||||
sent_msg_list = await self.send_response_messages(
|
||||
anchor_message=anchor_message,
|
||||
thinking_id=thinking_id,
|
||||
response_set=reply,
|
||||
)
|
||||
has_sent_something = True
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 表情发送失败")
|
||||
|
||||
if not has_sent_something:
|
||||
logger.warning(f"{self.log_prefix} 表情发送失败")
|
||||
|
||||
return has_sent_something, sent_msg_list
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"回复失败: {e}")
|
||||
traceback.print_exc()
|
||||
return False, None
|
||||
|
||||
async def reply(
|
||||
self,
|
||||
# in_mind_reply: str,
|
||||
reason: str,
|
||||
anchor_message: MessageRecv,
|
||||
thinking_id: str,
|
||||
action_data: Dict[str, Any],
|
||||
) -> Optional[List[str]]:
|
||||
reply_data: Dict[str, Any],
|
||||
) -> Tuple[bool, Optional[List[str]]]:
|
||||
"""
|
||||
回复器 (Replier): 核心逻辑,负责生成回复文本。
|
||||
(已整合原 HeartFCGenerator 的功能)
|
||||
"""
|
||||
try:
|
||||
# 1. 获取情绪影响因子并调整模型温度
|
||||
# arousal_multiplier = mood_manager.get_arousal_multiplier()
|
||||
# current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
|
||||
# self.express_model.params["temperature"] = current_temp # 动态调整温度
|
||||
|
||||
reply_to = action_data.get("reply_to", "none")
|
||||
|
||||
sender = ""
|
||||
targer = ""
|
||||
if ":" in reply_to or ":" in reply_to:
|
||||
# 使用正则表达式匹配中文或英文冒号
|
||||
parts = re.split(pattern=r"[::]", string=reply_to, maxsplit=1)
|
||||
if len(parts) == 2:
|
||||
sender = parts[0].strip()
|
||||
targer = parts[1].strip()
|
||||
|
||||
identity = action_data.get("identity", "")
|
||||
extra_info_block = action_data.get("extra_info_block", "")
|
||||
relation_info_block = action_data.get("relation_info_block", "")
|
||||
|
||||
# 3. 构建 Prompt
|
||||
with Timer("构建Prompt", {}): # 内部计时器,可选保留
|
||||
prompt = await self.build_prompt_focus(
|
||||
chat_stream=self.chat_stream, # Pass the stream object
|
||||
# in_mind_reply=in_mind_reply,
|
||||
identity=identity,
|
||||
extra_info_block=extra_info_block,
|
||||
relation_info_block=relation_info_block,
|
||||
reason=reason,
|
||||
sender_name=sender, # Pass determined name
|
||||
target_message=targer,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
action_data=action_data, # 传递action_data
|
||||
prompt = await self.build_prompt_reply_context(
|
||||
reply_data=reply_data, # 传递action_data
|
||||
)
|
||||
|
||||
# 4. 调用 LLM 生成回复
|
||||
content = None
|
||||
reasoning_content = None
|
||||
model_name = "unknown_model"
|
||||
if not prompt:
|
||||
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。")
|
||||
return None
|
||||
|
||||
try:
|
||||
with Timer("LLM生成", {}): # 内部计时器,可选保留
|
||||
logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n")
|
||||
content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt)
|
||||
|
||||
# logger.info(f"prompt: {prompt}")
|
||||
logger.info(f"最终回复: {content}")
|
||||
|
||||
except Exception as llm_e:
|
||||
# 精简报错信息
|
||||
logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}")
|
||||
return None # LLM 调用失败则无法生成回复
|
||||
return False, None # LLM 调用失败则无法生成回复
|
||||
|
||||
processed_response = process_llm_response(content)
|
||||
|
||||
# 5. 处理 LLM 响应
|
||||
if not content:
|
||||
logger.warning(f"{self.log_prefix}LLM 生成了空内容。")
|
||||
return None
|
||||
return False, None
|
||||
if not processed_response:
|
||||
logger.warning(f"{self.log_prefix}处理后的回复为空。")
|
||||
return None
|
||||
return False, None
|
||||
|
||||
reply_set = []
|
||||
for str in processed_response:
|
||||
reply_seg = ("text", str)
|
||||
reply_set.append(reply_seg)
|
||||
|
||||
return reply_set
|
||||
return True , reply_set
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}回复生成意外失败: {e}")
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
async def build_prompt_focus(
|
||||
return False, None
|
||||
|
||||
async def rewrite_reply_with_context(
|
||||
self,
|
||||
reason,
|
||||
chat_stream,
|
||||
sender_name,
|
||||
# in_mind_reply,
|
||||
extra_info_block,
|
||||
relation_info_block,
|
||||
identity,
|
||||
target_message,
|
||||
config_expression_style,
|
||||
action_data=None,
|
||||
# stuation,
|
||||
reply_data: Dict[str, Any],
|
||||
) -> Tuple[bool, Optional[List[str]]]:
|
||||
"""
|
||||
表达器 (Expressor): 核心逻辑,负责生成回复文本。
|
||||
"""
|
||||
try:
|
||||
|
||||
|
||||
reply_to = reply_data.get("reply_to", "")
|
||||
raw_reply = reply_data.get("raw_reply", "")
|
||||
reason = reply_data.get("reason", "")
|
||||
|
||||
with Timer("构建Prompt", {}): # 内部计时器,可选保留
|
||||
prompt = await self.build_prompt_rewrite_context(
|
||||
raw_reply=raw_reply,
|
||||
reason=reason,
|
||||
reply_to=reply_to,
|
||||
)
|
||||
|
||||
content = None
|
||||
reasoning_content = None
|
||||
model_name = "unknown_model"
|
||||
if not prompt:
|
||||
logger.error(f"{self.log_prefix}Prompt 构建失败,无法生成回复。")
|
||||
return False, None
|
||||
|
||||
try:
|
||||
with Timer("LLM生成", {}): # 内部计时器,可选保留
|
||||
# TODO: API-Adapter修改标记
|
||||
content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt)
|
||||
|
||||
logger.info(f"想要表达:{raw_reply}||理由:{reason}")
|
||||
logger.info(f"最终回复: {content}\n")
|
||||
|
||||
except Exception as llm_e:
|
||||
# 精简报错信息
|
||||
logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}")
|
||||
return False, None # LLM 调用失败则无法生成回复
|
||||
|
||||
processed_response = process_llm_response(content)
|
||||
|
||||
# 5. 处理 LLM 响应
|
||||
if not content:
|
||||
logger.warning(f"{self.log_prefix}LLM 生成了空内容。")
|
||||
return False, None
|
||||
if not processed_response:
|
||||
logger.warning(f"{self.log_prefix}处理后的回复为空。")
|
||||
return False, None
|
||||
|
||||
reply_set = []
|
||||
for str in processed_response:
|
||||
reply_seg = ("text", str)
|
||||
reply_set.append(reply_seg)
|
||||
|
||||
return True, reply_set
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}回复生成意外失败: {e}")
|
||||
traceback.print_exc()
|
||||
return False, None
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
async def build_prompt_reply_context(
|
||||
self,
|
||||
reply_data=None,
|
||||
) -> str:
|
||||
chat_stream = self.chat_stream
|
||||
|
||||
is_group_chat = bool(chat_stream.group_info)
|
||||
|
||||
identity = reply_data.get("identity", "")
|
||||
extra_info_block = reply_data.get("extra_info_block", "")
|
||||
relation_info_block = reply_data.get("relation_info_block", "")
|
||||
reply_to = reply_data.get("reply_to", "none")
|
||||
|
||||
sender = ""
|
||||
target = ""
|
||||
if ":" in reply_to or ":" in reply_to:
|
||||
# 使用正则表达式匹配中文或英文冒号
|
||||
parts = re.split(pattern=r"[::]", string=reply_to, maxsplit=1)
|
||||
if len(parts) == 2:
|
||||
sender = parts[0].strip()
|
||||
target = parts[1].strip()
|
||||
|
||||
|
||||
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=chat_stream.stream_id,
|
||||
@@ -341,7 +322,7 @@ class DefaultReplyer:
|
||||
grammar_habbits = []
|
||||
|
||||
# 使用从处理器传来的选中表达方式
|
||||
selected_expressions = action_data.get("selected_expressions", []) if action_data else []
|
||||
selected_expressions = reply_data.get("selected_expressions", []) if reply_data else []
|
||||
|
||||
if selected_expressions:
|
||||
logger.info(f"{self.log_prefix} 使用处理器选中的{len(selected_expressions)}个表达方式")
|
||||
@@ -371,7 +352,7 @@ class DefaultReplyer:
|
||||
try:
|
||||
# 处理关键词规则
|
||||
for rule in global_config.keyword_reaction.keyword_rules:
|
||||
if any(keyword in target_message for keyword in rule.keywords):
|
||||
if any(keyword in target for keyword in rule.keywords):
|
||||
logger.info(f"检测到关键词规则:{rule.keywords},触发反应:{rule.reaction}")
|
||||
keywords_reaction_prompt += f"{rule.reaction},"
|
||||
|
||||
@@ -380,7 +361,7 @@ class DefaultReplyer:
|
||||
for pattern_str in rule.regex:
|
||||
try:
|
||||
pattern = re.compile(pattern_str)
|
||||
if result := pattern.search(target_message):
|
||||
if result := pattern.search(target):
|
||||
reaction = rule.reaction
|
||||
for name, content in result.groupdict().items():
|
||||
reaction = reaction.replace(f"[{name}]", content)
|
||||
@@ -397,18 +378,18 @@ class DefaultReplyer:

# logger.debug("开始构建 focus prompt")

if sender_name:
if sender:
reply_target_block = (
f"现在{sender_name}说的:{target_message}。引起了你的注意,你想要在群里发言或者回复这条消息。"
f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。"
)
elif target_message:
reply_target_block = f"现在{target_message}引起了你的注意,你想要在群里发言或者回复这条消息。"
elif target:
reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。"
else:
reply_target_block = "现在,你想要在群里发言或者回复消息。"

# --- Choose template based on chat type ---
if is_group_chat:
template_name = "default_replyer_prompt"
template_name = "default_generator_prompt"
# Group specific formatting variables (already fetched or default)
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
# chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
@@ -422,18 +403,14 @@ class DefaultReplyer:
relation_info_block=relation_info_block,
time_block=time_block,
reply_target_block=reply_target_block,
# bot_name=global_config.bot.nickname,
# prompt_personality="",
# reason=reason,
# in_mind_reply=in_mind_reply,
keywords_reaction_prompt=keywords_reaction_prompt,
identity=identity,
target_message=target_message,
sender_name=sender_name,
config_expression_style=config_expression_style,
target_message=target,
sender_name=sender,
config_expression_style=global_config.expression.expression_style,
)
else: # Private chat
template_name = "default_replyer_private_prompt"
template_name = "default_generator_private_prompt"
chat_target_1 = "你正在和人私聊"
prompt = await global_prompt_manager.format_prompt(
template_name,
@@ -444,20 +421,125 @@ class DefaultReplyer:
relation_info_block=relation_info_block,
time_block=time_block,
reply_target_block=reply_target_block,
# bot_name=global_config.bot.nickname,
# prompt_personality="",
# reason=reason,
# in_mind_reply=in_mind_reply,
keywords_reaction_prompt=keywords_reaction_prompt,
identity=identity,
target_message=target_message,
sender_name=sender_name,
config_expression_style=config_expression_style,
target_message=target,
sender_name=sender,
config_expression_style=global_config.expression.expression_style,
)

return prompt

# --- 发送器 (Sender) --- #
async def build_prompt_rewrite_context(
self,
reason,
raw_reply,
reply_to,
) -> str:

sender = ""
target = ""
if ":" in reply_to or "：" in reply_to:
# 使用正则表达式匹配中文或英文冒号
parts = re.split(pattern=r"[:：]", string=reply_to, maxsplit=1)
if len(parts) == 2:
sender = parts[0].strip()
target = parts[1].strip()

chat_stream = self.chat_stream

is_group_chat = bool(chat_stream.group_info)

message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id,
timestamp=time.time(),
limit=global_config.focus_chat.observation_context_size,
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=True,
timestamp_mode="relative",
read_mark=0.0,
truncate=True,
)

expression_learner = get_expression_learner()
(
learnt_style_expressions,
learnt_grammar_expressions,
personality_expressions,
) = await expression_learner.get_expression_by_chat_id(chat_stream.stream_id)

style_habbits = []
grammar_habbits = []
# 1. learnt_expressions加权随机选3条
if learnt_style_expressions:
weights = [expr["count"] for expr in learnt_style_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
# 2. learnt_grammar_expressions加权随机选3条
if learnt_grammar_expressions:
weights = [expr["count"] for expr in learnt_grammar_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
grammar_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
# 3. personality_expressions随机选1条
if personality_expressions:
expr = random.choice(personality_expressions)
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
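# Selection scheme: up to 3 style and 3 grammar expressions are sampled without replacement, weighted by their "count" field (presumably usage frequency), plus 1 personality expression chosen uniformly at random.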

style_habbits_str = "\n".join(style_habbits)
grammar_habbits_str = "\n".join(grammar_habbits)

logger.debug("开始构建 focus prompt")

# --- Choose template based on chat type ---
if is_group_chat:
template_name = "default_expressor_prompt"
# Group specific formatting variables (already fetched or default)
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
# chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")

prompt = await global_prompt_manager.format_prompt(
template_name,
style_habbits=style_habbits_str,
grammar_habbits=grammar_habbits_str,
chat_target=chat_target_1,
chat_info=chat_talking_prompt,
bot_name=global_config.bot.nickname,
prompt_personality="",
reason=reason,
raw_reply=raw_reply,
sender_name=sender,
target_message=target,
config_expression_style=global_config.expression.expression_style,
)
else: # Private chat
template_name = "default_expressor_private_prompt"
chat_target_1 = "你正在和人私聊"
prompt = await global_prompt_manager.format_prompt(
template_name,
style_habbits=style_habbits_str,
grammar_habbits=grammar_habbits_str,
chat_target=chat_target_1,
chat_info=chat_talking_prompt,
bot_name=global_config.bot.nickname,
prompt_personality="",
reason=reason,
raw_reply=raw_reply,
sender_name=sender,
target_message=target,
config_expression_style=global_config.expression.expression_style,
)

return prompt

async def send_response_messages(
self,
@@ -468,7 +550,7 @@ class DefaultReplyer:
) -> Optional[MessageSending]:
"""发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender"""
chat = self.chat_stream
chat_id = self.chat_id
chat_id = self.chat_stream.stream_id
if chat is None:
logger.error(f"{self.log_prefix} 无法发送回复,chat_stream 为空。")
return None
@@ -514,7 +596,7 @@ class DefaultReplyer:
is_emoji = False
reply_to = not mark_head
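# Only the first segment of a multi-part reply gets reply_to=True (mark_head is still False at that point); it is later passed to _build_single_sending_message as is_head.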

bot_message = await self._build_single_sending_message(
bot_message: MessageSending = await self._build_single_sending_message(
anchor_message=anchor_message,
message_id=part_message_id,
message_segment=message_segment,
@@ -526,22 +608,22 @@ class DefaultReplyer:
)

try:
if (bot_message.is_private_message() or
bot_message.reply.processed_plain_text != "[System Trigger Context]" or
mark_head):
set_reply = False
else:
set_reply = True

if not mark_head:
mark_head = True
# first_bot_msg = bot_message # 保存第一个成功发送的消息对象
typing = False
else:
typing = True

if type == "emoji":
typing = False

if anchor_message.raw_message:
set_reply = True
else:
set_reply = False
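# Simplified rule: set_reply is now driven solely by whether the anchor message carries a raw_message, i.e. whether it originated from a real incoming message rather than a system trigger.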

sent_msg = await self.heart_fc_sender.send_message(
bot_message, has_thinking=True, typing=typing, set_reply=set_reply
bot_message, typing=typing, set_reply=set_reply
)

reply_message_ids.append(part_message_id) # 记录我们生成的ID
@@ -562,30 +644,15 @@ class DefaultReplyer:

return sent_msg_list

async def _choose_emoji(self, send_emoji: str):
"""
选择表情,根据send_emoji文本选择表情,返回表情base64
"""
emoji_base64 = ""
description = ""
emoji_raw = await get_emoji_manager().get_emoji_for_text(send_emoji)
if emoji_raw:
emoji_path, description, _emotion = emoji_raw
emoji_base64 = image_path_to_base64(emoji_path)
return emoji_base64, description, _emotion
else:
return None, None, None
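# Returns (base64, description, emotion) when a matching emoji is found, otherwise (None, None, None), so callers can test the first element.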

async def _build_single_sending_message(
self,
anchor_message: MessageRecv,
message_id: str,
message_segment: Seg,
reply_to: bool,
is_emoji: bool,
thinking_id: str,
thinking_start_time: float,
display_message: str,
anchor_message: MessageRecv = None
) -> MessageSending:
"""构建单个发送消息"""

@@ -596,12 +663,16 @@ class DefaultReplyer:
)

# await anchor_message.process()
if anchor_message:
sender_info = anchor_message.message_info.user_info
else:
sender_info = None

bot_message = MessageSending(
message_id=message_id, # 使用片段的唯一ID
chat_stream=self.chat_stream,
bot_user_info=bot_user_info,
sender_info=anchor_message.message_info.user_info,
sender_info=sender_info,
message_segment=message_segment,
reply=anchor_message, # 回复原始锚点
is_head=reply_to,
@@ -14,7 +14,8 @@ from src.chat.message_receive.message import MessageRecv
from src.chat.heart_flow.observation.observation import Observation
from src.common.logger import get_logger
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info

from src.chat.message_receive.chat_stream import get_chat_manager
from src.person_info.person_info import get_person_info_manager
logger = get_logger("observation")

# 定义提示模板
@@ -70,6 +71,8 @@ class ChattingObservation(Observation):
self.oldest_messages = []
self.oldest_messages_str = ""

self.last_observe_time = datetime.now().timestamp() -1
print(f"last_observe_time: {self.last_observe_time}")
initial_messages = get_raw_msg_before_timestamp_with_chat(self.chat_id, self.last_observe_time, 10)
self.last_observe_time = initial_messages[-1]["time"] if initial_messages else self.last_observe_time
self.talking_message = initial_messages
@@ -92,39 +95,28 @@ class ChattingObservation(Observation):
def get_observe_info(self, ids=None):
return self.talking_message_str

def search_message_by_text(self, text: str) -> Optional[MessageRecv]:
def get_recv_message_by_text(self, sender: str, text: str) -> Optional[MessageRecv]:
"""
根据回复的纯文本
1. 在talking_message中查找最新的,最匹配的消息
2. 如果找到,则返回消息
"""
msg_list = []
find_msg = None
reverse_talking_message = list(reversed(self.talking_message))

for message in reverse_talking_message:
if message["processed_plain_text"] == text:
find_msg = message
break
else:
raw_message = message.get("raw_message")
if raw_message:
similarity = difflib.SequenceMatcher(None, text, raw_message).ratio()
else:
similarity = difflib.SequenceMatcher(None, text, message.get("processed_plain_text", "")).ratio()
msg_list.append({"message": message, "similarity": similarity})
user_id = message["user_id"]
platform = message["platform"]
person_id = get_person_info_manager().get_person_id(platform, user_id)
person_name = get_person_info_manager().get_value(person_id, "person_name")
if person_name == sender:
similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio()
if similarity >= 0.9:
find_msg = message
break
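# New matching strategy: walk messages newest-first, consider only those whose resolved person_name equals the given sender, and accept the first whose difflib similarity to the text reaches 0.9.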

if not find_msg:
if msg_list:
msg_list.sort(key=lambda x: x["similarity"], reverse=True)
if msg_list[0]["similarity"] >= 0.9:
find_msg = msg_list[0]["message"]
else:
logger.debug("没有找到锚定消息,相似度低")
return None
else:
logger.debug("没有找到锚定消息,没有消息捕获")
return None
return None

user_info = {
"platform": find_msg.get("user_platform", ""),
@@ -167,6 +159,10 @@ class ChattingObservation(Observation):
"processed_plain_text": find_msg.get("processed_plain_text"),
}
find_rec_msg = MessageRecv(message_dict)

find_rec_msg.update_chat_stream(get_chat_manager().get_or_create_stream(self.chat_id))

return find_rec_msg

async def observe(self):
@@ -179,6 +175,8 @@ class ChattingObservation(Observation):
limit_mode="latest",
)

print(f"new_messages_list: {new_messages_list}")

last_obs_time_mark = self.last_observe_time
if new_messages_list:
self.last_observe_time = new_messages_list[-1]["time"]

@@ -171,6 +171,15 @@ class ChatManager:
# 使用MD5生成唯一ID
key = "_".join(components)
return hashlib.md5(key.encode()).hexdigest()

def get_stream_id(self, platform: str, chat_id: str, is_group: bool = True) -> str:
"""获取聊天流ID"""
if is_group:
components = [platform, str(chat_id)]
else:
components = [platform, str(chat_id), "private"]
key = "_".join(components)
return hashlib.md5(key.encode()).hexdigest()
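# For example, get_stream_id("qq", "123456") hashes the key "qq_123456", while a private chat hashes "qq_123456_private"; this appears to mirror the MD5 key scheme used by the ID generation above ("qq"/"123456" are illustrative values).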

async def get_or_create_stream(
self, platform: str, user_info: UserInfo, group_info: Optional[GroupInfo] = None

@@ -275,7 +275,7 @@ class MessageSending(MessageProcessBase):
message_id: str,
chat_stream: "ChatStream",
bot_user_info: UserInfo,
sender_info: UserInfo | None, # 用来记录发送者信息,用于私聊回复
sender_info: UserInfo | None, # 用来记录发送者信息
message_segment: Seg,
display_message: str = "",
reply: Optional["MessageRecv"] = None,
@@ -304,20 +304,17 @@ class MessageSending(MessageProcessBase):
# 用于显示发送内容与显示不一致的情况
self.display_message = display_message

def set_reply(self, reply: Optional["MessageRecv"] = None):
def build_reply(self):
"""设置回复消息"""
if True:
if reply:
self.reply = reply
if self.reply:
self.reply_to_message_id = self.reply.message_info.message_id
self.message_segment = Seg(
type="seglist",
data=[
Seg(type="reply", data=self.reply.message_info.message_id),
self.message_segment,
],
)
if self.reply:
self.reply_to_message_id = self.reply.message_info.message_id
self.message_segment = Seg(
type="seglist",
data=[
Seg(type="reply", data=self.reply.message_info.message_id),
self.message_segment,
],
)
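# build_reply no longer takes an argument: it quotes whatever self.reply already holds, prepending a reply Seg that points at the anchor message id and wrapping the original segment in a seglist.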

async def process(self) -> None:
"""处理消息内容,生成纯文本和详细文本"""

@@ -230,7 +230,7 @@ class MessageManager:
logger.debug(
f"[{message.chat_stream.stream_id}] 应用 set_reply 逻辑: {message.processed_plain_text[:20]}..."
)
message.set_reply(message.reply)
message.build_reply()
# --- 结束条件 set_reply ---

await message.process() # 预处理消息内容

@@ -22,7 +22,7 @@ from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner
from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier
from src.chat.normal_chat.normal_chat_expressor import NormalChatExpressor
from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.chat.focus_chat.replyer.default_generator import DefaultReplyer
from src.person_info.person_info import PersonInfoManager
from src.chat.utils.chat_message_builder import (
get_raw_msg_by_timestamp_with_chat,
@@ -1063,9 +1063,6 @@ class NormalChat:
reasoning=action_data.get("reasoning", ""),
cycle_timers={}, # normal_chat使用空的cycle_timers
thinking_id=thinking_id,
observations=[], # normal_chat不使用observations
expressor=self.expressor, # 使用normal_chat专用的expressor
replyer=self.replyer,
chat_stream=self.chat_stream,
log_prefix=self.stream_name,
shutting_down=self._disabled,

@@ -1,4 +1,4 @@
from src.chat.focus_chat.expressors.exprssion_learner import get_expression_learner
from src.chat.express.exprssion_learner import get_expression_learner
from src.config.config import global_config
from src.common.logger import get_logger
from src.individuality.individuality import get_individuality