feat: allow commands to set a display name instead of the raw command text; add a processor timeout; clean up NormalChat switching

This commit is contained in:
SengokuCola
2025-05-28 13:49:19 +08:00
parent 7fcd5c9abe
commit 41f97a0bf1
12 changed files with 75 additions and 23 deletions

View File

@@ -373,7 +373,7 @@ class DefaultExpressor:
     # --- 发送器 (Sender) --- #
     async def send_response_messages(
-        self, anchor_message: Optional[MessageRecv], response_set: List[Tuple[str, str]], thinking_id: str = ""
+        self, anchor_message: Optional[MessageRecv], response_set: List[Tuple[str, str]], thinking_id: str = "", display_message: str = ""
     ) -> Optional[MessageSending]:
         """发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender"""
         chat = self.chat_stream
@@ -426,6 +426,7 @@ class DefaultExpressor:
                 anchor_message=anchor_message,
                 message_id=part_message_id,
                 message_segment=message_segment,
+                display_message=display_message,
                 reply_to=reply_to,
                 is_emoji=is_emoji,
                 thinking_id=thinking_id,
@@ -489,6 +490,7 @@ class DefaultExpressor:
         is_emoji: bool,
         thinking_id: str,
         thinking_start_time: float,
+        display_message: str,
     ) -> MessageSending:
         """构建单个发送消息"""
@@ -508,6 +510,7 @@ class DefaultExpressor:
             is_head=reply_to,
             is_emoji=is_emoji,
             thinking_start_time=thinking_start_time,  # 传递原始思考开始时间
+            display_message=display_message,
         )
         return bot_message

View File

@@ -53,6 +53,9 @@ CONSECUTIVE_NO_REPLY_THRESHOLD = 3  # 连续不回复的阈值
 logger = get_logger("hfc")  # Logger Name Changed

+# 设定处理器超时时间(秒)
+PROCESSOR_TIMEOUT = 10
+
 async def _handle_cycle_delay(action_taken_this_cycle: bool, cycle_start_time: float, log_prefix: str):
     """处理循环延迟"""
@@ -376,9 +379,13 @@ class HeartFChatting:
             for processor in self.processors:
                 processor_name = processor.__class__.log_prefix
-                task = asyncio.create_task(
-                    processor.process_info(observations=observations, running_memorys=running_memorys)
-                )
+                # 用lambda包裹便于传参
+                async def run_with_timeout(proc=processor):
+                    return await asyncio.wait_for(
+                        proc.process_info(observations=observations, running_memorys=running_memorys),
+                        timeout=PROCESSOR_TIMEOUT
+                    )
+
+                task = asyncio.create_task(run_with_timeout())
                 processor_tasks.append(task)
                 task_to_name_map[task] = processor_name
                 logger.debug(f"{self.log_prefix} 启动处理器任务: {processor_name}")
@@ -404,6 +411,8 @@ class HeartFChatting:
                         all_plan_info.extend(result_list)
                     else:
                         logger.warning(f"{self.log_prefix} 处理器 {processor_name} 返回了 None")
+                except asyncio.TimeoutError:
+                    logger.error(f"{self.log_prefix} 处理器 {processor_name} 超时(>{PROCESSOR_TIMEOUT}s已跳过")
                 except Exception as e:
                     logger.error(
                         f"{self.log_prefix} 处理器 {processor_name} 执行失败,耗时 (自并行开始): {duration_since_parallel_start:.2f}秒. 错误: {e}",

View File

@@ -137,9 +137,9 @@ class ActionProcessor(BaseProcessor):
         # 检查no_reply比例
         print(f"no_reply_count: {no_reply_count}, len(recent_cycles): {len(recent_cycles)}")
         # print(1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111)
-        if len(recent_cycles) >= (4 * global_config.focus_chat.exit_focus_threshold) and (
+        if len(recent_cycles) >= (5 * global_config.focus_chat.exit_focus_threshold) and (
             no_reply_count / len(recent_cycles)
-        ) >= (0.6 * global_config.focus_chat.exit_focus_threshold):
+        ) >= (0.8 * global_config.focus_chat.exit_focus_threshold):
             if global_config.chat.chat_mode == "auto":
                 result["add"].append("exit_focus_chat")
                 result["remove"].append("no_reply")

View File

@@ -111,7 +111,7 @@ class PluginAction(BaseAction):
         return platform, user_id

     # 提供简化的API方法
-    async def send_message(self, type: str, data: str, target: Optional[str] = "") -> bool:
+    async def send_message(self, type: str, data: str, target: Optional[str] = "", display_message: str = "") -> bool:
         """发送消息的简化方法

         Args:
@@ -158,6 +158,7 @@ class PluginAction(BaseAction):
         success = await expressor.send_response_messages(
             anchor_message=anchor_message,
             response_set=response_set,
+            display_message=display_message,
         )
         return success

View File

@@ -126,6 +126,10 @@ class ActionPlanner:
                 reasoning = f"之前选择的动作{action}已被移除,原因: {reason}"

         # 继续处理其他信息
+        self_info = ""
+        current_mind = ""
+        cycle_info = ""
+        structured_info = ""
         for info in all_plan_info:
             if isinstance(info, ObsInfo):
                 observed_messages = info.get_talking_message()

View File

@@ -29,7 +29,6 @@ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
 class Message(MessageBase):
     chat_stream: "ChatStream" = None
     reply: Optional["Message"] = None
-    detailed_plain_text: str = ""
     processed_plain_text: str = ""
     memorized_times: int = 0
@@ -275,6 +274,7 @@ class MessageSending(MessageProcessBase):
         bot_user_info: UserInfo,
         sender_info: UserInfo | None,  # 用来记录发送者信息,用于私聊回复
         message_segment: Seg,
+        display_message: str = "",
         reply: Optional["MessageRecv"] = None,
         is_head: bool = False,
         is_emoji: bool = False,
@@ -298,10 +298,11 @@ class MessageSending(MessageProcessBase):
         self.is_emoji = is_emoji
         self.apply_set_reply_logic = apply_set_reply_logic
+        # 用于显示发送内容与显示不一致的情况
+        self.display_message = display_message

     def set_reply(self, reply: Optional["MessageRecv"] = None):
         """设置回复消息"""
-        # print(f"set_reply: {reply}")
-        # if self.message_info.format_info is not None and "reply" in self.message_info.format_info.accept_format:
         if True:
             if reply:
                 self.reply = reply
@@ -319,7 +320,6 @@ class MessageSending(MessageProcessBase):
         """处理消息内容,生成纯文本和详细文本"""
         if self.message_segment:
             self.processed_plain_text = await self._process_message_segments(self.message_segment)
-            self.detailed_plain_text = self._generate_detailed_text()

     @classmethod
     def from_thinking(

View File

@@ -24,11 +24,14 @@ class MessageStorage:
             else:
                 filtered_processed_plain_text = ""

-            detailed_plain_text = message.detailed_plain_text
-            if detailed_plain_text:
-                filtered_detailed_plain_text = re.sub(pattern, "", detailed_plain_text, flags=re.DOTALL)
+            if isinstance(message,MessageSending):
+                display_message = message.display_message
+                if display_message:
+                    filtered_display_message = re.sub(pattern, "", display_message, flags=re.DOTALL)
+                else:
+                    filtered_display_message = ""
             else:
-                filtered_detailed_plain_text = ""
+                filtered_display_message = ""

             chat_info_dict = chat_stream.to_dict()
             user_info_dict = message.message_info.user_info.to_dict()
@@ -64,7 +67,7 @@ class MessageStorage:
                 user_cardname=user_info_dict.get("user_cardname"),
                 # Text content
                 processed_plain_text=filtered_processed_plain_text,
-                detailed_plain_text=filtered_detailed_plain_text,
+                display_message=filtered_display_message,
                 memorized_times=message.memorized_times,
             )
         except Exception:

View File

@@ -54,6 +54,8 @@ class NormalChat:
         # 添加回调函数用于在满足条件时通知切换到focus_chat模式
         self.on_switch_to_focus_callback = on_switch_to_focus_callback

+        self._disabled = False  # 增加停用标志
+
     async def initialize(self):
         """异步初始化,获取聊天类型和目标信息。"""
@@ -222,6 +224,10 @@ class NormalChat:
     async def normal_response(
         self, message: MessageRecv, is_mentioned: bool, interested_rate: float, rewind_response: bool = False
     ) -> None:
+        # 新增:如果已停用,直接返回
+        if self._disabled:
+            logger.info(f"[{self.stream_name}] 已停用,忽略 normal_response。")
+            return
         # 检查收到的消息是否属于当前实例处理的 chat stream
         if message.chat_stream.stream_id != self.stream_id:
             logger.error(
@@ -306,6 +312,10 @@ class NormalChat:
             return  # 不执行后续步骤

         logger.info(f"[{self.stream_name}] 回复内容: {response_set}")

+        if self._disabled:
+            logger.info(f"[{self.stream_name}] 已停用,忽略 normal_response。")
+            return
+
         # 发送回复 (不再需要传入 chat)
         with Timer("消息发送", timing_results):
@@ -374,6 +384,8 @@ class NormalChat:
         if not self._initialized:
             await self.initialize()  # Ensure initialized before starting tasks

+        self._disabled = False  # 启动时重置停用标志
+
         if self._chat_task is None or self._chat_task.done():
             logger.info(f"[{self.stream_name}] 开始处理兴趣消息...")
             polling_task = asyncio.create_task(self._reply_interested_message())
@@ -403,6 +415,7 @@ class NormalChat:
     # 改为实例方法, 移除 stream_id 参数
     async def stop_chat(self):
         """停止当前实例的兴趣监控任务。"""
+        self._disabled = True  # 停止时设置停用标志
         if self._chat_task and not self._chat_task.done():
             task = self._chat_task
             logger.debug(f"[{self.stream_name}] 尝试取消normal聊天任务。")

View File

@@ -194,7 +194,15 @@ async def _build_readable_messages_internal(
         user_cardname = user_info.get("user_cardname")
         timestamp = msg.get("time")
-        content = msg.get("processed_plain_text", "")  # 默认空字符串
+        if msg.get("display_message"):
+            content = msg.get("display_message")
+        else:
+            content = msg.get("processed_plain_text", "")  # 默认空字符串
+
+        if "" in content:
+            content = content.replace("", "")
+        if "" in content:
+            content = content.replace("", "")

         # 检查必要信息是否存在
         if not all([platform, user_id, timestamp is not None]):
@@ -453,7 +461,15 @@ async def build_anonymous_messages(messages: List[Dict[str, Any]]) -> str:
         platform = user_info.get("platform")
         user_id = user_info.get("user_id")
         timestamp = msg.get("time")
-        content = msg.get("processed_plain_text", "")
+        if msg.get("display_message"):
+            content = msg.get("display_message")
+        else:
+            content = msg.get("processed_plain_text", "")
+
+        if "" in content:
+            content = content.replace("", "")
+        if "" in content:
+            content = content.replace("", "")

         if not all([platform, user_id, timestamp is not None]):
             continue
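
A hedged sketch of the content fallback added in both hunks above, using a plain dict in place of the real message row; the replace("", "") lines are left out here because the characters they strip did not survive rendering.

from typing import Any, Dict


def pick_content(msg: Dict[str, Any]) -> str:
    """Prefer the human-readable display_message; fall back to processed_plain_text."""
    if msg.get("display_message"):
        return msg["display_message"]
    return msg.get("processed_plain_text", "")


print(pick_content({"display_message": "我 禁言了 某用户 60"}))  # display form wins
print(pick_content({"processed_plain_text": "hello"}))           # fallback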

View File

@@ -147,6 +147,7 @@ class Messages(BaseModel):
     user_cardname = TextField(null=True)

     processed_plain_text = TextField(null=True)  # 处理后的纯文本消息
+    display_message = TextField(null=True)  # 显示的消息
     detailed_plain_text = TextField(null=True)  # 详细的纯文本消息
     memorized_times = IntegerField(default=0)  # 被记忆的次数

View File

@@ -63,7 +63,8 @@ class MuteAction(PluginAction):
             # 发送群聊禁言命令,按照新格式
             await self.send_message(
-                type="command", data={"name": "GROUP_BAN", "args": {"qq_id": str(user_id), "duration": duration_str}}
+                type = "command", data = {"name": "GROUP_BAN", "args": {"qq_id": str(user_id), "duration": duration_str}},
+                display_message = f"我 禁言了 {target} {duration_str}"
             )
             logger.info(f"{self.log_prefix} 成功发送禁言命令,用户 {target}({user_id}),时长 {duration}")

View File

@@ -186,11 +186,12 @@ pfc_chatting = false # 是否启用PFC聊天该功能仅作用于私聊
 #下面的模型若使用硅基流动则不需要更改使用ds官方则改成.env自定义的宏使用自定义模型则选择定位相似的模型自己填写

+# 额外字段
+# 下面的模型有以下额外字段可以添加:
 # stream = <true|false> : 用于指定模型是否是使用流式输出
-# 如果不指定,则该项是 False
+# pri_in = <float> : 用于指定模型输入价格
+# pri_out = <float> : 用于指定模型输出价格
+# temp = <float> : 用于指定模型温度
+# enable_thinking = <true|false> : 用于指定模型是否启用思考
+# thinking_budget = <int> : 用于指定模型思考最长长度

 [model]
 model_max_output_length = 800 # 模型单次返回的最大token数
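
A hedged example of what a model entry using these optional fields might look like; the table name example_model and every value are illustrative, not taken from the repository. It is parsed with the standard-library tomllib (Python 3.11+) only to show the shape.

import tomllib

example = """
[model.example_model]   # illustrative table name
name = "some-model-id"
stream = true           # 流式输出
pri_in = 2.0            # 输入价格
pri_out = 8.0           # 输出价格
temp = 0.7              # 温度
enable_thinking = true  # 启用思考
thinking_budget = 1024  # 思考最长长度
"""

cfg = tomllib.loads(example)["model"]["example_model"]
print(cfg["stream"], cfg["temp"], cfg["thinking_budget"])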