From 5b29399d9408357897c81354003cc0518c685657 Mon Sep 17 00:00:00 2001
From: tt-P607 <68868379+tt-P607@users.noreply.github.com>
Date: Thu, 23 Oct 2025 14:41:46 +0800
Subject: [PATCH] fix(chat): prevent message interruption while a reply is
 being generated

Introduce an `is_replying` state flag on the chat stream context to fix
a bug where a reply being generated by the LLM could be wrongly
interrupted by newly arriving messages.

- Set `is_replying` to `True` before calling the LLM to generate content.
- Skip the message-interruption check while `is_replying` is `True`.
- Reset `is_replying` to `False` in a `finally` block, so the flag is
  cleared whether generation succeeds or fails.

This makes reply generation atomic with respect to interruption and
prevents the bot from interrupting itself when a user sends messages in
rapid succession.
---
 src/chat/message_manager/message_manager.py       |  5 +++++
 src/chat/replyer/default_generator.py             | 17 +++++++++++------
 .../data_models/message_manager_data_model.py     |  1 +
 3 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/src/chat/message_manager/message_manager.py b/src/chat/message_manager/message_manager.py
index 0a1d61081..d7f5d8190 100644
--- a/src/chat/message_manager/message_manager.py
+++ b/src/chat/message_manager/message_manager.py
@@ -366,6 +366,11 @@ class MessageManager:
         if not global_config.chat.interruption_enabled or not chat_stream or not message:
             return
 
+        # Skip the check while a reply is being generated
+        if chat_stream.context_manager.context.is_replying:
+            logger.info(f"Chat stream {chat_stream.stream_id} is generating a reply; skipping interruption check")
+            return
+
         # Check whether the message is a sticker
         if message.is_picid or message.is_emoji:
             logger.info(f"Message {message.message_id} is a sticker or emoji; skipping interruption check")
diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py
index 8a874b002..1369d6afb 100644
--- a/src/chat/replyer/default_generator.py
+++ b/src/chat/replyer/default_generator.py
@@ -313,6 +313,8 @@ class DefaultReplyer:
         model_name = "unknown_model"
 
         try:
+            # Mark the stream as currently generating a reply
+            self.chat_stream.context_manager.context.is_replying = True
             content, reasoning_content, model_name, tool_call = await self.llm_generate_content(prompt)
             logger.debug(f"Replyer generated content: {content}")
             llm_response = {
@@ -321,6 +323,15 @@
                 "model": model_name,
                 "tool_calls": tool_call,
             }
+        except UserWarning as e:
+            raise e
+        except Exception as llm_e:
+            # Keep the error message concise
+            logger.error(f"LLM generation failed: {llm_e}")
+            return False, None, prompt  # No reply can be generated if the LLM call fails
+        finally:
+            # Reset the replying state
+            self.chat_stream.context_manager.context.is_replying = False
 
             # Fire the AFTER_LLM event
             if not from_plugin:
@@ -335,12 +346,6 @@ class DefaultReplyer:
                     raise UserWarning(
                         f"Plugin {result.get_summary().get('stopped_handlers', '')} cancelled content generation after the request"
                     )
-        except UserWarning as e:
-            raise e
-        except Exception as llm_e:
-            # Keep the error message concise
-            logger.error(f"LLM generation failed: {llm_e}")
-            return False, None, prompt  # No reply can be generated if the LLM call fails
 
         # After the reply is generated, store chat memory asynchronously (without blocking the return)
         try:
diff --git a/src/common/data_models/message_manager_data_model.py b/src/common/data_models/message_manager_data_model.py
index 060be410c..203d5e82c 100644
--- a/src/common/data_models/message_manager_data_model.py
+++ b/src/common/data_models/message_manager_data_model.py
@@ -52,6 +52,7 @@ class StreamContext(BaseDataModel):
     priority_mode: str | None = None
     priority_info: dict | None = None
     triggering_user_id: str | None = None  # ID of the user who triggered the current chat stream
+    is_replying: bool = False  # Whether a reply is currently being generated
 
     def add_action_to_message(self, message_id: str, action: str):
         """
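
Note for reviewers: the sketch below is a minimal, runnable illustration of
the guard pattern this patch introduces, not the project's actual code.
`StreamContext`, `generate_reply`, and `should_interrupt` are simplified
stand-ins invented for this example; the real logic lives in the files
touched above.

import asyncio
from dataclasses import dataclass

@dataclass
class StreamContext:
    # Simplified stand-in for the patched StreamContext data model
    is_replying: bool = False

async def generate_reply(ctx: StreamContext) -> str:
    # Hold the flag for the whole duration of generation, as the patch does
    ctx.is_replying = True
    try:
        await asyncio.sleep(0.1)  # stands in for the LLM call
        return "generated reply"
    finally:
        # Reset even if generation raised, mirroring the patch's finally block
        ctx.is_replying = False

def should_interrupt(ctx: StreamContext) -> bool:
    # Mirrors the new early return in the interruption check
    if ctx.is_replying:
        return False  # skip interruption while a reply is in flight
    return True

async def main() -> None:
    ctx = StreamContext()
    task = asyncio.create_task(generate_reply(ctx))
    await asyncio.sleep(0)  # yield once so the task starts and sets the flag
    print(should_interrupt(ctx))  # False: generation in progress
    print(await task)             # "generated reply"
    print(should_interrupt(ctx))  # True: the flag was reset in finally

asyncio.run(main())

A plain boolean, rather than a lock, is enough here because the interruption
check and the reply generator run on the same asyncio event loop, so the flag
is never read and written concurrently from different threads.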