fix(chat): prevent message interruption while a reply is being generated
Resolves an issue where a reply being generated by the LLM could be wrongly interrupted by an incoming message, by introducing an `is_replying` state flag on the chat stream context.

- Set `is_replying` to `True` before calling the LLM to generate content.
- In the message-interruption check, skip the check when `is_replying` is `True`.
- Use a `finally` block to guarantee that `is_replying` is reset to `False` whether generation succeeds or fails.

This makes reply generation atomic and prevents the bot from interrupting itself when a user sends messages in rapid succession.
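For orientation, the sketch below distills the pattern this commit applies. It is a minimal, self-contained model, not the project's real API: `StreamContext`, `generate_reply`, and `should_interrupt` here are simplified stand-ins for the roles played by the project's `StreamContext`, `DefaultReplyer`, and `MessageManager`.

```python
import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class StreamContext:
    """Simplified stand-in for the real StreamContext data model."""

    def __init__(self) -> None:
        self.is_replying: bool = False  # set while a reply is being generated


async def generate_reply(ctx: StreamContext, prompt: str) -> str:
    """Mirrors the DefaultReplyer flow: set the flag, generate, always reset."""
    try:
        ctx.is_replying = True
        await asyncio.sleep(0.1)  # placeholder for the real llm_generate_content call
        return f"reply to {prompt!r}"
    finally:
        # finally guarantees the reset even if the LLM call raises
        ctx.is_replying = False


def should_interrupt(ctx: StreamContext) -> bool:
    """Mirrors the MessageManager check: never interrupt an in-flight reply."""
    if ctx.is_replying:
        logger.info("stream is replying; skipping interruption check")
        return False
    return True


async def main() -> None:
    ctx = StreamContext()
    task = asyncio.create_task(generate_reply(ctx, "hello"))
    await asyncio.sleep(0)  # let the reply task start and set the flag
    assert not should_interrupt(ctx)  # a message arriving now cannot interrupt
    print(await task)
    assert should_interrupt(ctx)  # flag was reset in the finally block


if __name__ == "__main__":
    asyncio.run(main())
```

A burst of messages arriving while `generate_reply` is awaiting the model now hits the early return instead of tearing down the in-flight reply.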
@@ -366,6 +366,11 @@ class MessageManager:
         if not global_config.chat.interruption_enabled or not chat_stream or not message:
             return
 
+        # Check whether this stream is currently replying
+        if chat_stream.context_manager.context.is_replying:
+            logger.info(f"Chat stream {chat_stream.stream_id} is generating a reply; skipping interruption check")
+            return
+
         # Check whether the message is a sticker
         if message.is_picid or message.is_emoji:
             logger.info(f"Message {message.message_id} is a sticker or emoji; skipping interruption check")
@@ -313,6 +313,8 @@ class DefaultReplyer:
         model_name = "unknown_model"
 
         try:
+            # Mark this stream as currently replying
+            self.chat_stream.context_manager.context.is_replying = True
             content, reasoning_content, model_name, tool_call = await self.llm_generate_content(prompt)
             logger.debug(f"replyer generated content: {content}")
             llm_response = {
@@ -321,6 +323,15 @@ class DefaultReplyer:
                 "model": model_name,
                 "tool_calls": tool_call,
             }
+        except UserWarning as e:
+            raise e
+        except Exception as llm_e:
+            # Keep the error message concise
+            logger.error(f"LLM generation failed: {llm_e}")
+            return False, None, prompt  # If the LLM call fails, no reply can be generated
+        finally:
+            # Reset the replying state
+            self.chat_stream.context_manager.context.is_replying = False
 
         # Fire the AFTER_LLM event
         if not from_plugin:
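Two details of this hunk are worth noting. Setting the flag as the first statement inside `try` (previous hunk) pairs it with this `finally`, so the flag cannot stay stuck at `True` even when `llm_generate_content` raises or the `UserWarning` path re-raises. The same set/reset pairing could also be packaged once as an async context manager; the sketch below is a hypothetical alternative, not what the project does:

```python
from contextlib import asynccontextmanager


@asynccontextmanager
async def replying(ctx):
    """Set ctx.is_replying for the duration of the block, resetting on any exit."""
    ctx.is_replying = True
    try:
        yield
    finally:
        # Mirrors the diff's try/finally: reset even if the body raises
        ctx.is_replying = False


# Usage sketch (ctx is any object with an is_replying attribute):
#
#     async with replying(self.chat_stream.context_manager.context):
#         content = await self.llm_generate_content(prompt)
```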
@@ -335,12 +346,6 @@ class DefaultReplyer:
                     raise UserWarning(
                         f"Plugin {result.get_summary().get('stopped_handlers', '')} cancelled content generation after the request"
                     )
-        except UserWarning as e:
-            raise e
-        except Exception as llm_e:
-            # Keep the error message concise
-            logger.error(f"LLM generation failed: {llm_e}")
-            return False, None, prompt  # If the LLM call fails, no reply can be generated
 
         # After a reply is generated successfully, store chat memory asynchronously (without blocking the return)
         try:
@@ -52,6 +52,7 @@ class StreamContext(BaseDataModel):
     priority_mode: str | None = None
     priority_info: dict | None = None
     triggering_user_id: str | None = None  # ID of the user who triggered the current chat stream
+    is_replying: bool = False  # Whether a reply is currently being generated
 
     def add_action_to_message(self, message_id: str, action: str):
         """
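Because the flag is a field on each stream's own `StreamContext`, a reply in one chat stream never suppresses interruption checks in another, and the `False` default means previously created contexts need no migration. Assuming `BaseDataModel` behaves like a pydantic `BaseModel` (an assumption; the diff does not show its definition), the field declaration works as sketched:

```python
from pydantic import BaseModel


class StreamContextSketch(BaseModel):
    """Trimmed-down sketch of StreamContext; the real model has more fields."""

    priority_mode: str | None = None
    triggering_user_id: str | None = None
    is_replying: bool = False  # new field; the default keeps existing data loadable


ctx = StreamContextSketch()
assert ctx.is_replying is False  # a fresh stream starts out not replying
```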