明天好像没什么
2025-10-26 13:10:38 +08:00
10 changed files with 214 additions and 574 deletions

View File

@@ -659,6 +659,41 @@ class ChatBot:
group_name = getattr(group_info, "group_name", None)
group_platform = getattr(group_info, "platform", None)
# Prepare additional_config and embed format_info into it
additional_config_str = None
try:
import orjson
additional_config_data = {}
# First, take the additional_config passed by the adapter
if hasattr(message_info, 'additional_config') and message_info.additional_config:
if isinstance(message_info.additional_config, dict):
additional_config_data = message_info.additional_config.copy()
elif isinstance(message_info.additional_config, str):
try:
additional_config_data = orjson.loads(message_info.additional_config)
except Exception as e:
logger.warning(f"无法解析 additional_config JSON: {e}")
additional_config_data = {}
# Then add format_info to additional_config
if hasattr(message_info, 'format_info') and message_info.format_info:
try:
format_info_dict = message_info.format_info.to_dict()
additional_config_data["format_info"] = format_info_dict
logger.debug(f"[bot.py] 嵌入 format_info 到 additional_config: {format_info_dict}")
except Exception as e:
logger.warning(f"将 format_info 转换为字典失败: {e}")
else:
logger.warning(f"[bot.py] [问题] 消息缺少 format_info: message_id={message_id}")
# Serialize to a JSON string
if additional_config_data:
additional_config_str = orjson.dumps(additional_config_data).decode("utf-8")
except Exception as e:
logger.error(f"准备 additional_config 失败: {e}")
# Create the database message object
db_message = DatabaseMessages(
message_id=message_id,
@@ -674,6 +709,7 @@ class ChatBot:
is_notify=bool(message.is_notify),
is_public_notice=bool(message.is_public_notice),
notice_type=message.notice_type,
additional_config=additional_config_str,
user_id=user_id,
user_nickname=user_nickname,
user_cardname=user_cardname,
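For context, a minimal sketch of the additional_config payload this block produces, with format_info merged alongside whatever the adapter already supplied; the adapter key and the format lists are assumed, illustrative values only:

import orjson

# Hypothetical adapter-supplied entry plus the embedded format_info (values are assumptions).
additional_config_data = {
    "adapter_option": "example-value",  # placeholder for whatever the adapter passed through
    "format_info": {
        "accept_format": ["text", "emoji", "reply"],  # formats the adapter claims to accept
        "content_format": ["text", "emoji"],          # formats present in this message
    },
}
additional_config_str = orjson.dumps(additional_config_data).decode("utf-8")
# additional_config_str is what lands on DatabaseMessages.additional_config in the constructor above.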

View File

@@ -156,6 +156,13 @@ class ChatStream:
return instance
def get_raw_id(self) -> str:
"""获取原始的、未哈希的聊天流ID字符串"""
if self.group_info:
return f"{self.platform}:{self.group_info.group_id}:group"
else:
return f"{self.platform}:{self.user_info.user_id}:private"
def update_active_time(self):
"""更新最后活跃时间"""
self.last_active_time = time.time()
@@ -213,8 +220,8 @@ class ChatStream:
priority_info=json.dumps(getattr(message, "priority_info", None))
if getattr(message, "priority_info", None)
else None,
# Additional config
additional_config=getattr(message_info, "additional_config", None),
# Additional config - format_info needs to be embedded into additional_config
additional_config=self._prepare_additional_config(message_info),
# User info
user_id=str(getattr(user_info, "user_id", "")),
user_nickname=getattr(user_info, "user_nickname", ""),
@@ -253,8 +260,59 @@ class ChatStream:
f"interest_value: {db_message.interest_value}"
)
def _prepare_additional_config(self, message_info) -> str | None:
"""
Prepare additional_config and embed format_info into it.
This method mirrors the logic in storage.py, ensuring that the additional_config on DatabaseMessages
contains format_info, so that action_modifier can correctly determine which message types the adapter supports.
Args:
message_info: BaseMessageInfo object
Returns:
str | None: additional_config as a JSON string, or None if empty
"""
import orjson
# First, take the additional_config passed by the adapter
additional_config_data = {}
if hasattr(message_info, 'additional_config') and message_info.additional_config:
if isinstance(message_info.additional_config, dict):
additional_config_data = message_info.additional_config.copy()
elif isinstance(message_info.additional_config, str):
# If it's a string, try to parse it
try:
additional_config_data = orjson.loads(message_info.additional_config)
except Exception as e:
logger.warning(f"无法解析 additional_config JSON: {e}")
additional_config_data = {}
# Then add format_info to additional_config
if hasattr(message_info, 'format_info') and message_info.format_info:
try:
format_info_dict = message_info.format_info.to_dict()
additional_config_data["format_info"] = format_info_dict
logger.debug(f"嵌入 format_info 到 additional_config: {format_info_dict}")
except Exception as e:
logger.warning(f"将 format_info 转换为字典失败: {e}")
else:
logger.warning(f"[问题] 消息缺少 format_info: message_id={getattr(message_info, 'message_id', 'unknown')}")
logger.warning("[问题] 这可能导致 Action 无法正确检查适配器支持的类型")
# Serialize to a JSON string
if additional_config_data:
try:
return orjson.dumps(additional_config_data).decode("utf-8")
except Exception as e:
logger.error(f"序列化 additional_config 失败: {e}")
return None
return None
def _safe_get_actions(self, message: "MessageRecv") -> list | None:
"""安全获取消息的actions字段"""
import json
try:
actions = getattr(message, "actions", None)
if actions is None:
@@ -263,8 +321,6 @@ class ChatStream:
# If it's a string, try to parse it as JSON
if isinstance(actions, str):
try:
import json
actions = json.loads(actions)
except json.JSONDecodeError:
logger.warning(f"无法解析actions JSON字符串: {actions}")

View File

@@ -1,487 +0,0 @@
"""
优化版聊天流 - 实现写时复制机制
避免不必要的深拷贝开销,提升多流并发性能
"""
import time
from typing import TYPE_CHECKING, Any
from maim_message import GroupInfo, UserInfo
from rich.traceback import install
from src.common.logger import get_logger
from src.config.config import global_config
if TYPE_CHECKING:
from .message import MessageRecv
install(extra_lines=3)
logger = get_logger("optimized_chat_stream")
class SharedContext:
"""共享上下文数据 - 只读数据结构"""
def __init__(self, stream_id: str, platform: str, user_info: UserInfo, group_info: GroupInfo | None = None):
self.stream_id = stream_id
self.platform = platform
self.user_info = user_info
self.group_info = group_info
self.create_time = time.time()
self._frozen = True
def __setattr__(self, name, value):
if hasattr(self, "_frozen") and self._frozen and name not in ["_frozen"]:
raise AttributeError(f"SharedContext is frozen, cannot modify {name}")
super().__setattr__(name, value)
class LocalChanges:
"""本地修改跟踪器"""
def __init__(self):
self._changes: dict[str, Any] = {}
self._dirty = False
def set_change(self, key: str, value: Any):
"""设置修改项"""
self._changes[key] = value
self._dirty = True
def get_change(self, key: str, default: Any = None) -> Any:
"""获取修改项"""
return self._changes.get(key, default)
def has_changes(self) -> bool:
"""是否有修改"""
return self._dirty
def get_changes(self) -> dict[str, Any]:
"""获取所有修改"""
return self._changes.copy()
def clear_changes(self):
"""清除修改记录"""
self._changes.clear()
self._dirty = False
class OptimizedChatStream:
"""优化版聊天流 - 使用写时复制机制"""
def __init__(
self,
stream_id: str,
platform: str,
user_info: UserInfo,
group_info: GroupInfo | None = None,
data: dict | None = None,
):
# 共享的只读数据
self._shared_context = SharedContext(
stream_id=stream_id, platform=platform, user_info=user_info, group_info=group_info
)
# 本地修改数据
self._local_changes = LocalChanges()
# 写时复制标志
self._copy_on_write = False
# 基础参数
self.base_interest_energy = data.get("base_interest_energy", 0.5) if data else 0.5
self._focus_energy = data.get("focus_energy", 0.5) if data else 0.5
self.no_reply_consecutive = 0
# 创建StreamContext延迟创建
self._stream_context = None
self._context_manager = None
# 更新活跃时间
self.update_active_time()
# 保存标志
self.saved = False
@property
def stream_id(self) -> str:
return self._shared_context.stream_id
@property
def platform(self) -> str:
return self._shared_context.platform
@property
def user_info(self) -> UserInfo:
return self._shared_context.user_info
@user_info.setter
def user_info(self, value: UserInfo):
"""修改用户信息时触发写时复制"""
self._ensure_copy_on_write()
# 由于SharedContext是frozen的我们需要在本地修改中记录
self._local_changes.set_change("user_info", value)
@property
def group_info(self) -> GroupInfo | None:
if self._local_changes.has_changes() and "group_info" in self._local_changes._changes:
return self._local_changes.get_change("group_info")
return self._shared_context.group_info
@group_info.setter
def group_info(self, value: GroupInfo | None):
"""修改群组信息时触发写时复制"""
self._ensure_copy_on_write()
self._local_changes.set_change("group_info", value)
@property
def create_time(self) -> float:
if self._local_changes.has_changes() and "create_time" in self._local_changes._changes:
return self._local_changes.get_change("create_time")
return self._shared_context.create_time
@property
def last_active_time(self) -> float:
return self._local_changes.get_change("last_active_time", self.create_time)
@last_active_time.setter
def last_active_time(self, value: float):
self._local_changes.set_change("last_active_time", value)
self.saved = False
@property
def sleep_pressure(self) -> float:
return self._local_changes.get_change("sleep_pressure", 0.0)
@sleep_pressure.setter
def sleep_pressure(self, value: float):
self._local_changes.set_change("sleep_pressure", value)
self.saved = False
def _ensure_copy_on_write(self):
"""确保写时复制机制生效"""
if not self._copy_on_write:
self._copy_on_write = True
# 深拷贝共享上下文到本地
logger.debug(f"触发写时复制: {self.stream_id}")
def _get_effective_user_info(self) -> UserInfo:
"""获取有效的用户信息"""
if self._local_changes.has_changes() and "user_info" in self._local_changes._changes:
return self._local_changes.get_change("user_info")
return self._shared_context.user_info
def _get_effective_group_info(self) -> GroupInfo | None:
"""获取有效的群组信息"""
if self._local_changes.has_changes() and "group_info" in self._local_changes._changes:
return self._local_changes.get_change("group_info")
return self._shared_context.group_info
def update_active_time(self):
"""更新最后活跃时间"""
self.last_active_time = time.time()
def set_context(self, message: "MessageRecv"):
"""设置聊天消息上下文"""
# 确保stream_context存在
if self._stream_context is None:
self._ensure_copy_on_write()
self._create_stream_context()
# 将MessageRecv转换为DatabaseMessages并设置到stream_context
import json
from src.common.data_models.database_data_model import DatabaseMessages
message_info = getattr(message, "message_info", {})
user_info = getattr(message_info, "user_info", {})
group_info = getattr(message_info, "group_info", {})
reply_to = None
if hasattr(message, "message_segment") and message.message_segment:
reply_to = self._extract_reply_from_segment(message.message_segment)
db_message = DatabaseMessages(
message_id=getattr(message, "message_id", ""),
time=getattr(message, "time", time.time()),
chat_id=self._generate_chat_id(message_info),
reply_to=reply_to,
interest_value=getattr(message, "interest_value", 0.0),
key_words=json.dumps(getattr(message, "key_words", []), ensure_ascii=False)
if getattr(message, "key_words", None)
else None,
key_words_lite=json.dumps(getattr(message, "key_words_lite", []), ensure_ascii=False)
if getattr(message, "key_words_lite", None)
else None,
is_mentioned=getattr(message, "is_mentioned", None),
is_at=getattr(message, "is_at", False),
is_emoji=getattr(message, "is_emoji", False),
is_picid=getattr(message, "is_picid", False),
is_voice=getattr(message, "is_voice", False),
is_video=getattr(message, "is_video", False),
is_command=getattr(message, "is_command", False),
is_notify=getattr(message, "is_notify", False),
is_public_notice=getattr(message, "is_public_notice", False),
notice_type=getattr(message, "notice_type", None),
processed_plain_text=getattr(message, "processed_plain_text", ""),
display_message=getattr(message, "processed_plain_text", ""),
priority_mode=getattr(message, "priority_mode", None),
priority_info=json.dumps(getattr(message, "priority_info", None))
if getattr(message, "priority_info", None)
else None,
additional_config=getattr(message_info, "additional_config", None),
user_id=str(getattr(user_info, "user_id", "")),
user_nickname=getattr(user_info, "user_nickname", ""),
user_cardname=getattr(user_info, "user_cardname", None),
user_platform=getattr(user_info, "platform", ""),
chat_info_group_id=getattr(group_info, "group_id", None),
chat_info_group_name=getattr(group_info, "group_name", None),
chat_info_group_platform=getattr(group_info, "platform", None),
chat_info_user_id=str(getattr(user_info, "user_id", "")),
chat_info_user_nickname=getattr(user_info, "user_nickname", ""),
chat_info_user_cardname=getattr(user_info, "user_cardname", None),
chat_info_user_platform=getattr(user_info, "platform", ""),
chat_info_stream_id=self.stream_id,
chat_info_platform=self.platform,
chat_info_create_time=self.create_time,
chat_info_last_active_time=self.last_active_time,
actions=self._safe_get_actions(message),
should_reply=getattr(message, "should_reply", False),
)
self._stream_context.set_current_message(db_message)
self._stream_context.priority_mode = getattr(message, "priority_mode", None)
self._stream_context.priority_info = getattr(message, "priority_info", None)
logger.debug(
f"消息数据转移完成 - message_id: {db_message.message_id}, "
f"chat_id: {db_message.chat_id}, "
f"interest_value: {db_message.interest_value}"
)
def _create_stream_context(self):
"""创建StreamContext"""
from src.common.data_models.message_manager_data_model import StreamContext
from src.plugin_system.base.component_types import ChatMode, ChatType
self._stream_context = StreamContext(
stream_id=self.stream_id,
chat_type=ChatType.GROUP if self.group_info else ChatType.PRIVATE,
chat_mode=ChatMode.NORMAL,
)
# 创建单流上下文管理器
from src.chat.message_manager.context_manager import SingleStreamContextManager
self._context_manager = SingleStreamContextManager(stream_id=self.stream_id, context=self._stream_context)
@property
def stream_context(self):
"""获取StreamContext"""
if self._stream_context is None:
self._ensure_copy_on_write()
self._create_stream_context()
return self._stream_context
@property
def context_manager(self):
"""获取ContextManager"""
if self._context_manager is None:
self._ensure_copy_on_write()
self._create_stream_context()
return self._context_manager
def to_dict(self) -> dict[str, Any]:
"""转换为字典格式 - 考虑本地修改"""
user_info = self._get_effective_user_info()
group_info = self._get_effective_group_info()
return {
"stream_id": self.stream_id,
"platform": self.platform,
"user_info": user_info.to_dict() if user_info else None,
"group_info": group_info.to_dict() if group_info else None,
"create_time": self.create_time,
"last_active_time": self.last_active_time,
"sleep_pressure": self.sleep_pressure,
"focus_energy": self.focus_energy,
"base_interest_energy": self.base_interest_energy,
"stream_context_chat_type": self.stream_context.chat_type.value,
"stream_context_chat_mode": self.stream_context.chat_mode.value,
"interruption_count": self.stream_context.interruption_count,
}
@classmethod
def from_dict(cls, data: dict) -> "OptimizedChatStream":
"""从字典创建实例"""
user_info = UserInfo.from_dict(data.get("user_info", {})) if data.get("user_info") else None
group_info = GroupInfo.from_dict(data.get("group_info", {})) if data.get("group_info") else None
instance = cls(
stream_id=data["stream_id"],
platform=data["platform"],
user_info=user_info, # type: ignore
group_info=group_info,
data=data,
)
# 恢复stream_context信息
if "stream_context_chat_type" in data:
from src.plugin_system.base.component_types import ChatMode, ChatType
instance.stream_context.chat_type = ChatType(data["stream_context_chat_type"])
if "stream_context_chat_mode" in data:
from src.plugin_system.base.component_types import ChatMode, ChatType
instance.stream_context.chat_mode = ChatMode(data["stream_context_chat_mode"])
# 恢复interruption_count信息
if "interruption_count" in data:
instance.stream_context.interruption_count = data["interruption_count"]
return instance
def _safe_get_actions(self, message: "MessageRecv") -> list | None:
"""安全获取消息的actions字段"""
try:
actions = getattr(message, "actions", None)
if actions is None:
return None
if isinstance(actions, str):
try:
import json
actions = json.loads(actions)
except json.JSONDecodeError:
logger.warning(f"无法解析actions JSON字符串: {actions}")
return None
if isinstance(actions, list):
filtered_actions = [action for action in actions if action is not None and isinstance(action, str)]
return filtered_actions if filtered_actions else None
else:
logger.warning(f"actions字段类型不支持: {type(actions)}")
return None
except Exception as e:
logger.warning(f"获取actions字段失败: {e}")
return None
def _extract_reply_from_segment(self, segment) -> str | None:
"""从消息段中提取reply_to信息"""
try:
if hasattr(segment, "type") and segment.type == "seglist":
if hasattr(segment, "data") and segment.data:
for seg in segment.data:
reply_id = self._extract_reply_from_segment(seg)
if reply_id:
return reply_id
elif hasattr(segment, "type") and segment.type == "reply":
return str(segment.data) if segment.data else None
except Exception as e:
logger.warning(f"提取reply_to信息失败: {e}")
return None
def _generate_chat_id(self, message_info) -> str:
"""生成chat_id基于群组或用户信息"""
try:
group_info = getattr(message_info, "group_info", None)
user_info = getattr(message_info, "user_info", None)
if group_info and hasattr(group_info, "group_id") and group_info.group_id:
return f"{self.platform}_{group_info.group_id}"
elif user_info and hasattr(user_info, "user_id") and user_info.user_id:
return f"{self.platform}_{user_info.user_id}_private"
else:
return self.stream_id
except Exception as e:
logger.warning(f"生成chat_id失败: {e}")
return self.stream_id
@property
def focus_energy(self) -> float:
"""获取缓存的focus_energy值"""
return self._focus_energy
async def calculate_focus_energy(self) -> float:
"""异步计算focus_energy"""
try:
all_messages = self.context_manager.get_messages(limit=global_config.chat.max_context_size)
user_id = None
effective_user_info = self._get_effective_user_info()
if effective_user_info and hasattr(effective_user_info, "user_id"):
user_id = str(effective_user_info.user_id)
from src.chat.energy_system import energy_manager
energy = await energy_manager.calculate_focus_energy(
stream_id=self.stream_id, messages=all_messages, user_id=user_id
)
self._focus_energy = energy
logger.debug(f"聊天流 {self.stream_id} 能量: {energy:.3f}")
return energy
except Exception as e:
logger.error(f"获取focus_energy失败: {e}", exc_info=True)
return self._focus_energy
@focus_energy.setter
def focus_energy(self, value: float):
"""设置focus_energy值"""
self._focus_energy = max(0.0, min(1.0, value))
async def _get_user_relationship_score(self) -> float:
"""获取用户关系分"""
try:
from src.plugin_system.apis.scoring_api import scoring_api
effective_user_info = self._get_effective_user_info()
if effective_user_info and hasattr(effective_user_info, "user_id"):
user_id = str(effective_user_info.user_id)
relationship_score = await scoring_api.get_user_relationship_score(user_id)
logger.debug(f"OptimizedChatStream {self.stream_id}: 用户关系分 = {relationship_score:.3f}")
return relationship_score
except Exception as e:
logger.warning(f"OptimizedChatStream {self.stream_id}: 关系分计算失败: {e}")
return 0.3
def create_snapshot(self) -> "OptimizedChatStream":
"""创建当前状态的快照(用于缓存)"""
# 创建一个新的实例,共享相同的上下文
snapshot = OptimizedChatStream(
stream_id=self.stream_id,
platform=self.platform,
user_info=self._get_effective_user_info(),
group_info=self._get_effective_group_info(),
)
# 复制本地修改(但不触发写时复制)
snapshot._local_changes._changes = self._local_changes.get_changes()
snapshot._local_changes._dirty = self._local_changes._dirty
snapshot._focus_energy = self._focus_energy
snapshot.base_interest_energy = self.base_interest_energy
snapshot.no_reply_consecutive = self.no_reply_consecutive
snapshot.saved = self.saved
return snapshot
# 为了向后兼容,创建一个工厂函数
def create_optimized_chat_stream(
stream_id: str,
platform: str,
user_info: UserInfo,
group_info: GroupInfo | None = None,
data: dict | None = None,
) -> OptimizedChatStream:
"""创建优化版聊天流实例"""
return OptimizedChatStream(
stream_id=stream_id, platform=platform, user_info=user_info, group_info=group_info, data=data
)

View File

@@ -796,44 +796,63 @@ class DefaultReplyer:
async def build_keywords_reaction_prompt(self, target: str | None) -> str:
"""构建关键词反应提示
该方法根据配置的关键词和正则表达式规则,
检查目标消息内容是否触发了任何反应。
如果匹配成功,它会生成一个包含所有触发反应的提示字符串,
用于指导LLM的回复。
Args:
target: 目标消息内容
Returns:
str: 关键词反应提示字符串
str: 关键词反应提示字符串,如果没有触发任何反应则为空字符串
"""
# Keyword detection and reaction
keywords_reaction_prompt = ""
try:
# Add a None check to prevent NoneType errors
if target is None:
return keywords_reaction_prompt
return ""
# Handle keyword rules
for rule in global_config.keyword_reaction.keyword_rules:
if any(keyword in target for keyword in rule.keywords):
logger.info(f"检测到关键词规则:{rule.keywords},触发反应:{rule.reaction}")
keywords_reaction_prompt += f"{rule.reaction}"
reaction_prompt = ""
try:
current_chat_stream_id_str = self.chat_stream.get_raw_id()
# 2. Filter the applicable rules (global rules + rules specific to the current chat)
applicable_rules = []
for rule in global_config.reaction.rules:
if rule.chat_stream_id == "" or rule.chat_stream_id == current_chat_stream_id_str:
applicable_rules.append(rule) # noqa: PERF401
# Handle regular-expression rules
for rule in global_config.keyword_reaction.regex_rules:
for pattern_str in rule.regex:
# 3. Iterate over the applicable rules and perform matching
for rule in applicable_rules:
matched = False
if rule.rule_type == "keyword":
if any(keyword in target for keyword in rule.patterns):
logger.info(f"检测到关键词规则:{rule.patterns},触发反应:{rule.reaction}")
reaction_prompt += f"{rule.reaction}"
matched = True
elif rule.rule_type == "regex":
for pattern_str in rule.patterns:
try:
pattern = re.compile(pattern_str)
if result := pattern.search(target):
reaction = rule.reaction
# Substitute named capture groups
for name, content in result.groupdict().items():
reaction = reaction.replace(f"[{name}]", content)
logger.info(f"匹配到正则表达式:{pattern_str},触发反应:{reaction}")
keywords_reaction_prompt += f"{reaction}"
break
reaction_prompt += f"{reaction}"
matched = True
break # one matching pattern within a regex rule is enough
except re.error as e:
logger.error(f"正则表达式编译错误: {pattern_str}, 错误信息: {e!s}")
continue
if matched:
# If each message should trigger at most one reaction rule, break here
pass
except Exception as e:
logger.error(f"关键词检测与反应时发生异常: {e!s}", exc_info=True)
return keywords_reaction_prompt
return reaction_prompt
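A minimal, runnable sketch of the named-capture-group substitution that the regex branch above performs, reusing the pattern from the [[reaction.rules]] example in the config template below; the target string and the shortened reaction text are assumptions:

import re

pattern_str = r"^(?P<n>\S{1,20})是这样的$"       # pattern from the config template below
reaction = "请按照以下模板造句:[n]是这样的……"     # shortened reaction text; references the group as [n]
target = "猫猫是这样的"                           # assumed incoming message

if result := re.compile(pattern_str).search(target):
    # Each named group is substituted into the reaction via the [name] placeholder
    for name, content in result.groupdict().items():
        reaction = reaction.replace(f"[{name}]", content)
print(reaction)  # -> 请按照以下模板造句:猫猫是这样的……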
async def build_notice_block(self, chat_id: str) -> str:
"""构建notice信息块

View File

@@ -430,17 +430,11 @@ def process_llm_response(text: str, enable_splitter: bool = True, enable_chinese
if global_config.response_splitter.enable and enable_splitter:
logger.info(f"回复分割器已启用,模式: {global_config.response_splitter.split_mode}")
split_mode = global_config.response_splitter.split_mode
if split_mode == "llm" and "[SPLIT]" in cleaned_text:
if "[SPLIT]" in cleaned_text:
logger.debug("检测到 [SPLIT] 标记,使用 LLM 自定义分割。")
split_sentences_raw = cleaned_text.split("[SPLIT]")
split_sentences = [s.strip() for s in split_sentences_raw if s.strip()]
else:
if split_mode == "llm":
logger.debug("未检测到 [SPLIT] 标记,本次不进行分割。")
split_sentences = [cleaned_text]
else: # mode == "punctuation"
logger.debug("使用基于标点的传统模式进行分割。")
split_sentences = split_into_sentences_w_remove_punctuation(cleaned_text)
else:
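A minimal sketch of the splitter behavior after this change: a [SPLIT] marker now always triggers LLM-style splitting regardless of split_mode, while an unmarked reply in "llm" mode stays whole (split_into_sentences_w_remove_punctuation is the module's punctuation splitter and is only referenced in a comment here):

def sketch_split(cleaned_text: str, split_mode: str) -> list[str]:
    # Illustrative reimplementation of the branch above, not the module's actual function.
    if "[SPLIT]" in cleaned_text:
        return [s.strip() for s in cleaned_text.split("[SPLIT]") if s.strip()]
    if split_mode == "llm":
        return [cleaned_text]  # no marker -> do not split this time
    return [cleaned_text]      # punctuation mode would call split_into_sentences_w_remove_punctuation here

print(sketch_split("早安[SPLIT]今天也要加油", "llm"))  # -> ['早安', '今天也要加油']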

View File

@@ -208,22 +208,28 @@ class StreamContext(BaseDataModel):
bool: True if the message supports all of the specified types, otherwise False
"""
if not self.current_message:
logger.warning("[问题] StreamContext.check_types: current_message 为 None")
return False
if not types:
# If no types are required, default to supported
return True
logger.debug(f"[check_types] 检查消息是否支持类型: {types}")
# Prefer reading format_info from additional_config
if hasattr(self.current_message, "additional_config") and self.current_message.additional_config:
try:
import orjson
logger.debug(f"[check_types] additional_config 类型: {type(self.current_message.additional_config)}")
config = orjson.loads(self.current_message.additional_config)
logger.debug(f"[check_types] 解析后的 config 键: {config.keys() if isinstance(config, dict) else 'N/A'}")
# Check the format_info structure
if "format_info" in config:
format_info = config["format_info"]
logger.debug(f"[check_types] 找到 format_info: {format_info}")
# Method 1: check the accept_format field directly
if "accept_format" in format_info:
@@ -240,8 +246,9 @@ class StreamContext(BaseDataModel):
# Check that every requested type is supported
for requested_type in types:
if requested_type not in accept_format:
logger.debug(f"消息不支持类型 '{requested_type}',支持的类型: {accept_format}")
logger.debug(f"[check_types] 消息不支持类型 '{requested_type}',支持的类型: {accept_format}")
return False
logger.debug(f"[check_types] ✅ 消息支持所有请求的类型 (来自 accept_format)")
return True
# Method 2: check the content_format field (backwards compatible)
@@ -258,22 +265,30 @@ class StreamContext(BaseDataModel):
# Check that every requested type is supported
for requested_type in types:
if requested_type not in content_format:
logger.debug(f"消息不支持类型 '{requested_type}',支持的内容格式: {content_format}")
logger.debug(f"[check_types] 消息不支持类型 '{requested_type}',支持的内容格式: {content_format}")
return False
logger.debug(f"[check_types] ✅ 消息支持所有请求的类型 (来自 content_format)")
return True
else:
logger.warning("[check_types] [问题] additional_config 中没有 format_info 字段")
except (orjson.JSONDecodeError, AttributeError, TypeError) as e:
logger.debug(f"解析消息格式信息失败: {e}")
logger.warning(f"[check_types] [问题] 解析消息格式信息失败: {e}")
else:
logger.warning("[check_types] [问题] current_message 没有 additional_config 或为空")
# Fallback: if format info cannot be obtained from additional_config, use the default supported types
# Most messages support at least the text type
logger.debug("[check_types] 使用备用方案:默认支持类型检查")
default_supported_types = ["text", "emoji"]
for requested_type in types:
if requested_type not in default_supported_types:
logger.debug(f"使用默认类型检查,消息可能不支持类型 '{requested_type}'")
logger.debug(f"[check_types] 使用默认类型检查,消息可能不支持类型 '{requested_type}'")
# Return False for non-basic types to avoid errors
if requested_type not in ["text", "emoji", "reply"]:
logger.warning(f"[check_types] ❌ 备用方案拒绝类型 '{requested_type}'")
return False
logger.debug("[check_types] ✅ 备用方案通过所有类型检查")
return True
def get_priority_mode(self) -> str | None:
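A self-contained sketch of the lookup path check_types takes above: parse additional_config, find format_info, then compare the requested types against accept_format (the stored payload is an assumed example):

import orjson

# Assumed stored value, shaped like what _prepare_additional_config produces.
additional_config = orjson.dumps(
    {"format_info": {"accept_format": ["text", "emoji", "reply"]}}
).decode("utf-8")

def supports(types: list[str]) -> bool:
    config = orjson.loads(additional_config)
    accept_format = config.get("format_info", {}).get("accept_format", [])
    return all(t in accept_format for t in types)

print(supports(["text", "reply"]))  # True
print(supports(["image"]))          # False -> the corresponding Action would be filtered out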

View File

@@ -26,7 +26,7 @@ from src.config.official_configs import (
EmojiConfig,
ExperimentalConfig,
ExpressionConfig,
KeywordReactionConfig,
ReactionConfig,
LPMMKnowledgeConfig,
MaimMessageConfig,
MemoryConfig,
@@ -384,7 +384,7 @@ class Config(ValidatedConfigBase):
expression: ExpressionConfig = Field(..., description="表达配置")
memory: MemoryConfig = Field(..., description="记忆配置")
mood: MoodConfig = Field(..., description="情绪配置")
keyword_reaction: KeywordReactionConfig = Field(..., description="关键词反应配置")
reaction: ReactionConfig = Field(default_factory=ReactionConfig, description="反应规则配置")
chinese_typo: ChineseTypoConfig = Field(..., description="中文错别字配置")
response_post_process: ResponsePostProcessConfig = Field(..., description="响应后处理配置")
response_splitter: ResponseSplitterConfig = Field(..., description="响应分割配置")

View File

@@ -401,32 +401,31 @@ class MoodConfig(ValidatedConfigBase):
mood_update_threshold: float = Field(default=1.0, description="情绪更新阈值")
class KeywordRuleConfig(ValidatedConfigBase):
"""关键词规则配置类"""
class ReactionRuleConfig(ValidatedConfigBase):
"""反应规则配置类"""
keywords: list[str] = Field(default_factory=lambda: [], description="关键词列表")
regex: list[str] = Field(default_factory=lambda: [], description="正则表达式列表")
reaction: str = Field(default="", description="反应内容")
chat_stream_id: str = Field(default="", description='聊天流ID,格式为 "platform:id:type",空字符串表示全局')
rule_type: Literal["keyword", "regex"] = Field(..., description='规则类型,必须是 "keyword" 或 "regex"')
patterns: list[str] = Field(..., description="关键词或正则表达式列表")
reaction: str = Field(..., description="触发后的回复内容")
def __post_init__(self):
import re
if not self.keywords and not self.regex:
raise ValueError("关键词规则必须至少包含keywords或regex中的一个")
if not self.reaction:
raise ValueError("关键词规则必须包含reaction")
for pattern in self.regex:
if not self.patterns:
raise ValueError("patterns 列表不能为空")
if self.rule_type == "regex":
for pattern in self.patterns:
try:
re.compile(pattern)
except re.error as e:
raise ValueError(f"无效的正则表达式 '{pattern}': {e!s}") from e
class KeywordReactionConfig(ValidatedConfigBase):
"""关键词配置"""
class ReactionConfig(ValidatedConfigBase):
"""反应规则系统配置"""
keyword_rules: list[KeywordRuleConfig] = Field(default_factory=lambda: [], description="关键词规则列表")
regex_rules: list[KeywordRuleConfig] = Field(default_factory=lambda: [], description="正则表达式规则列表")
rules: list[ReactionRuleConfig] = Field(default_factory=list, description="反应规则列表")
class CustomPromptConfig(ValidatedConfigBase):

View File

@@ -166,10 +166,10 @@ class NoticeHandler:
logger.warning("notice处理失败或不支持")
return None
group_info: GroupInfo = None
group_info: GroupInfo | None = None
if group_id:
fetched_group_info = await get_group_info(self.get_server_connection(), group_id)
group_name: str = None
group_name: str | None = None
if fetched_group_info:
group_name = fetched_group_info.get("group_name")
else:

View File

@@ -279,12 +279,6 @@ max_frequency_bonus = 10.0 # 最大激活频率奖励天数
# Dormancy mechanism
dormant_threshold_days = 90 # Days before a memory is considered dormant (memories not accessed for longer than this enter the dormant state)
# Unified storage configuration (deprecated - use the Vector DB configuration instead)
# DEPRECATED: unified_storage_path = "data/unified_memory"
# DEPRECATED: unified_storage_cache_limit = 10000
# DEPRECATED: unified_storage_auto_save_interval = 50
# DEPRECATED: unified_storage_enable_compression = true
# Vector DB storage configuration (new - replaces JSON storage)
enable_vector_memory_storage = true # Enable Vector DB storage
enable_llm_instant_memory = true # Enable LLM-based instant memory
@@ -336,22 +330,36 @@ qa_ppr_damping = 0.8 # PPR阻尼系数
qa_res_top_k = 3 # Top-K passages ultimately provided
embedding_dimension = 1024 # Embedding vector dimension; should match the model's output dimension
# keyword_rules configures extra reply knowledge triggered by keywords
# To add a new rule, append an entry to the keyword_rules array in the following format:
# { keywords = ["keyword 1", "keyword 2"], reaction = "reply content when these keywords are triggered" }
# For example, to add a rule that replies "你好,有什么可以帮你?" when "你好" or "hello" is detected:
# { keywords = ["你好", "hello"], reaction = "你好,有什么可以帮你?" }
[keyword_reaction]
keyword_rules = [
{ keywords = ["人机", "bot", "机器", "入机", "robot", "机器人", "ai", "AI"], reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" },
{ keywords = ["测试关键词回复", "test"], reaction = "回答测试成功" },
#{ keywords = ["你好", "hello"], reaction = "你好,有什么可以帮你?" }
# Add more rules here, same format as above
]
# --- Reaction rule system ---
# Here you can define a series of auto-reply rules based on keywords or regular expressions.
# Each rule is an independent [[reaction.rules]] block.
regex_rules = [
{ regex = ["^(?P<n>\\S{1,20})是这样的$"], reaction = "请按照以下模板造句:[n]是这样的xx只要xx就可以可是[n]要考虑的事情就很多了比如什么时候xx什么时候xx什么时候xx。请自由发挥替换xx部分只需保持句式结构同时表达一种将[n]过度重视的反讽意味)" }
]
# chat_stream_id: chat stream ID (format "platform:id:type").
# Used to restrict the rule to a single chat.
# If left empty (""), the rule is global and applies to all chats.
# rule_type: the rule type, must be "keyword" or "regex".
# "keyword": this rule matches by keywords.
# "regex": this rule matches by regular expressions.
# patterns: a list of strings; depending on rule_type, these are keywords or regular expressions.
# reaction: the reply the bot sends when the rule is triggered.
[[reaction.rules]]
chat_stream_id = ""
rule_type = "keyword"
patterns = ["人机", "bot", "机器", "入机", "robot", "机器人", "ai", "AI"]
reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认"
[[reaction.rules]]
chat_stream_id = ""
rule_type = "keyword"
patterns = ["测试关键词回复", "test"]
reaction = "回答测试成功"
[[reaction.rules]]
chat_stream_id = ""
rule_type = "regex"
patterns = ["^(?P<n>\\S{1,20})是这样的$"]
reaction = "请按照以下模板造句:[n]是这样的xx只要xx就可以可是[n]要考虑的事情就很多了比如什么时候xx什么时候xx什么时候xx。请自由发挥替换xx部分只需保持句式结构同时表达一种将[n]过度重视的反讽意味)"
# Some prompts can be customized
[custom_prompt]