feat(affinity-flow): 通过标签扩展与提及分类增强兴趣匹配

- 实施扩展标签描述以实现更精确的语义匹配
- 增加强/弱提及分类,并附带独立的兴趣评分
- 重构机器人兴趣管理器,采用动态嵌入生成与缓存机制
- 通过增强的@提及处理功能优化消息处理
- 更新配置以支持回帖提升机制
- 将亲和力流量聊天重新组织为模块化结构,包含核心、规划器、主动响应和工具子模块
- 移除已弃用的规划器组件并整合功能
- 为napcat适配器插件添加数据库表初始化功能
- 修复元事件处理器中的心跳监控
This commit is contained in:
Windpicker-owo
2025-11-03 22:24:51 +08:00
parent eeb77e0e3c
commit a6d2aee781
28 changed files with 1217 additions and 168 deletions

View File

@@ -26,6 +26,8 @@ class BotInterestManager:
def __init__(self):
self.current_interests: BotPersonalityInterests | None = None
self.embedding_cache: dict[str, list[float]] = {} # embedding缓存
self.expanded_tag_cache: dict[str, str] = {} # 扩展标签缓存
self.expanded_embedding_cache: dict[str, list[float]] = {} # 扩展标签的embedding缓存
self._initialized = False
# Embedding客户端配置
@@ -169,22 +171,47 @@ class BotInterestManager:
1. 标签应该符合人设特点和性格
2. 每个标签都有权重0.1-1.0),表示对该兴趣的喜好程度
3. 生成15-25个不等的标签
4. 标签应该是具体的关键词,而不是抽象概念
5. 每个标签的长度不超过10个字符
4. 每个标签包含两个部分:
- name: 简短的标签名2-6个字符用于显示和管理"Python""追番""撸猫"
- expanded: 完整的描述性文本20-50个字符用于语义匹配描述这个兴趣的具体内容和场景
5. expanded 扩展描述要求:
- 必须是完整的句子或短语,包含丰富的语义信息
- 描述具体的对话场景、活动内容、相关话题
- 避免过于抽象,要有明确的语境
- 示例:
* "Python" -> "讨论Python编程语言、写Python代码、Python脚本开发、Python技术问题"
* "追番" -> "讨论正在播出的动漫番剧、追番进度、动漫剧情、番剧推荐、动漫角色"
* "撸猫" -> "讨论猫咪宠物、晒猫分享、萌宠日常、可爱猫猫、养猫心得"
* "社恐" -> "表达社交焦虑、不想见人、想躲起来、害怕社交的心情"
* "深夜码代码" -> "深夜写代码、熬夜编程、夜猫子程序员、深夜调试bug"
请以JSON格式返回格式如下
{{
"interests": [
{{"name": "标签名", "weight": 0.8}},
{{"name": "标签名", "weight": 0.6}},
{{"name": "标签名", "weight": 0.9}}
{{
"name": "Python",
"expanded": "讨论Python编程语言、写Python代码、Python脚本开发、Python技术问题",
"weight": 0.9
}},
{{
"name": "追番",
"expanded": "讨论正在播出的动漫番剧、追番进度、动漫剧情、番剧推荐、动漫角色",
"weight": 0.85
}},
{{
"name": "撸猫",
"expanded": "讨论猫咪宠物、晒猫分享、萌宠日常、可爱猫猫、养猫心得",
"weight": 0.95
}}
]
}}
注意:
- 权重范围0.1-1.0,权重越高表示越感兴趣
- 标签要具体,如"编程""游戏""旅行"
- 根据人设生成个性化的标签
- name: 简短标签名2-6个字符方便显示
- expanded: 完整描述20-50个字符用于精准的语义匹配
- weight: 权重范围0.1-1.0,权重越高表示越感兴趣
- 根据人设生成个性化、具体的标签和描述
- expanded 描述要有具体场景,避免泛化
"""
# 调用LLM生成兴趣标签
@@ -211,16 +238,22 @@ class BotInterestManager:
for i, tag_data in enumerate(interests_list):
tag_name = tag_data.get("name", f"标签_{i}")
weight = tag_data.get("weight", 0.5)
expanded = tag_data.get("expanded") # 获取扩展描述
# 检查标签长度,如果过长则截断
if len(tag_name) > 10:
logger.warning(f"⚠️ 标签 '{tag_name}' 过长将截断为10个字符")
tag_name = tag_name[:10]
tag = BotInterestTag(tag_name=tag_name, weight=weight)
bot_interests.interest_tags.append(tag)
# 验证扩展描述
if expanded:
logger.debug(f" 🏷️ {tag_name} (权重: {weight:.2f})")
logger.debug(f" 📝 扩展: {expanded}")
else:
logger.warning(f" ⚠️ 标签 '{tag_name}' 缺少扩展描述,将使用回退方案")
logger.debug(f" 🏷️ {tag_name} (权重: {weight:.2f})")
tag = BotInterestTag(tag_name=tag_name, weight=weight, expanded=expanded)
bot_interests.interest_tags.append(tag)
# 为所有标签生成embedding
logger.info("🧠 开始为兴趣标签生成embedding向量...")
@@ -284,12 +317,12 @@ class BotInterestManager:
return None
async def _generate_embeddings_for_tags(self, interests: BotPersonalityInterests):
"""为所有兴趣标签生成embedding"""
"""为所有兴趣标签生成embedding(仅缓存在内存中)"""
if not hasattr(self, "embedding_request"):
raise RuntimeError("❌ Embedding客户端未初始化无法生成embedding")
total_tags = len(interests.interest_tags)
logger.info(f"🧠 开始为 {total_tags} 个兴趣标签生成embedding向量...")
logger.info(f"🧠 开始为 {total_tags} 个兴趣标签生成embedding向量(动态生成,仅内存缓存)...")
cached_count = 0
generated_count = 0
@@ -297,22 +330,22 @@ class BotInterestManager:
for i, tag in enumerate(interests.interest_tags, 1):
if tag.tag_name in self.embedding_cache:
# 使用缓存的embedding
# 使用内存缓存的embedding
tag.embedding = self.embedding_cache[tag.tag_name]
cached_count += 1
logger.debug(f" [{i}/{total_tags}] 🏷️ '{tag.tag_name}' - 使用缓存")
logger.debug(f" [{i}/{total_tags}] 🏷️ '{tag.tag_name}' - 使用内存缓存")
else:
# 生成新的embedding
# 动态生成新的embedding
embedding_text = tag.tag_name
logger.debug(f" [{i}/{total_tags}] 🔄 正在为 '{tag.tag_name}' 生成embedding...")
logger.debug(f" [{i}/{total_tags}] 🔄 正在为 '{tag.tag_name}' 动态生成embedding...")
embedding = await self._get_embedding(embedding_text)
if embedding:
tag.embedding = embedding
self.embedding_cache[tag.tag_name] = embedding
tag.embedding = embedding # 设置到 tag 对象(内存中)
self.embedding_cache[tag.tag_name] = embedding # 同时缓存
generated_count += 1
logger.debug(f"'{tag.tag_name}' embedding生成成功")
logger.debug(f"'{tag.tag_name}' embedding动态生成成功并缓存到内存")
else:
failed_count += 1
logger.warning(f"'{tag.tag_name}' embedding生成失败")
@@ -322,12 +355,12 @@ class BotInterestManager:
interests.last_updated = datetime.now()
logger.info("=" * 50)
logger.info("✅ Embedding生成完成!")
logger.info("✅ Embedding动态生成完成(仅存储在内存中)!")
logger.info(f"📊 总标签数: {total_tags}")
logger.info(f"💾 缓存命中: {cached_count}")
logger.info(f"💾 内存缓存命中: {cached_count}")
logger.info(f"🆕 新生成: {generated_count}")
logger.info(f"❌ 失败: {failed_count}")
logger.info(f"🗃️ 缓存大小: {len(self.embedding_cache)}")
logger.info(f"🗃️ 内存缓存大小: {len(self.embedding_cache)}")
logger.info("=" * 50)
async def _get_embedding(self, text: str) -> list[float]:
@@ -421,7 +454,19 @@ class BotInterestManager:
async def calculate_interest_match(
self, message_text: str, keywords: list[str] | None = None
) -> InterestMatchResult:
"""计算消息与机器人兴趣的匹配度"""
"""计算消息与机器人兴趣的匹配度(优化版 - 标签扩展策略)
核心优化:将短标签扩展为完整的描述性句子,解决语义粒度不匹配问题
原问题:
- 消息: "今天天气不错" (完整句子)
- 标签: "蹭人治愈" (2-4字短语)
- 结果: 误匹配,因为短标签的 embedding 过于抽象
解决方案:
- 标签扩展: "蹭人治愈" -> "表达亲近、寻求安慰、撒娇的内容"
- 现在是: 句子 vs 句子,匹配更准确
"""
if not self.current_interests or not self._initialized:
raise RuntimeError("❌ 兴趣标签系统未初始化")
@@ -442,13 +487,13 @@ class BotInterestManager:
message_embedding = await self._get_embedding(message_text)
logger.debug(f"消息 embedding 生成成功, 维度: {len(message_embedding)}")
# 计算与每个兴趣标签的相似度
# 计算与每个兴趣标签的相似度(使用扩展标签)
match_count = 0
high_similarity_count = 0
medium_similarity_count = 0
low_similarity_count = 0
# 分级相似度阈值
# 分级相似度阈值 - 优化后可以提高阈值,因为匹配更准确了
affinity_config = global_config.affinity_flow
high_threshold = affinity_config.high_match_interest_threshold
medium_threshold = affinity_config.medium_match_interest_threshold
@@ -458,27 +503,45 @@ class BotInterestManager:
for tag in active_tags:
if tag.embedding:
similarity = self._calculate_cosine_similarity(message_embedding, tag.embedding)
# 🔧 优化:获取扩展标签的 embedding(带缓存)
expanded_embedding = await self._get_expanded_tag_embedding(tag.tag_name)
if expanded_embedding:
# 使用扩展标签的 embedding 进行匹配
similarity = self._calculate_cosine_similarity(message_embedding, expanded_embedding)
# 同时计算原始标签的相似度作为参考
original_similarity = self._calculate_cosine_similarity(message_embedding, tag.embedding)
# 混合策略扩展标签权重更高70%原始标签作为补充30%
# 这样可以兼顾准确性(扩展)和灵活性(原始)
final_similarity = similarity * 0.7 + original_similarity * 0.3
logger.debug(f"标签'{tag.tag_name}': 原始={original_similarity:.3f}, 扩展={similarity:.3f}, 最终={final_similarity:.3f}")
else:
# 如果扩展 embedding 获取失败,使用原始 embedding
final_similarity = self._calculate_cosine_similarity(message_embedding, tag.embedding)
logger.debug(f"标签'{tag.tag_name}': 使用原始相似度={final_similarity:.3f}")
# 基础加权分数
weighted_score = similarity * tag.weight
weighted_score = final_similarity * tag.weight
# 根据相似度等级应用不同的加成
if similarity > high_threshold:
if final_similarity > high_threshold:
# 高相似度:强加成
enhanced_score = weighted_score * affinity_config.high_match_keyword_multiplier
match_count += 1
high_similarity_count += 1
result.add_match(tag.tag_name, enhanced_score, [tag.tag_name])
elif similarity > medium_threshold:
elif final_similarity > medium_threshold:
# 中相似度:中等加成
enhanced_score = weighted_score * affinity_config.medium_match_keyword_multiplier
match_count += 1
medium_similarity_count += 1
result.add_match(tag.tag_name, enhanced_score, [tag.tag_name])
elif similarity > low_threshold:
elif final_similarity > low_threshold:
# 低相似度:轻微加成
enhanced_score = weighted_score * affinity_config.low_match_keyword_multiplier
match_count += 1
@@ -520,6 +583,121 @@ class BotInterestManager:
)
return result
async def _get_expanded_tag_embedding(self, tag_name: str) -> list[float] | None:
"""获取扩展标签的 embedding带缓存
优先使用缓存,如果没有则生成并缓存
"""
# 检查缓存
if tag_name in self.expanded_embedding_cache:
return self.expanded_embedding_cache[tag_name]
# 扩展标签
expanded_tag = self._expand_tag_for_matching(tag_name)
# 生成 embedding
try:
embedding = await self._get_embedding(expanded_tag)
if embedding:
# 缓存结果
self.expanded_tag_cache[tag_name] = expanded_tag
self.expanded_embedding_cache[tag_name] = embedding
logger.debug(f"✅ 为标签'{tag_name}'生成并缓存扩展embedding: {expanded_tag[:50]}...")
return embedding
except Exception as e:
logger.warning(f"为标签'{tag_name}'生成扩展embedding失败: {e}")
return None
def _expand_tag_for_matching(self, tag_name: str) -> str:
"""将短标签扩展为完整的描述性句子
这是解决"标签太短导致误匹配"的核心方法
策略:
1. 优先使用 LLM 生成的 expanded 字段(最准确)
2. 如果没有,使用基于规则的回退方案
3. 最后使用通用模板
示例:
- "Python" + expanded -> "讨论Python编程语言、写Python代码、Python脚本开发、Python技术问题"
- "蹭人治愈" + expanded -> "想要获得安慰、寻求温暖关怀、撒娇卖萌、表达亲昵、求抱抱求陪伴的对话"
"""
# 使用缓存
if tag_name in self.expanded_tag_cache:
return self.expanded_tag_cache[tag_name]
# 🎯 优先策略:使用 LLM 生成的 expanded 字段
if self.current_interests:
for tag in self.current_interests.interest_tags:
if tag.tag_name == tag_name and tag.expanded:
logger.debug(f"✅ 使用LLM生成的扩展描述: {tag_name} -> {tag.expanded[:50]}...")
self.expanded_tag_cache[tag_name] = tag.expanded
return tag.expanded
# 🔧 回退策略基于规则的扩展用于兼容旧数据或LLM未生成扩展的情况
logger.debug(f"⚠️ 标签'{tag_name}'没有LLM扩展描述使用规则回退方案")
tag_lower = tag_name.lower()
# 技术编程类标签(具体化描述)
if any(word in tag_lower for word in ['python', 'java', 'code', '代码', '编程', '脚本', '算法', '开发']):
if 'python' in tag_lower:
return f"讨论Python编程语言、写Python代码、Python脚本开发、Python技术问题"
elif '算法' in tag_lower:
return f"讨论算法题目、数据结构、编程竞赛、刷LeetCode题目、代码优化"
elif '代码' in tag_lower or '被窝' in tag_lower:
return f"讨论写代码、编程开发、代码实现、技术方案、编程技巧"
else:
return f"讨论编程开发、软件技术、代码编写、技术实现"
# 情感表达类标签(具体化为真实对话场景)
elif any(word in tag_lower for word in ['治愈', '撒娇', '安慰', '呼噜', '', '卖萌']):
return f"想要获得安慰、寻求温暖关怀、撒娇卖萌、表达亲昵、求抱抱求陪伴的对话"
# 游戏娱乐类标签(具体游戏场景)
elif any(word in tag_lower for word in ['游戏', '网游', 'mmo', '', '']):
return f"讨论网络游戏、MMO游戏、游戏玩法、组队打副本、游戏攻略心得"
# 动漫影视类标签(具体观看行为)
elif any(word in tag_lower for word in ['', '动漫', '视频', 'b站', '弹幕', '追番', '云新番']):
# 特别处理"云新番" - 它的意思是在网上看新动漫,不是泛泛的"新东西"
if '' in tag_lower or '新番' in tag_lower:
return f"讨论正在播出的新动漫、新番剧集、动漫剧情、追番心得、动漫角色"
else:
return f"讨论动漫番剧内容、B站视频、弹幕文化、追番体验"
# 社交平台类标签(具体平台行为)
elif any(word in tag_lower for word in ['小红书', '贴吧', '论坛', '社区', '吃瓜', '八卦']):
if '吃瓜' in tag_lower:
return f"聊八卦爆料、吃瓜看热闹、网络热点事件、社交平台热议话题"
else:
return f"讨论社交平台内容、网络社区话题、论坛讨论、分享生活"
# 生活日常类标签(具体萌宠场景)
elif any(word in tag_lower for word in ['', '宠物', '尾巴', '耳朵', '毛绒']):
return f"讨论猫咪宠物、晒猫分享、萌宠日常、可爱猫猫、养猫心得"
# 状态心情类标签(具体情绪状态)
elif any(word in tag_lower for word in ['社恐', '隐身', '流浪', '深夜', '被窝']):
if '社恐' in tag_lower:
return f"表达社交焦虑、不想见人、想躲起来、害怕社交的心情"
elif '深夜' in tag_lower:
return f"深夜睡不着、熬夜、夜猫子、深夜思考人生的对话"
else:
return f"表达当前心情状态、个人感受、生活状态"
# 物品装备类标签(具体使用场景)
elif any(word in tag_lower for word in ['键盘', '耳机', '装备', '设备']):
return f"讨论键盘耳机装备、数码产品、使用体验、装备推荐评测"
# 互动关系类标签
elif any(word in tag_lower for word in ['拾风', '互怼', '互动']):
return f"聊天互动、开玩笑、友好互怼、日常对话交流"
# 默认:尽量具体化
else:
return f"明确讨论{tag_name}这个特定主题的具体内容和相关话题"
def _calculate_keyword_match_bonus(self, keywords: list[str], matched_tags: list[str]) -> dict[str, float]:
"""计算关键词直接匹配奖励"""
if not keywords or not matched_tags:
@@ -668,11 +846,12 @@ class BotInterestManager:
last_updated=db_interests.last_updated,
)
# 解析兴趣标签
# 解析兴趣标签embedding 从数据库加载后会被忽略,因为我们不再存储它)
for tag_data in tags_data:
tag = BotInterestTag(
tag_name=tag_data.get("tag_name", ""),
weight=tag_data.get("weight", 0.5),
expanded=tag_data.get("expanded"), # 加载扩展描述
created_at=datetime.fromisoformat(
tag_data.get("created_at", datetime.now().isoformat())
),
@@ -680,11 +859,11 @@ class BotInterestManager:
tag_data.get("updated_at", datetime.now().isoformat())
),
is_active=tag_data.get("is_active", True),
embedding=tag_data.get("embedding"),
embedding=None, # 不再从数据库加载 embedding改为动态生成
)
interests.interest_tags.append(tag)
logger.debug(f"成功解析 {len(interests.interest_tags)} 个兴趣标签")
logger.debug(f"成功解析 {len(interests.interest_tags)} 个兴趣标签embedding 将在初始化时动态生成)")
return interests
except (orjson.JSONDecodeError, Exception) as e:
@@ -715,16 +894,17 @@ class BotInterestManager:
from src.common.database.compatibility import get_db_session
from src.common.database.core.models import BotPersonalityInterests as DBBotPersonalityInterests
# 将兴趣标签转换为JSON格式
# 将兴趣标签转换为JSON格式不再保存embedding启动时动态生成
tags_data = []
for tag in interests.interest_tags:
tag_dict = {
"tag_name": tag.tag_name,
"weight": tag.weight,
"expanded": tag.expanded, # 保存扩展描述
"created_at": tag.created_at.isoformat(),
"updated_at": tag.updated_at.isoformat(),
"is_active": tag.is_active,
"embedding": tag.embedding,
# embedding 不再存储到数据库,改为内存缓存
}
tags_data.append(tag_dict)

View File

@@ -196,10 +196,18 @@ async def _process_single_segment(segment: Seg, state: dict, message_info: BaseM
state["is_emoji"] = False
state["is_video"] = False
state["is_at"] = True
# 处理at消息格式为"昵称:QQ号"
if isinstance(segment.data, str) and ":" in segment.data:
nickname, qq_id = segment.data.split(":", 1)
return f"@{nickname}"
# 处理at消息格式为"@<昵称:QQ号>"
if isinstance(segment.data, str):
if ":" in segment.data:
# 标准格式: "昵称:QQ号"
nickname, qq_id = segment.data.split(":", 1)
result = f"@<{nickname}:{qq_id}>"
logger.info(f"[at处理] 标准格式 -> {result}")
return result
else:
logger.warning(f"[at处理] 无法解析格式: '{segment.data}'")
return f"@{segment.data}"
logger.warning(f"[at处理] 数据类型异常: {type(segment.data)}")
return f"@{segment.data}" if isinstance(segment.data, str) else "@未知用户"
elif segment.type == "image":

View File

@@ -49,23 +49,22 @@ def is_mentioned_bot_in_message(message) -> tuple[bool, float]:
message: DatabaseMessages 消息对象
Returns:
tuple[bool, float]: (是否提及, 提及概率)
tuple[bool, float]: (是否提及, 提及类型)
提及类型: 0=未提及, 1=弱提及(文本匹配), 2=强提及(@/回复/私聊)
"""
keywords = [global_config.bot.nickname]
nicknames = global_config.bot.alias_names
reply_probability = 0.0
is_at = False
is_mentioned = False
mention_type = 0 # 0=未提及, 1=弱提及, 2=强提及
# 检查 is_mentioned 属性
# 检查 is_mentioned 属性(保持向后兼容)
mentioned_attr = getattr(message, "is_mentioned", None)
if mentioned_attr is not None:
try:
return bool(mentioned_attr), float(mentioned_attr)
# 如果已有 is_mentioned直接返回假设是强提及
return bool(mentioned_attr), 2.0 if mentioned_attr else 0.0
except (ValueError, TypeError):
pass
# 检查 additional_config
# 检查 additional_config(保持向后兼容)
additional_config = None
# DatabaseMessages: additional_config 是 JSON 字符串
@@ -78,62 +77,66 @@ def is_mentioned_bot_in_message(message) -> tuple[bool, float]:
if additional_config and additional_config.get("is_mentioned") is not None:
try:
reply_probability = float(additional_config.get("is_mentioned")) # type: ignore
is_mentioned = True
return is_mentioned, reply_probability
mentioned_value = float(additional_config.get("is_mentioned")) # type: ignore
# 如果配置中有提及值,假设是强提及
return True, 2.0 if mentioned_value > 0 else 0.0
except Exception as e:
logger.warning(str(e))
logger.warning(
f"消息中包含不合理的设置 is_mentioned: {additional_config.get('is_mentioned')}"
)
# 检查消息文本内容
processed_text = message.processed_plain_text or ""
if global_config.bot.nickname in processed_text:
is_mentioned = True
for alias_name in global_config.bot.alias_names:
if alias_name in processed_text:
is_mentioned = True
# 判断是否被@
if re.search(rf"@<(.+?):{global_config.bot.qq_account}>", message.processed_plain_text):
# 1. 判断是否为私聊(强提及)
group_info = getattr(message, "group_info", None)
if not group_info or not getattr(group_info, "group_id", None):
is_private = True
mention_type = 2
logger.debug("检测到私聊消息 - 强提及")
# 2. 判断是否被@(强提及)
if re.search(rf"@<(.+?):{global_config.bot.qq_account}>", processed_text):
is_at = True
is_mentioned = True
# print(f"message.processed_plain_text: {message.processed_plain_text}")
# print(f"is_mentioned: {is_mentioned}")
# print(f"is_at: {is_at}")
if is_at and global_config.chat.at_bot_inevitable_reply:
reply_probability = 1.0
logger.debug("被@回复概率设置为100%")
else:
if not is_mentioned:
# 判断是否被回复
if re.match(
rf"\[回复 (.+?)\({global_config.bot.qq_account!s}\)(.+?)\],说:", message.processed_plain_text
) or re.match(
rf"\[回复<(.+?)(?=:{global_config.bot.qq_account!s}>)\:{global_config.bot.qq_account!s}>(.+?)\],说:",
message.processed_plain_text,
):
is_mentioned = True
else:
# 判断内容中是否被提及
message_content = re.sub(r"@(.+?)(\d+)", "", message.processed_plain_text)
message_content = re.sub(r"@<(.+?)(?=:(\d+))\:(\d+)>", "", message_content)
message_content = re.sub(r"\[回复 (.+?)\(((\d+)|未知id)\)(.+?)\],说:", "", message_content)
message_content = re.sub(r"\[回复<(.+?)(?=:(\d+))\:(\d+)>(.+?)\],说:", "", message_content)
for keyword in keywords:
if keyword in message_content:
is_mentioned = True
for nickname in nicknames:
if nickname in message_content:
is_mentioned = True
if is_mentioned and global_config.chat.mentioned_bot_inevitable_reply:
reply_probability = 1.0
logger.debug("被提及回复概率设置为100%")
return is_mentioned, reply_probability
mention_type = 2
logger.debug("检测到@提及 - 强提及")
# 3. 判断是否被回复(强提及)
if re.match(
rf"\[回复 (.+?)\({global_config.bot.qq_account!s}\)(.+?)\],说:", processed_text
) or re.match(
rf"\[回复<(.+?)(?=:{global_config.bot.qq_account!s}>)\:{global_config.bot.qq_account!s}>(.+?)\],说:",
processed_text,
):
is_replied = True
mention_type = 2
logger.debug("检测到回复消息 - 强提及")
# 4. 判断文本中是否提及bot名字或别名弱提及
if mention_type == 0: # 只有在没有强提及时才检查弱提及
# 移除@和回复标记后再检查
message_content = re.sub(r"@(.+?)(\d+)", "", processed_text)
message_content = re.sub(r"@<(.+?)(?=:(\d+))\:(\d+)>", "", message_content)
message_content = re.sub(r"\[回复 (.+?)\(((\d+)|未知id)\)(.+?)\],说:", "", message_content)
message_content = re.sub(r"\[回复<(.+?)(?=:(\d+))\:(\d+)>(.+?)\],说:", "", message_content)
# 检查bot主名字
if global_config.bot.nickname in message_content:
is_text_mentioned = True
mention_type = 1
logger.debug(f"检测到文本提及bot主名字 '{global_config.bot.nickname}' - 弱提及")
# 如果主名字没匹配,再检查别名
elif nicknames:
for alias_name in nicknames:
if alias_name in message_content:
is_text_mentioned = True
mention_type = 1
logger.debug(f"检测到文本提及bot别名 '{alias_name}' - 弱提及")
break
# 返回结果
is_mentioned = mention_type > 0
return is_mentioned, float(mention_type)
async def get_embedding(text, request_type="embedding") -> list[float] | None:
"""获取文本的embedding向量"""

View File

@@ -16,6 +16,7 @@ class BotInterestTag(BaseDataModel):
tag_name: str
weight: float = 1.0 # 权重,表示对这个兴趣的喜好程度 (0.0-1.0)
expanded: str | None = None # 标签的扩展描述,用于更精准的语义匹配
embedding: list[float] | None = None # 标签的embedding向量
created_at: datetime = field(default_factory=datetime.now)
updated_at: datetime = field(default_factory=datetime.now)
@@ -26,6 +27,7 @@ class BotInterestTag(BaseDataModel):
return {
"tag_name": self.tag_name,
"weight": self.weight,
"expanded": self.expanded,
"embedding": self.embedding,
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat(),
@@ -38,6 +40,7 @@ class BotInterestTag(BaseDataModel):
return cls(
tag_name=data["tag_name"],
weight=data.get("weight", 1.0),
expanded=data.get("expanded"),
embedding=data.get("embedding"),
created_at=datetime.fromisoformat(data["created_at"]) if data.get("created_at") else datetime.now(),
updated_at=datetime.fromisoformat(data["updated_at"]) if data.get("updated_at") else datetime.now(),

View File

@@ -703,6 +703,12 @@ class AffinityFlowConfig(ValidatedConfigBase):
reply_cooldown_reduction: int = Field(default=2, description="回复后减少的不回复计数")
max_no_reply_count: int = Field(default=5, description="最大不回复计数次数")
# 回复后连续对话机制参数
enable_post_reply_boost: bool = Field(default=True, description="是否启用回复后阈值降低机制使bot在回复后更容易进行连续对话")
post_reply_threshold_reduction: float = Field(default=0.15, description="回复后初始阈值降低值建议0.1-0.2")
post_reply_boost_max_count: int = Field(default=3, description="回复后阈值降低的最大持续次数建议2-5")
post_reply_boost_decay_rate: float = Field(default=0.5, description="每次回复后阈值降低衰减率0-1建议0.3-0.7")
# 综合评分权重
keyword_match_weight: float = Field(default=0.4, description="兴趣关键词匹配度权重")
mention_bot_weight: float = Field(default=0.3, description="提及bot分数权重")
@@ -710,7 +716,9 @@ class AffinityFlowConfig(ValidatedConfigBase):
# 提及bot相关参数
mention_bot_adjustment_threshold: float = Field(default=0.3, description="提及bot后的调整阈值")
mention_bot_interest_score: float = Field(default=0.6, description="提及bot的兴趣分")
mention_bot_interest_score: float = Field(default=0.6, description="提及bot的兴趣分已弃用改用strong/weak_mention")
strong_mention_interest_score: float = Field(default=2.5, description="强提及的兴趣分(被@、被回复、私聊)")
weak_mention_interest_score: float = Field(default=1.5, description="弱提及的兴趣分文本匹配bot名字或别名")
base_relationship_score: float = Field(default=0.5, description="基础人物关系分")
# 关系追踪系统参数

View File

@@ -33,9 +33,14 @@ class Individuality:
personality_side = global_config.personality.personality_side
identity = global_config.personality.identity
person_info_manager = get_person_info_manager()
self.bot_person_id = person_info_manager.get_person_id("system", "bot_id")
# 基于人设文本生成 personality_id使用 MD5 hash
# 这样当人设发生变化时会自动生成新的 ID触发重新生成兴趣标签
personality_hash, _ = self._get_config_hash(bot_nickname, personality_core, personality_side, identity)
self.bot_person_id = personality_hash
self.name = bot_nickname
logger.info(f"生成的 personality_id: {self.bot_person_id[:16]}... (基于人设文本 hash)")
person_info_manager = get_person_info_manager()
# 检查配置变化,如果变化则清空
personality_changed, identity_changed = await self._check_config_and_clear_if_changed(
@@ -72,8 +77,8 @@ class Individuality:
if personality_changed or identity_changed:
logger.info("将清空数据库中原有的关键词缓存")
update_data = {
"platform": "system",
"user_id": "bot_id",
"platform": "personality",
"user_id": self.bot_person_id, # 使用基于人设生成的 ID
"person_name": self.name,
"nickname": self.name,
}
@@ -171,8 +176,8 @@ class Individuality:
if personality_changed or identity_changed:
logger.info("将清空原有的关键词缓存")
update_data = {
"platform": "system",
"user_id": "bot_id",
"platform": "personality",
"user_id": current_personality_hash, # 使用 personality hash 作为 user_id
"person_name": self.name,
"nickname": self.name,
}

View File

@@ -0,0 +1,10 @@
"""
AffinityFlow Chatter 核心模块
包含兴趣度计算器和核心对话处理逻辑
"""
from .affinity_chatter import AffinityChatter
from .affinity_interest_calculator import AffinityInterestCalculator
__all__ = ["AffinityChatter", "AffinityInterestCalculator"]

View File

@@ -15,7 +15,7 @@ from src.common.data_models.message_manager_data_model import StreamContext
from src.common.logger import get_logger
from src.plugin_system.base.base_chatter import BaseChatter
from src.plugin_system.base.component_types import ChatType
from src.plugins.built_in.affinity_flow_chatter.planner import ChatterActionPlanner
from src.plugins.built_in.affinity_flow_chatter.planner.planner import ChatterActionPlanner
logger = get_logger("affinity_chatter")

View File

@@ -3,6 +3,7 @@
基于原有的 AffinityFlow 兴趣度评分系统提供标准化的兴趣值计算功能
"""
import asyncio
import time
from typing import TYPE_CHECKING
@@ -60,10 +61,18 @@ class AffinityInterestCalculator(BaseInterestCalculator):
# 用户关系数据缓存
self.user_relationships: dict[str, float] = {} # user_id -> relationship_score
# 回复后阈值降低机制
self.enable_post_reply_boost = affinity_config.enable_post_reply_boost
self.post_reply_boost_remaining = 0 # 剩余的回复后降低次数
self.post_reply_threshold_reduction = affinity_config.post_reply_threshold_reduction
self.post_reply_boost_max_count = affinity_config.post_reply_boost_max_count
self.post_reply_boost_decay_rate = affinity_config.post_reply_boost_decay_rate
logger.info("[Affinity兴趣计算器] 初始化完成:")
logger.info(f" - 权重配置: {self.score_weights}")
logger.info(f" - 回复阈值: {self.reply_threshold}")
logger.info(f" - 智能匹配: {self.use_smart_matching}")
logger.info(f" - 回复后连续对话: {self.enable_post_reply_boost}")
# 检查 bot_interest_manager 状态
try:
@@ -120,22 +129,23 @@ class AffinityInterestCalculator(BaseInterestCalculator):
f"{mentioned_score:.3f}*{self.score_weights['mentioned']} = {total_score:.3f}"
)
# 5. 考虑连续不回复的概率提升
adjusted_score = self._apply_no_reply_boost(total_score)
logger.debug(f"[Affinity兴趣计算] 应用不回复提升后: {total_score:.3f}{adjusted_score:.3f}")
# 5. 考虑连续不回复的阈值调整
adjusted_score = total_score
adjusted_reply_threshold, adjusted_action_threshold = self._apply_no_reply_threshold_adjustment()
logger.debug(
f"[Affinity兴趣计算] 连续不回复调整: 回复阈值 {self.reply_threshold:.3f}{adjusted_reply_threshold:.3f}, "
f"动作阈值 {global_config.affinity_flow.non_reply_action_interest_threshold:.3f}{adjusted_action_threshold:.3f}"
)
# 6. 决定是否回复和执行动作
reply_threshold = self.reply_threshold
action_threshold = global_config.affinity_flow.non_reply_action_interest_threshold
should_reply = adjusted_score >= reply_threshold
should_take_action = adjusted_score >= action_threshold
should_reply = adjusted_score >= adjusted_reply_threshold
should_take_action = adjusted_score >= adjusted_action_threshold
logger.debug(
f"[Affinity兴趣计算] 阈值判断: {adjusted_score:.3f} >= 回复阈值:{reply_threshold:.3f}? = {should_reply}"
f"[Affinity兴趣计算] 阈值判断: {adjusted_score:.3f} >= 回复阈值:{adjusted_reply_threshold:.3f}? = {should_reply}"
)
logger.debug(
f"[Affinity兴趣计算] 阈值判断: {adjusted_score:.3f} >= 动作阈值:{action_threshold:.3f}? = {should_take_action}"
f"[Affinity兴趣计算] 阈值判断: {adjusted_score:.3f} >= 动作阈值:{adjusted_action_threshold:.3f}? = {should_take_action}"
)
calculation_time = time.time() - start_time
@@ -162,7 +172,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
)
async def _calculate_interest_match_score(self, content: str, keywords: list[str] | None = None) -> float:
"""计算兴趣匹配度(使用智能兴趣匹配系统)"""
"""计算兴趣匹配度(使用智能兴趣匹配系统,带超时保护"""
# 调试日志:检查各个条件
if not content:
@@ -178,8 +188,11 @@ class AffinityInterestCalculator(BaseInterestCalculator):
logger.debug(f"开始兴趣匹配计算,内容: {content[:50]}...")
try:
# 使用机器人的兴趣标签系统进行智能匹配
match_result = await bot_interest_manager.calculate_interest_match(content, keywords or [])
# 使用机器人的兴趣标签系统进行智能匹配1.5秒超时保护)
match_result = await asyncio.wait_for(
bot_interest_manager.calculate_interest_match(content, keywords or []),
timeout=1.5
)
logger.debug(f"兴趣匹配结果: {match_result}")
if match_result:
@@ -195,6 +208,9 @@ class AffinityInterestCalculator(BaseInterestCalculator):
logger.debug("兴趣匹配返回0.0: match_result为None")
return 0.0
except asyncio.TimeoutError:
logger.warning(f"⏱️ 兴趣匹配计算超时(>1.5秒)返回默认分值0.5以保留其他分数")
return 0.5 # 超时时返回默认分值,避免丢失提及分和关系分
except Exception as e:
logger.warning(f"智能兴趣匹配失败: {e}")
return 0.0
@@ -226,29 +242,78 @@ class AffinityInterestCalculator(BaseInterestCalculator):
return global_config.affinity_flow.base_relationship_score
def _calculate_mentioned_score(self, message: "DatabaseMessages", bot_nickname: str) -> float:
"""计算提及分 - 统一使用配置值,不区分提及方式"""
is_mentioned = getattr(message, "is_mentioned", False)
processed_plain_text = getattr(message, "processed_plain_text", "")
# 判断是否为私聊 - 通过 group_info 对象判断
is_private_chat = not message.group_info # 如果没有group_info则是私聊
logger.debug(f"[提及分计算] is_mentioned={is_mentioned}, is_private_chat={is_private_chat}, group_info={message.group_info}")
# 检查是否被提及(包括文本匹配)
bot_aliases = [bot_nickname, *global_config.bot.alias_names]
is_text_mentioned = any(alias in processed_plain_text for alias in bot_aliases if alias)
# 统一判断:只要提及了机器人(包括@、文本提及、私聊)都返回配置的分值
if is_mentioned or is_text_mentioned or is_private_chat:
logger.debug("[提及分计算] 检测到机器人提及,返回配置分值")
return global_config.affinity_flow.mention_bot_interest_score
"""计算提及分 - 区分提及和弱提及
强提及@被回复私聊: 使用 strong_mention_interest_score
弱提及文本匹配名字/别名: 使用 weak_mention_interest_score
"""
from src.chat.utils.utils import is_mentioned_bot_in_message
# 使用统一的提及检测函数
is_mentioned, mention_type = is_mentioned_bot_in_message(message)
if not is_mentioned:
logger.debug("[提及分计算] 未提及机器人返回0.0")
return 0.0
# mention_type: 0=未提及, 1=弱提及, 2=强提及
if mention_type >= 2:
# 强提及:被@、被回复、私聊
score = global_config.affinity_flow.strong_mention_interest_score
logger.debug(f"[提及分计算] 检测到强提及(@/回复/私聊),返回分值: {score}")
return score
elif mention_type >= 1:
# 弱提及文本匹配bot名字或别名
score = global_config.affinity_flow.weak_mention_interest_score
logger.debug(f"[提及分计算] 检测到弱提及(文本匹配),返回分值: {score}")
return score
else:
logger.debug("[提及分计算] 未提及机器人返回0.0")
return 0.0 # 未提及机器人
return 0.0
def _apply_no_reply_threshold_adjustment(self) -> tuple[float, float]:
"""应用阈值调整(包括连续不回复和回复后降低机制)
Returns:
tuple[float, float]: (调整后的回复阈值, 调整后的动作阈值)
"""
# 基础阈值
base_reply_threshold = self.reply_threshold
base_action_threshold = global_config.affinity_flow.non_reply_action_interest_threshold
total_reduction = 0.0
# 1. 连续不回复的阈值降低
if self.no_reply_count > 0 and self.no_reply_count < self.max_no_reply_count:
no_reply_reduction = self.no_reply_count * self.probability_boost_per_no_reply
total_reduction += no_reply_reduction
logger.debug(f"[阈值调整] 连续不回复降低: {no_reply_reduction:.3f} (计数: {self.no_reply_count})")
# 2. 回复后的阈值降低使bot更容易连续对话
if self.enable_post_reply_boost and self.post_reply_boost_remaining > 0:
# 计算衰减后的降低值
decay_factor = self.post_reply_boost_decay_rate ** (
self.post_reply_boost_max_count - self.post_reply_boost_remaining
)
post_reply_reduction = self.post_reply_threshold_reduction * decay_factor
total_reduction += post_reply_reduction
logger.debug(
f"[阈值调整] 回复后降低: {post_reply_reduction:.3f} "
f"(剩余次数: {self.post_reply_boost_remaining}, 衰减: {decay_factor:.2f})"
)
# 应用总降低量
adjusted_reply_threshold = max(0.0, base_reply_threshold - total_reduction)
adjusted_action_threshold = max(0.0, base_action_threshold - total_reduction)
return adjusted_reply_threshold, adjusted_action_threshold
def _apply_no_reply_boost(self, base_score: float) -> float:
"""应用连续不回复的概率提升"""
"""【已弃用】应用连续不回复的概率提升
注意此方法已被 _apply_no_reply_threshold_adjustment 替代
保留用于向后兼容
"""
if self.no_reply_count > 0 and self.no_reply_count < self.max_no_reply_count:
boost = self.no_reply_count * self.probability_boost_per_no_reply
return min(1.0, base_score + boost)
@@ -315,3 +380,34 @@ class AffinityInterestCalculator(BaseInterestCalculator):
self.no_reply_count = 0
else:
self.no_reply_count = min(self.no_reply_count + 1, self.max_no_reply_count)
def on_reply_sent(self):
"""当机器人发送回复后调用,激活回复后阈值降低机制"""
if self.enable_post_reply_boost:
# 重置回复后降低计数器
self.post_reply_boost_remaining = self.post_reply_boost_max_count
logger.debug(
f"[回复后机制] 激活连续对话模式,阈值将在接下来 {self.post_reply_boost_max_count} 条消息中降低"
)
# 同时重置不回复计数
self.no_reply_count = 0
def on_message_processed(self, replied: bool):
"""消息处理完成后调用,更新各种计数器
Args:
replied: 是否回复了此消息
"""
# 更新不回复计数
self.update_no_reply_count(replied)
# 如果已回复,激活回复后降低机制
if replied:
self.on_reply_sent()
else:
# 如果没有回复,减少回复后降低剩余次数
if self.post_reply_boost_remaining > 0:
self.post_reply_boost_remaining -= 1
logger.debug(
f"[回复后机制] 未回复消息,剩余降低次数: {self.post_reply_boost_remaining}"
)

View File

@@ -0,0 +1,13 @@
"""
AffinityFlow Chatter 规划器模块
包含计划生成、过滤、执行等规划相关功能
"""
from .plan_executor import ChatterPlanExecutor
from .plan_filter import ChatterPlanFilter
from .plan_generator import ChatterPlanGenerator
from .planner import ChatterActionPlanner
from . import planner_prompts
__all__ = ["ChatterActionPlanner", "planner_prompts", "ChatterPlanGenerator", "ChatterPlanFilter", "ChatterPlanExecutor"]

View File

@@ -11,9 +11,9 @@ from src.common.logger import get_logger
from src.config.config import global_config
from src.mood.mood_manager import mood_manager
from src.plugin_system.base.component_types import ChatMode
from src.plugins.built_in.affinity_flow_chatter.plan_executor import ChatterPlanExecutor
from src.plugins.built_in.affinity_flow_chatter.plan_filter import ChatterPlanFilter
from src.plugins.built_in.affinity_flow_chatter.plan_generator import ChatterPlanGenerator
from src.plugins.built_in.affinity_flow_chatter.planner.plan_executor import ChatterPlanExecutor
from src.plugins.built_in.affinity_flow_chatter.planner.plan_filter import ChatterPlanFilter
from src.plugins.built_in.affinity_flow_chatter.planner.plan_generator import ChatterPlanGenerator
if TYPE_CHECKING:
from src.chat.planner_actions.action_manager import ChatterActionManager
@@ -21,7 +21,7 @@ if TYPE_CHECKING:
from src.common.data_models.message_manager_data_model import StreamContext
# 导入提示词模块以确保其被初始化
from src.plugins.built_in.affinity_flow_chatter import planner_prompts # noqa
from src.plugins.built_in.affinity_flow_chatter.planner import planner_prompts
logger = get_logger("planner")

View File

@@ -39,48 +39,48 @@ class AffinityChatterPlugin(BasePlugin):
components: ClassVar = []
try:
# 延迟导入 AffinityChatter
from .affinity_chatter import AffinityChatter
# 延迟导入 AffinityChatter(从 core 子模块)
from .core.affinity_chatter import AffinityChatter
components.append((AffinityChatter.get_chatter_info(), AffinityChatter))
except Exception as e:
logger.error(f"加载 AffinityChatter 时出错: {e}")
try:
# 延迟导入 AffinityInterestCalculator
from .affinity_interest_calculator import AffinityInterestCalculator
# 延迟导入 AffinityInterestCalculator(从 core 子模块)
from .core.affinity_interest_calculator import AffinityInterestCalculator
components.append((AffinityInterestCalculator.get_interest_calculator_info(), AffinityInterestCalculator))
except Exception as e:
logger.error(f"加载 AffinityInterestCalculator 时出错: {e}")
try:
# 延迟导入 UserProfileTool
from .user_profile_tool import UserProfileTool
# 延迟导入 UserProfileTool(从 tools 子模块)
from .tools.user_profile_tool import UserProfileTool
components.append((UserProfileTool.get_tool_info(), UserProfileTool))
except Exception as e:
logger.error(f"加载 UserProfileTool 时出错: {e}")
try:
# 延迟导入 ChatStreamImpressionTool
from .chat_stream_impression_tool import ChatStreamImpressionTool
# 延迟导入 ChatStreamImpressionTool(从 tools 子模块)
from .tools.chat_stream_impression_tool import ChatStreamImpressionTool
components.append((ChatStreamImpressionTool.get_tool_info(), ChatStreamImpressionTool))
except Exception as e:
logger.error(f"加载 ChatStreamImpressionTool 时出错: {e}")
try:
# 延迟导入 ProactiveThinkingReplyHandler
from .proactive_thinking_event import ProactiveThinkingReplyHandler
# 延迟导入 ProactiveThinkingReplyHandler(从 proactive 子模块)
from .proactive.proactive_thinking_event import ProactiveThinkingReplyHandler
components.append((ProactiveThinkingReplyHandler.get_handler_info(), ProactiveThinkingReplyHandler))
except Exception as e:
logger.error(f"加载 ProactiveThinkingReplyHandler 时出错: {e}")
try:
# 延迟导入 ProactiveThinkingMessageHandler
from .proactive_thinking_event import ProactiveThinkingMessageHandler
# 延迟导入 ProactiveThinkingMessageHandler(从 proactive 子模块)
from .proactive.proactive_thinking_event import ProactiveThinkingMessageHandler
components.append((ProactiveThinkingMessageHandler.get_handler_info(), ProactiveThinkingMessageHandler))
except Exception as e:

View File

@@ -0,0 +1,17 @@
"""
AffinityFlow Chatter 主动思考模块
包含主动思考调度器、执行器和事件处理
"""
from .proactive_thinking_event import ProactiveThinkingMessageHandler, ProactiveThinkingReplyHandler
from .proactive_thinking_executor import execute_proactive_thinking
from .proactive_thinking_scheduler import ProactiveThinkingScheduler, proactive_thinking_scheduler
__all__ = [
"ProactiveThinkingReplyHandler",
"ProactiveThinkingMessageHandler",
"execute_proactive_thinking",
"ProactiveThinkingScheduler",
"proactive_thinking_scheduler",
]

View File

@@ -9,7 +9,7 @@ from typing import ClassVar
from src.common.logger import get_logger
from src.plugin_system import BaseEventHandler, EventType
from src.plugin_system.base.base_event import HandlerResult
from src.plugins.built_in.affinity_flow_chatter.proactive_thinking_scheduler import (
from src.plugins.built_in.affinity_flow_chatter.proactive.proactive_thinking_scheduler import (
proactive_thinking_scheduler,
)

View File

@@ -226,7 +226,7 @@ class ProactiveThinkingPlanner:
# 5. 获取上次决策
last_decision = None
try:
from src.plugins.built_in.affinity_flow_chatter.proactive_thinking_scheduler import (
from src.plugins.built_in.affinity_flow_chatter.proactive.proactive_thinking_scheduler import (
proactive_thinking_scheduler,
)
@@ -520,7 +520,7 @@ async def execute_proactive_thinking(stream_id: str):
stream_id: 聊天流ID
"""
from src.config.config import global_config
from src.plugins.built_in.affinity_flow_chatter.proactive_thinking_scheduler import (
from src.plugins.built_in.affinity_flow_chatter.proactive.proactive_thinking_scheduler import (
proactive_thinking_scheduler,
)

View File

@@ -256,7 +256,7 @@ class ProactiveThinkingScheduler:
logger.debug(f"[调度器] 触发间隔={interval_seconds}秒 ({interval_seconds / 60:.1f}分钟)")
# 导入回调函数(延迟导入避免循环依赖)
from src.plugins.built_in.affinity_flow_chatter.proactive_thinking_executor import (
from src.plugins.built_in.affinity_flow_chatter.proactive.proactive_thinking_executor import (
execute_proactive_thinking,
)

View File

@@ -0,0 +1,10 @@
"""
AffinityFlow Chatter 工具模块
包含各种辅助工具类
"""
from .chat_stream_impression_tool import ChatStreamImpressionTool
from .user_profile_tool import UserProfileTool
__all__ = ["ChatStreamImpressionTool", "UserProfileTool"]

View File

@@ -386,6 +386,9 @@ class NapcatAdapterPlugin(BasePlugin):
return components
async def on_plugin_loaded(self):
# 初始化数据库表
await self._init_database_tables()
# 设置插件配置
message_send_instance.set_plugin_config(self.config)
# 设置chunker的插件配置
@@ -410,3 +413,18 @@ class NapcatAdapterPlugin(BasePlugin):
stream_router.cleanup_interval = config_api.get_plugin_config(self.config, "stream_router.cleanup_interval", 60)
# 设置其他handler的插件配置现在由component_registry在注册时自动设置
async def _init_database_tables(self):
    """Create the database tables this plugin needs (best-effort).

    Any failure is logged but deliberately not re-raised, so a database
    problem does not prevent the plugin from loading.
    """
    try:
        # Imports are deferred into the method body — presumably to avoid
        # circular imports at module load time (TODO confirm; the codebase
        # uses this pattern elsewhere for that reason).
        from src.common.database.core.engine import get_engine
        from .src.database import NapcatBanRecord

        engine = await get_engine()
        async with engine.begin() as conn:
            # Create the napcat_ban_records table via the model's metadata.
            await conn.run_sync(NapcatBanRecord.metadata.create_all)
        logger.info("Napcat 插件数据库表初始化成功")
    except Exception as e:
        logger.error(f"Napcat 插件数据库表初始化失败: {e}", exc_info=True)

View File

@@ -35,13 +35,17 @@ class MetaEventHandler:
self_id = message.get("self_id")
self.last_heart_beat = time.time()
logger.info(f"Bot {self_id} 连接成功")
asyncio.create_task(self.check_heartbeat(self_id))
# 不在连接时立即启动心跳检查,等第一个心跳包到达后再启动
elif event_type == MetaEventType.heartbeat:
if message["status"].get("online") and message["status"].get("good"):
if not self._interval_checking:
asyncio.create_task(self.check_heartbeat())
self_id = message.get("self_id")
if not self._interval_checking and self_id:
# 第一次收到心跳包时才启动心跳检查
asyncio.create_task(self.check_heartbeat(self_id))
self.last_heart_beat = time.time()
self.interval = message.get("interval") / 1000
interval = message.get("interval")
if interval:
self.interval = interval / 1000
else:
self_id = message.get("self_id")
logger.warning(f"Bot {self_id} Napcat 端异常!")