This commit is contained in:
tt-P607
2025-12-13 19:38:16 +08:00
55 changed files with 5273 additions and 826 deletions

View File

@@ -4,7 +4,6 @@ import binascii
import hashlib
import io
import json
import json_repair
import os
import random
import re
@@ -12,6 +11,7 @@ import time
import traceback
from typing import Any, Optional, cast
import json_repair
from PIL import Image
from rich.traceback import install
from sqlalchemy import select

View File

@@ -3,7 +3,7 @@ import re
import time
import traceback
from collections import deque
from typing import TYPE_CHECKING, Optional, Any, cast
from typing import TYPE_CHECKING, Any, Optional, cast
import orjson
from sqlalchemy import desc, insert, select, update

View File

@@ -1799,7 +1799,7 @@ class DefaultReplyer:
)
if content:
if not global_config.response_splitter.enable or global_config.response_splitter.split_mode != 'llm':
if not global_config.response_splitter.enable or global_config.response_splitter.split_mode != "llm":
# Remove the [SPLIT] marker to keep the message from being split
content = content.replace("[SPLIT]", "")

View File

@@ -10,9 +10,8 @@ from datetime import datetime, timedelta
from pathlib import Path
from typing import Any
from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.semantic_interest.trainer import SemanticInterestTrainer
from src.common.logger import get_logger
logger = get_logger("semantic_interest.auto_trainer")
@@ -64,7 +63,7 @@ class AutoTrainer:
# Load the cached persona state
self._load_persona_cache()
# Scheduled-task flag (prevents duplicate startup)
self._scheduled_task_running = False
self._scheduled_task = None
@@ -78,7 +77,7 @@ class AutoTrainer:
"""加载缓存的人设状态"""
if self.persona_cache_file.exists():
try:
with open(self.persona_cache_file, "r", encoding="utf-8") as f:
with open(self.persona_cache_file, encoding="utf-8") as f:
cache = json.load(f)
self.last_persona_hash = cache.get("persona_hash")
last_train_str = cache.get("last_train_time")
@@ -121,7 +120,7 @@ class AutoTrainer:
"personality_side": persona_info.get("personality_side", ""),
"identity": persona_info.get("identity", ""),
}
# Serialize to JSON and compute the hash
json_str = json.dumps(key_fields, sort_keys=True, ensure_ascii=False)
return hashlib.sha256(json_str.encode()).hexdigest()
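A minimal sketch (not part of this commit) of why sort_keys matters here: serializing with sorted keys makes the hash independent of dict insertion order, so only real persona changes alter it.
import hashlib
import json
def persona_hash(d: dict) -> str:
    s = json.dumps(d, sort_keys=True, ensure_ascii=False)
    return hashlib.sha256(s.encode()).hexdigest()
a = {"identity": "cat", "personality_core": "curious"}
b = {"personality_core": "curious", "identity": "cat"}
assert persona_hash(a) == persona_hash(b)  # same hash despite different key order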
@@ -136,17 +135,17 @@ class AutoTrainer:
True if the persona has changed
"""
current_hash = self._calculate_persona_hash(persona_info)
if self.last_persona_hash is None:
logger.info("[自动训练器] 首次检测人设")
return True
if current_hash != self.last_persona_hash:
logger.info(f"[自动训练器] 检测到人设变化")
logger.info("[自动训练器] 检测到人设变化")
logger.info(f" - 旧哈希: {self.last_persona_hash[:8]}")
logger.info(f" - 新哈希: {current_hash[:8]}")
return True
return False
def should_train(self, persona_info: dict[str, Any], force: bool = False) -> tuple[bool, str]:
@@ -198,7 +197,7 @@ class AutoTrainer:
"""
# Check whether training is needed
should_train, reason = self.should_train(persona_info, force)
if not should_train:
logger.debug(f"[AutoTrainer] {reason}, skipping training")
return False, None
@@ -236,7 +235,7 @@ class AutoTrainer:
# 创建"latest"符号链接
self._create_latest_link(model_path)
logger.info(f"[自动训练器] 训练完成!")
logger.info("[自动训练器] 训练完成!")
logger.info(f" - 模型: {model_path.name}")
logger.info(f" - 准确率: {metrics.get('test_accuracy', 0):.4f}")
@@ -255,18 +254,18 @@ class AutoTrainer:
model_path: path to the model file
"""
latest_path = self.model_dir / "semantic_interest_latest.pkl"
try:
# Remove the old link
if latest_path.exists() or latest_path.is_symlink():
latest_path.unlink()
# Create the new link (symlinks need admin rights on Windows, so copy instead)
import shutil
shutil.copy2(model_path, latest_path)
logger.info(f"[AutoTrainer] latest model updated")
logger.info("[AutoTrainer] latest model updated")
except Exception as e:
logger.warning(f"[AutoTrainer] Failed to create the latest link: {e}")
@@ -283,9 +282,9 @@ class AutoTrainer:
"""
# Check whether a task is already running
if self._scheduled_task_running:
logger.info(f"[AutoTrainer] Scheduled task already running, skipping duplicate start")
logger.info("[AutoTrainer] Scheduled task already running, skipping duplicate start")
return
self._scheduled_task_running = True
logger.info(f"[AutoTrainer] Starting the scheduled training task, interval: {interval_hours}h")
logger.info(f"[AutoTrainer] Current persona hash: {self._calculate_persona_hash(persona_info)[:8]}")
@@ -294,13 +293,13 @@ class AutoTrainer:
try:
# Check and train
trained, model_path = await self.auto_train_if_needed(persona_info)
if trained:
logger.info(f"[AutoTrainer] Scheduled training finished: {model_path}")
# Wait for the next check
await asyncio.sleep(interval_hours * 3600)
except Exception as e:
logger.error(f"[AutoTrainer] Scheduled training error: {e}")
# After an error, wait a shorter time and retry
@@ -316,24 +315,24 @@ class AutoTrainer:
Path to the model file, or None if none exists
"""
persona_hash = self._calculate_persona_hash(persona_info)
# Look for a matching model
pattern = f"semantic_interest_auto_{persona_hash[:8]}_*.pkl"
matching_models = list(self.model_dir.glob(pattern))
if matching_models:
# Return the newest one
latest = max(matching_models, key=lambda p: p.stat().st_mtime)
logger.debug(f"[AutoTrainer] Found persona model: {latest.name}")
return latest
# Nothing found; fall back to latest
latest_path = self.model_dir / "semantic_interest_latest.pkl"
if latest_path.exists():
logger.debug(f"[AutoTrainer] Using the latest model")
logger.debug("[AutoTrainer] Using the latest model")
return latest_path
logger.warning(f"[AutoTrainer] No usable model found")
logger.warning("[AutoTrainer] No usable model found")
return None
def cleanup_old_models(self, keep_count: int = 5):
@@ -345,20 +344,20 @@ class AutoTrainer:
try:
# Collect all auto-trained models
all_models = list(self.model_dir.glob("semantic_interest_auto_*.pkl"))
if len(all_models) <= keep_count:
return
# Sort by modification time
all_models.sort(key=lambda p: p.stat().st_mtime, reverse=True)
# Delete old models
for old_model in all_models[keep_count:]:
old_model.unlink()
logger.info(f"[AutoTrainer] Removed old model: {old_model.name}")
logger.info(f"[AutoTrainer] Model cleanup finished, kept {keep_count}")
except Exception as e:
logger.error(f"[AutoTrainer] Model cleanup failed: {e}")

View File

@@ -3,7 +3,6 @@
Sample messages from the database and annotate interest scores with an LLM
"""
import asyncio
import json
import random
from datetime import datetime, timedelta
@@ -11,7 +10,6 @@ from pathlib import Path
from typing import Any
from src.common.logger import get_logger
from src.config.config import global_config
logger = get_logger("semantic_interest.dataset")
@@ -111,16 +109,16 @@ class DatasetGenerator:
async def initialize(self):
"""初始化 LLM 客户端"""
try:
from src.llm_models.utils_model import LLMRequest
from src.config.config import model_config
from src.llm_models.utils_model import LLMRequest
# 使用 utilities 模型配置(标注更偏工具型)
if hasattr(model_config.model_task_config, 'utils'):
if hasattr(model_config.model_task_config, "utils"):
self.model_client = LLMRequest(
model_set=model_config.model_task_config.utils,
request_type="semantic_annotation"
)
logger.info(f"数据集生成器初始化完成,使用 utils 模型")
logger.info("数据集生成器初始化完成,使用 utils 模型")
else:
logger.error("未找到 utils 模型配置")
self.model_client = None
@@ -149,9 +147,9 @@ class DatasetGenerator:
Returns:
List of message samples
"""
from src.common.database.api.query import QueryBuilder
from src.common.database.core.models import Messages
from sqlalchemy import func, or_
logger.info(f"开始采样消息,时间范围: 最近 {days} 天,目标数量: {max_samples}")
@@ -174,14 +172,14 @@ class DatasetGenerator:
# Query conditions
cutoff_time = datetime.now() - timedelta(days=days)
cutoff_ts = cutoff_time.timestamp()
# Optimization: prefetch max_samples * 1.5 rows to offset messages filtered out for being too short
# This keeps enough samples while reducing query volume
prefetch_limit = int(max_samples * 1.5)
# Build the optimized query: limit at the database level, newest messages first
query_builder = QueryBuilder(Messages)
# Filters: time range + non-empty message text
messages = await query_builder.filter(
time__gte=cutoff_ts,
@@ -254,43 +252,43 @@ class DatasetGenerator:
await self.initialize()
logger.info(f"开始生成初始关键词数据集,温度={temperature},迭代{num_iterations}")
# 构造人格描述
persona_desc = self._format_persona_info(persona_info)
# 构造提示词
prompt = self.KEYWORD_GENERATION_PROMPT.format(
persona_info=persona_desc,
)
all_keywords_data = []
# 重复生成多次
for iteration in range(num_iterations):
try:
if not self.model_client:
logger.warning("LLM 客户端未初始化,跳过关键词生成")
break
logger.info(f"{iteration + 1}/{num_iterations} 次生成关键词...")
# 调用 LLM使用较高温度
response = await self.model_client.generate_response_async(
prompt=prompt,
max_tokens=1000, # 关键词列表需要较多token
temperature=temperature,
)
# 解析响应generate_response_async 返回元组)
response_text = response[0] if isinstance(response, tuple) else response
keywords_data = self._parse_keywords_response(response_text)
if keywords_data:
interested = keywords_data.get("interested", [])
not_interested = keywords_data.get("not_interested", [])
logger.info(f" 生成 {len(interested)} 个感兴趣关键词,{len(not_interested)} 个不感兴趣关键词")
# 转换为训练格式(标签 1 表示感兴趣,-1 表示不感兴趣)
for keyword in interested:
if keyword and keyword.strip():
@@ -300,7 +298,7 @@ class DatasetGenerator:
"source": "llm_generated_initial",
"iteration": iteration + 1,
})
for keyword in not_interested:
if keyword and keyword.strip():
all_keywords_data.append({
@@ -311,21 +309,21 @@ class DatasetGenerator:
})
else:
logger.warning(f"{iteration + 1} 次生成失败,未能解析关键词")
except Exception as e:
logger.error(f"{iteration + 1} 次关键词生成失败: {e}")
import traceback
traceback.print_exc()
logger.info(f"初始关键词数据集生成完成,共 {len(all_keywords_data)} 条(不去重)")
# 统计标签分布
label_counts = {}
for item in all_keywords_data:
label = item["label"]
label_counts[label] = label_counts.get(label, 0) + 1
logger.info(f"标签分布: {label_counts}")
return all_keywords_data
def _parse_keywords_response(self, response: str) -> dict | None:
@@ -344,20 +342,20 @@ class DatasetGenerator:
response = response.split("```json")[1].split("```")[0].strip()
elif "```" in response:
response = response.split("```")[1].split("```")[0].strip()
# Parse the JSON
import json_repair
response = json_repair.repair_json(response)
data = json.loads(response)
# Validate the shape
if isinstance(data, dict) and "interested" in data and "not_interested" in data:
if isinstance(data["interested"], list) and isinstance(data["not_interested"], list):
return data
logger.warning(f"Keyword response has an unexpected format: {data}")
return None
except json.JSONDecodeError as e:
logger.error(f"Failed to parse keyword JSON: {e}")
logger.debug(f"Response body: {response}")
@@ -437,10 +435,10 @@ class DatasetGenerator:
for i in range(0, len(messages), batch_size):
batch = messages[i : i + batch_size]
# Batch annotation (one LLM request handles multiple messages)
labels = await self._annotate_batch_llm(batch, persona_info)
# Save the results
for msg, label in zip(batch, labels):
annotated_data.append({
@@ -632,7 +630,7 @@ class DatasetGenerator:
# Extract the JSON payload
import re
json_match = re.search(r'```json\s*({.*?})\s*```', response, re.DOTALL)
json_match = re.search(r"```json\s*({.*?})\s*```", response, re.DOTALL)
if json_match:
json_str = json_match.group(1)
else:
@@ -642,7 +640,7 @@ class DatasetGenerator:
# Parse the JSON
labels_json = json_repair.repair_json(json_str)
labels_dict = json.loads(labels_json)  # verify it is valid JSON
# Convert to a list
labels = []
for i in range(1, expected_count + 1):
@@ -703,7 +701,7 @@ class DatasetGenerator:
Returns:
(list of texts, list of labels)
"""
with open(path, "r", encoding="utf-8") as f:
with open(path, encoding="utf-8") as f:
data = json.load(f)
texts = [item["message_text"] for item in data]
@@ -770,7 +768,7 @@ async def generate_training_dataset(
logger.info("=" * 60)
logger.info("步骤 3/3: LLM 标注真实消息")
logger.info("=" * 60)
# Note: not saved to a file; the annotated data is returned
annotated_messages = await generator.annotate_batch(
messages=messages,
@@ -783,21 +781,21 @@ async def generate_training_dataset(
logger.info("=" * 60)
logger.info("步骤 4/4: 合并数据集")
logger.info("=" * 60)
# 合并初始关键词和标注后的消息(不去重,保持所有重复项)
combined_dataset = []
# 添加初始关键词数据
if initial_keywords_data:
combined_dataset.extend(initial_keywords_data)
logger.info(f" + 初始关键词: {len(initial_keywords_data)}")
# 添加标注后的消息
combined_dataset.extend(annotated_messages)
logger.info(f" + 标注消息: {len(annotated_messages)}")
logger.info(f"✓ 合并后总计: {len(combined_dataset)} 条(不去重)")
# 统计标签分布
label_counts = {}
for item in combined_dataset:
@@ -809,7 +807,7 @@ async def generate_training_dataset(
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(output_path, "w", encoding="utf-8") as f:
json.dump(combined_dataset, f, ensure_ascii=False, indent=2)
logger.info("=" * 60)
logger.info(f"✓ 训练数据集已保存: {output_path}")
logger.info("=" * 60)

View File

@@ -3,7 +3,6 @@
Extracts TF-IDF features from Chinese messages using character-level n-grams
"""
from pathlib import Path
from sklearn.feature_extraction.text import TfidfVectorizer
@@ -70,10 +69,10 @@ class TfidfFeatureExtractor:
logger.info(f"开始训练 TF-IDF 向量化器,样本数: {len(texts)}")
self.vectorizer.fit(texts)
self.is_fitted = True
vocab_size = len(self.vectorizer.vocabulary_)
logger.info(f"TF-IDF 向量化器训练完成,词表大小: {vocab_size}")
return self
def transform(self, texts: list[str]):
@@ -87,7 +86,7 @@ class TfidfFeatureExtractor:
"""
if not self.is_fitted:
raise ValueError("向量化器尚未训练,请先调用 fit() 方法")
return self.vectorizer.transform(texts)
def fit_transform(self, texts: list[str]):
@@ -102,10 +101,10 @@ class TfidfFeatureExtractor:
logger.info(f"开始训练并转换 TF-IDF 向量,样本数: {len(texts)}")
result = self.vectorizer.fit_transform(texts)
self.is_fitted = True
vocab_size = len(self.vectorizer.vocabulary_)
logger.info(f"TF-IDF 向量化完成,词表大小: {vocab_size}")
return result
def get_feature_names(self) -> list[str]:
@@ -116,7 +115,7 @@ class TfidfFeatureExtractor:
"""
if not self.is_fitted:
raise ValueError("向量化器尚未训练")
return self.vectorizer.get_feature_names_out().tolist()
def get_vocabulary_size(self) -> int:
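A hedged usage sketch of the character n-gram TF-IDF setup this wrapper manages (the ngram_range is an assumption based on the configs elsewhere in this commit):
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(analyzer="char", ngram_range=(2, 4), lowercase=True)
X = vec.fit_transform(["今天天气不错", "我喜欢看天文新闻"])  # sparse TF-IDF matrix
print(X.shape, len(vec.vocabulary_))  # (2, n_features), vocabulary size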

View File

@@ -4,17 +4,15 @@
"""
import time
from pathlib import Path
from typing import Any
import joblib
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from src.common.logger import get_logger
from src.chat.semantic_interest.features_tfidf import TfidfFeatureExtractor
from src.common.logger import get_logger
logger = get_logger("semantic_interest.model")
@@ -173,12 +171,12 @@ class SemanticInterestModel:
# Ensure the class order is [-1, 0, 1]
classes = self.clf.classes_
if not np.array_equal(classes, [-1, 0, 1]):
# Needs reordering
sorted_proba = np.zeros_like(proba)
# Needs reordering/padding (guarantee 3 output columns even for binary models)
sorted_proba = np.zeros((proba.shape[0], 3), dtype=proba.dtype)
for i, cls in enumerate([-1, 0, 1]):
idx = np.where(classes == cls)[0]
if len(idx) > 0:
sorted_proba[:, i] = proba[:, idx[0]]
sorted_proba[:, i] = proba[:, int(idx[0])]
return sorted_proba
return proba
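A standalone sketch of the column-alignment problem handled above: sklearn orders predict_proba columns by clf.classes_ (the labels seen during fit), so a binary fit must be padded out to the fixed [-1, 0, 1] layout:
import numpy as np
from sklearn.linear_model import LogisticRegression
X = np.random.rand(6, 3)
y = np.array([1, 1, 0, 0, 1, 0])          # only {0, 1} seen: classes_ == [0, 1]
clf = LogisticRegression().fit(X, y)
proba = clf.predict_proba(X)               # shape (6, 2), columns follow classes_
out = np.zeros((proba.shape[0], 3))        # fixed [-1, 0, 1] layout
for i, cls in enumerate([-1, 0, 1]):
    idx = np.where(clf.classes_ == cls)[0]
    if len(idx) > 0:
        out[:, i] = proba[:, int(idx[0])]  # the missing class -1 stays at 0.0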

View File

@@ -16,7 +16,7 @@ from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable
from typing import Any
import numpy as np
@@ -58,16 +58,16 @@ class FastScorerConfig:
analyzer: str = "char"
ngram_range: tuple[int, int] = (2, 4)
lowercase: bool = True
# Weight pruning threshold (weights with absolute value below this are treated as 0)
weight_prune_threshold: float = 1e-4
# Keep only the top-k weights (0 means no limit)
top_k_weights: int = 0
# Sigmoid scaling factor
sigmoid_alpha: float = 1.0
# Scoring timeout (seconds)
score_timeout: float = 2.0
@@ -88,30 +88,35 @@ class FastScorer:
3. Look up each w'_i and accumulate the sum
4. Map through a sigmoid to [0, 1]
"""
def __init__(self, config: FastScorerConfig | None = None):
"""初始化快速评分器"""
self.config = config or FastScorerConfig()
# 融合后的权重字典: {token: combined_weight}
# 对于三分类,我们计算 z_interest = z_pos - z_neg
# 所以 combined_weight = (w_pos - w_neg) * idf
self.token_weights: dict[str, float] = {}
# 偏置项: bias_pos - bias_neg
self.bias: float = 0.0
# 输出变换interest = output_bias + output_scale * sigmoid(z)
# 用于兼容二分类(缺少中立/负类)等情况
self.output_bias: float = 0.0
self.output_scale: float = 1.0
# 元信息
self.meta: dict[str, Any] = {}
self.is_loaded = False
# 统计
self.total_scores = 0
self.total_time = 0.0
# n-gram 正则(预编译)
self._tokenize_pattern = re.compile(r'\s+')
self._tokenize_pattern = re.compile(r"\s+")
@classmethod
def from_sklearn_model(
cls,
@@ -132,47 +137,92 @@ class FastScorer:
scorer = cls(config)
scorer._extract_weights(vectorizer, model)
return scorer
def _extract_weights(self, vectorizer, model):
"""从 sklearn 模型提取并融合权重
将 TF-IDF 的 idf 和 LR 的权重合并为单一的 token→weight 字典
"""
# 获取底层 sklearn 对象
if hasattr(vectorizer, 'vectorizer'):
if hasattr(vectorizer, "vectorizer"):
# TfidfFeatureExtractor 包装类
tfidf = vectorizer.vectorizer
else:
tfidf = vectorizer
if hasattr(model, 'clf'):
if hasattr(model, "clf"):
# SemanticInterestModel 包装类
clf = model.clf
else:
clf = model
# 获取词表和 IDF
vocabulary = tfidf.vocabulary_ # {token: index}
idf = tfidf.idf_ # numpy array, shape (n_features,)
# 获取 LR 权重
# clf.coef_ shape: (n_classes, n_features) 对于多分类
# classes_ 顺序应该是 [-1, 0, 1]
coef = clf.coef_ # shape (3, n_features)
intercept = clf.intercept_ # shape (3,)
classes = clf.classes_
# Find the indices of -1 and 1
idx_neg = np.where(classes == -1)[0][0]
idx_pos = np.where(classes == 1)[0][0]
# Weights for z_interest = z_pos - z_neg
w_interest = coef[idx_pos] - coef[idx_neg] # shape (n_features,)
b_interest = intercept[idx_pos] - intercept[idx_neg]
# - multiclass: coef_.shape == (n_classes, n_features)
# - binary: coef_.shape == (1, n_features), the logit of classes_[1]
coef = np.asarray(clf.coef_)
intercept = np.asarray(clf.intercept_)
classes = np.asarray(clf.classes_)
# Default output transform
self.output_bias = 0.0
self.output_scale = 1.0
extraction_mode = "unknown"
b_interest: float
if len(classes) == 2 and coef.shape[0] == 1:
# Binary: sigmoid(w·x + b) == P(classes_[1])
w_interest = coef[0]
b_interest = float(intercept[0]) if intercept.size else 0.0
extraction_mode = "binary"
# Compatible with the interest definition: interest = P(1) + 0.5*P(0)
# In binary models the missing class probability is 0 or (1-P(pos)), which reduces to a linear transform
class_set = {int(c) for c in classes.tolist()}
pos_label = int(classes[1])
if class_set == {-1, 1} and pos_label == 1:
# interest = P(1)
self.output_bias, self.output_scale = 0.0, 1.0
elif class_set == {0, 1} and pos_label == 1:
# P(0) = 1 - P(1) => interest = P(1) + 0.5*(1-P(1)) = 0.5 + 0.5*P(1)
self.output_bias, self.output_scale = 0.5, 0.5
elif class_set == {-1, 0} and pos_label == 0:
# interest = 0.5*P(0)
self.output_bias, self.output_scale = 0.0, 0.5
else:
logger.warning(f"[FastScorer] 非标准二分类标签 {classes.tolist()},将直接使用 sigmoid(logit)")
else:
# 多分类/非标准:尽量构造一个可用的 z
if coef.ndim != 2 or coef.shape[0] != len(classes):
raise ValueError(
f"不支持的模型权重形状: coef={coef.shape}, classes={classes.tolist()}"
)
if (-1 in classes) and (1 in classes):
# 对三分类:使用 z_pos - z_neg 近似兴趣 logit忽略中立
idx_neg = int(np.where(classes == -1)[0][0])
idx_pos = int(np.where(classes == 1)[0][0])
w_interest = coef[idx_pos] - coef[idx_neg]
b_interest = float(intercept[idx_pos] - intercept[idx_neg])
extraction_mode = "multiclass_diff"
elif 1 in classes:
# 退化:仅使用 class=1 的 logit仍然输出 sigmoid(logit)
idx_pos = int(np.where(classes == 1)[0][0])
w_interest = coef[idx_pos]
b_interest = float(intercept[idx_pos])
extraction_mode = "multiclass_pos_only"
logger.warning(f"[FastScorer] 模型缺少 -1 类别: {classes.tolist()},将仅使用 class=1 logit")
else:
raise ValueError(f"模型缺少 class=1无法构建兴趣评分: classes={classes.tolist()}")
# Fuse: combined_weight = w_interest * idf
combined_weights = w_interest * idf
# Build the token→weight dict
token_weights = {}
for token, idx in vocabulary.items():
@@ -180,17 +230,17 @@ class FastScorer:
# Weight pruning
if abs(weight) >= self.config.weight_prune_threshold:
token_weights[token] = weight
# If a top-k limit is set
if self.config.top_k_weights > 0 and len(token_weights) > self.config.top_k_weights:
# Sort by absolute value and keep the top-k
sorted_items = sorted(token_weights.items(), key=lambda x: abs(x[1]), reverse=True)
token_weights = dict(sorted_items[:self.config.top_k_weights])
self.token_weights = token_weights
self.bias = float(b_interest)
self.is_loaded = True
# Update metadata
self.meta = {
"original_vocab_size": len(vocabulary),
@@ -200,14 +250,18 @@ class FastScorer:
"top_k_weights": self.config.top_k_weights,
"bias": self.bias,
"ngram_range": self.config.ngram_range,
"classes": classes.tolist(),
"extraction_mode": extraction_mode,
"output_bias": self.output_bias,
"output_scale": self.output_scale,
}
logger.info(
f"[FastScorer] Weight extraction finished: "
f"original vocab={len(vocabulary)}, after pruning={len(token_weights)}, "
f"prune ratio={self.meta['prune_ratio']:.2%}"
)
def _tokenize(self, text: str) -> list[str]:
"""将文本转换为 n-gram tokens
@@ -215,17 +269,17 @@ class FastScorer:
"""
if self.config.lowercase:
text = text.lower()
# Character-level n-grams
min_n, max_n = self.config.ngram_range
tokens = []
for n in range(min_n, max_n + 1):
for i in range(len(text) - n + 1):
tokens.append(text[i:i + n])
return tokens
def _compute_tf(self, tokens: list[str]) -> dict[str, float]:
"""计算词频TF
@@ -233,7 +287,7 @@ class FastScorer:
Simplified to raw counts; the difference is negligible for short messages
"""
return dict(Counter(tokens))
def score(self, text: str) -> float:
"""计算单条消息的语义兴趣度
@@ -245,25 +299,25 @@ class FastScorer:
"""
if not self.is_loaded:
raise ValueError("评分器尚未加载,请先调用 from_sklearn_model() 或 load()")
start_time = time.time()
try:
# 1. Tokenize
tokens = self._tokenize(text)
if not tokens:
return 0.5  # empty text returns the neutral value
# 2. Compute TF
tf = self._compute_tf(tokens)
# 3. Weighted sum: z = Σ (w'_i * tf_i) + b
z = self.bias
for token, count in tf.items():
if token in self.token_weights:
z += self.token_weights[token] * count
# 4. Sigmoid transform
# interest = 1 / (1 + exp(-α * z))
alpha = self.config.sigmoid_alpha
@@ -271,29 +325,32 @@ class FastScorer:
interest = 1.0 / (1.0 + math.exp(-alpha * z))
except OverflowError:
interest = 0.0 if z < 0 else 1.0
interest = self.output_bias + self.output_scale * interest
interest = max(0.0, min(1.0, interest))
# Stats
self.total_scores += 1
self.total_time += time.time() - start_time
return interest
except Exception as e:
logger.error(f"[FastScorer] 评分失败: {e}, 消息: {text[:50]}")
return 0.5
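A self-contained sketch (illustrative weights, not the trained model) of the scoring math above: z = bias + Σ w'_token · tf_token, then interest = output_bias + output_scale · sigmoid(α·z):
import math
from collections import Counter
token_weights = {"天文": 0.8, "广告": -0.9}  # hypothetical fused weights
bias, alpha, out_bias, out_scale = 0.0, 1.0, 0.0, 1.0
def fast_score(text: str) -> float:
    tokens = [text[i:i + 2] for i in range(len(text) - 1)]  # char 2-grams only
    tf = Counter(tokens)
    z = bias + sum(token_weights.get(t, 0.0) * c for t, c in tf.items())
    s = 1.0 / (1.0 + math.exp(-alpha * z))
    return max(0.0, min(1.0, out_bias + out_scale * s))
print(fast_score("我爱天文"))  # ≈ 0.69: one positively weighted 2-gram fires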
def score_batch(self, texts: list[str]) -> list[float]:
"""批量计算兴趣度"""
if not texts:
return []
return [self.score(text) for text in texts]
async def score_async(self, text: str, timeout: float | None = None) -> float:
"""异步计算兴趣度(使用全局线程池)"""
timeout = timeout or self.config.score_timeout
executor = get_global_executor()
loop = asyncio.get_running_loop()
try:
return await asyncio.wait_for(
loop.run_in_executor(executor, self.score, text),
@@ -302,16 +359,16 @@ class FastScorer:
except asyncio.TimeoutError:
logger.warning(f"[FastScorer] 评分超时({timeout}s): {text[:30]}...")
return 0.5
async def score_batch_async(self, texts: list[str], timeout: float | None = None) -> list[float]:
"""异步批量计算兴趣度"""
if not texts:
return []
timeout = timeout or self.config.score_timeout * len(texts)
executor = get_global_executor()
loop = asyncio.get_running_loop()
try:
return await asyncio.wait_for(
loop.run_in_executor(executor, self.score_batch, texts),
@@ -320,7 +377,7 @@ class FastScorer:
except asyncio.TimeoutError:
logger.warning(f"[FastScorer] 批量评分超时({timeout}s), 批次大小: {len(texts)}")
return [0.5] * len(texts)
def get_statistics(self) -> dict[str, Any]:
"""获取统计信息"""
avg_time = self.total_time / self.total_scores if self.total_scores > 0 else 0
@@ -332,12 +389,12 @@ class FastScorer:
"vocab_size": len(self.token_weights),
"meta": self.meta,
}
def save(self, path: Path | str):
"""保存快速评分器"""
import joblib
path = Path(path)
bundle = {
"token_weights": self.token_weights,
"bias": self.bias,
@@ -352,25 +409,25 @@ class FastScorer:
},
"meta": self.meta,
}
joblib.dump(bundle, path)
logger.info(f"[FastScorer] 已保存到: {path}")
@classmethod
def load(cls, path: Path | str) -> "FastScorer":
"""加载快速评分器"""
import joblib
path = Path(path)
bundle = joblib.load(path)
config = FastScorerConfig(**bundle["config"])
scorer = cls(config)
scorer.token_weights = bundle["token_weights"]
scorer.bias = bundle["bias"]
scorer.meta = bundle.get("meta", {})
scorer.is_loaded = True
logger.info(f"[FastScorer] 已从 {path} 加载,词表大小: {len(scorer.token_weights)}")
return scorer
@@ -391,7 +448,7 @@ class BatchScoringQueue:
Accumulates a small batch of messages and scores them together to improve CPU utilization
"""
def __init__(
self,
scorer: FastScorer,
@@ -408,40 +465,40 @@ class BatchScoringQueue:
self.scorer = scorer
self.batch_size = batch_size
self.flush_interval = flush_interval_ms / 1000.0
self._pending: list[ScoringRequest] = []
self._lock = asyncio.Lock()
self._flush_task: asyncio.Task | None = None
self._running = False
# Stats
self.total_batches = 0
self.total_requests = 0
async def start(self):
"""启动批处理队列"""
if self._running:
return
self._running = True
self._flush_task = asyncio.create_task(self._flush_loop())
logger.info(f"[BatchQueue] 启动batch_size={self.batch_size}, interval={self.flush_interval*1000}ms")
async def stop(self):
"""停止批处理队列"""
self._running = False
if self._flush_task:
self._flush_task.cancel()
try:
await self._flush_task
except asyncio.CancelledError:
pass
# 处理剩余请求
await self._flush()
logger.info("[BatchQueue] 已停止")
async def score(self, text: str) -> float:
"""提交评分请求并等待结果
@@ -453,56 +510,56 @@ class BatchScoringQueue:
"""
loop = asyncio.get_running_loop()
future = loop.create_future()
request = ScoringRequest(text=text, future=future)
async with self._lock:
self._pending.append(request)
self.total_requests += 1
# Batch size reached; flush immediately
if len(self._pending) >= self.batch_size:
asyncio.create_task(self._flush())
return await future
async def _flush_loop(self):
"""定时刷新循环"""
while self._running:
await asyncio.sleep(self.flush_interval)
await self._flush()
async def _flush(self):
"""处理当前待处理的请求"""
async with self._lock:
if not self._pending:
return
batch = self._pending.copy()
self._pending.clear()
if not batch:
return
self.total_batches += 1
try:
# Batch scoring
texts = [req.text for req in batch]
scores = await self.scorer.score_batch_async(texts)
# Dispatch the results
for req, score in zip(batch, scores):
if not req.future.done():
req.future.set_result(score)
except Exception as e:
logger.error(f"[BatchQueue] 批量评分失败: {e}")
# 返回默认值
for req in batch:
if not req.future.done():
req.future.set_result(0.5)
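A hedged usage sketch of this queue (a loaded FastScorer is assumed; arguments are passed positionally, as get_fast_scorer does below):
import asyncio
async def demo(scorer: FastScorer) -> None:
    queue = BatchScoringQueue(scorer, 16, 10)  # batch of 16, flush every 10 ms
    await queue.start()
    # Concurrent awaiters get coalesced into a single score_batch_async call.
    scores = await asyncio.gather(*(queue.score(t) for t in ["你好", "天文新闻"]))
    print(scores)
    await queue.stop()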
def get_statistics(self) -> dict[str, Any]:
"""获取统计信息"""
avg_batch_size = self.total_requests / self.total_batches if self.total_batches > 0 else 0
@@ -543,22 +600,22 @@ async def get_fast_scorer(
FastScorer 或 BatchScoringQueue 实例
"""
import joblib
model_path = Path(model_path)
path_key = str(model_path.resolve())
# Check for an existing instance
if not force_reload:
if use_batch_queue and path_key in _batch_queue_instances:
return _batch_queue_instances[path_key]
elif not use_batch_queue and path_key in _fast_scorer_instances:
return _fast_scorer_instances[path_key]
# Load the model
logger.info(f"[OptimizedScorer] Loading model: {model_path}")
bundle = joblib.load(model_path)
# Check whether it is a FastScorer bundle or a sklearn bundle
if "token_weights" in bundle:
# FastScorer format
@@ -567,22 +624,22 @@ async def get_fast_scorer(
# sklearn bundle format; needs conversion
vectorizer = bundle["vectorizer"]
model = bundle["model"]
config = FastScorerConfig(
ngram_range=vectorizer.get_config().get("ngram_range", (2, 4)),
weight_prune_threshold=1e-4,
)
scorer = FastScorer.from_sklearn_model(vectorizer, model, config)
_fast_scorer_instances[path_key] = scorer
# If a batching queue is requested
if use_batch_queue:
queue = BatchScoringQueue(scorer, batch_size, flush_interval_ms)
await queue.start()
_batch_queue_instances[path_key] = queue
return queue
return scorer
@@ -602,40 +659,40 @@ def convert_sklearn_to_fast(
FastScorer 实例
"""
import joblib
sklearn_model_path = Path(sklearn_model_path)
bundle = joblib.load(sklearn_model_path)
vectorizer = bundle["vectorizer"]
model = bundle["model"]
# Infer the n-gram range from the vectorizer config
if config is None:
vconfig = vectorizer.get_config() if hasattr(vectorizer, 'get_config') else {}
vconfig = vectorizer.get_config() if hasattr(vectorizer, "get_config") else {}
config = FastScorerConfig(
ngram_range=vconfig.get("ngram_range", (2, 4)),
weight_prune_threshold=1e-4,
)
scorer = FastScorer.from_sklearn_model(vectorizer, model, config)
# Save the converted model
if output_path:
output_path = Path(output_path)
scorer.save(output_path)
return scorer
def clear_fast_scorer_instances():
"""清空所有快速评分器实例"""
global _fast_scorer_instances, _batch_queue_instances
# 停止所有批处理队列
for queue in _batch_queue_instances.values():
asyncio.create_task(queue.stop())
_fast_scorer_instances.clear()
_batch_queue_instances.clear()
logger.info("[优化评分器] 已清空所有实例")

View File

@@ -16,11 +16,10 @@ from pathlib import Path
from typing import Any
import joblib
import numpy as np
from src.common.logger import get_logger
from src.chat.semantic_interest.features_tfidf import TfidfFeatureExtractor
from src.chat.semantic_interest.model_lr import SemanticInterestModel
from src.common.logger import get_logger
logger = get_logger("semantic_interest.scorer")
@@ -74,7 +73,7 @@ class SemanticInterestScorer:
self.model: SemanticInterestModel | None = None
self.meta: dict[str, Any] = {}
self.is_loaded = False
# Fast-scorer mode
self._use_fast_scorer = use_fast_scorer
self._fast_scorer = None # FastScorer 实例
@@ -83,6 +82,45 @@ class SemanticInterestScorer:
self.total_scores = 0
self.total_time = 0.0
def _get_underlying_clf(self):
model = self.model
if model is None:
return None
return model.clf if hasattr(model, "clf") else model
def _proba_to_three(self, proba_row) -> tuple[float, float, float]:
"""将任意 predict_proba 输出对齐为 (-1, 0, 1) 三类概率。
兼容情况:
- 三分类classes_ 可能不是 [-1,0,1],需要按 classes_ 重排
- 二分类classes_ 可能是 [-1,1] / [0,1] / [-1,0]
- 包装模型:可能已输出固定 3 列(按 [-1,0,1])但 classes_ 仍为二类
"""
# numpy array / list 都支持 len() 与迭代
proba_row = list(proba_row)
clf = self._get_underlying_clf()
classes = getattr(clf, "classes_", None)
if classes is not None and len(classes) == len(proba_row):
mapping = {int(cls): float(p) for cls, p in zip(classes, proba_row)}
return (
mapping.get(-1, 0.0),
mapping.get(0, 0.0),
mapping.get(1, 0.0),
)
# Wrapper-model compatibility: output fixed as [-1, 0, 1]
if len(proba_row) == 3:
return float(proba_row[0]), float(proba_row[1]), float(proba_row[2])
# Conservative fallback when classes_ is missing (avoid raising where possible)
if len(proba_row) == 2:
return float(proba_row[0]), 0.0, float(proba_row[1])
if len(proba_row) == 1:
return 0.0, float(proba_row[0]), 0.0
raise ValueError(f"不支持的 proba 形状: len={len(proba_row)}")
def load(self):
"""同步加载模型(阻塞)"""
if not self.model_path.exists():
@@ -101,18 +139,22 @@ class SemanticInterestScorer:
# If fast-scorer mode is enabled, create a FastScorer
if self._use_fast_scorer:
from src.chat.semantic_interest.optimized_scorer import FastScorer, FastScorerConfig
config = FastScorerConfig(
ngram_range=self.vectorizer.get_config().get("ngram_range", (2, 3)),
weight_prune_threshold=1e-4,
)
self._fast_scorer = FastScorer.from_sklearn_model(
self.vectorizer, self.model, config
)
logger.info(
f"[FastScorer] Enabled; vocabulary pruned from {self.vectorizer.get_vocabulary_size()} "
f"to {len(self._fast_scorer.token_weights)}"
)
try:
self._fast_scorer = FastScorer.from_sklearn_model(
self.vectorizer, self.model, config
)
logger.info(
f"[FastScorer] Enabled; vocabulary pruned from {self.vectorizer.get_vocabulary_size()} "
f"to {len(self._fast_scorer.token_weights)}"
)
except Exception as e:
self._fast_scorer = None
logger.warning(f"[FastScorer] Initialization failed; falling back to the sklearn scoring path: {e}")
self.is_loaded = True
load_time = time.time() - start_time
@@ -128,7 +170,7 @@ class SemanticInterestScorer:
except Exception as e:
logger.error(f"模型加载失败: {e}")
raise
async def load_async(self):
"""异步加载模型(非阻塞)"""
if not self.model_path.exists():
@@ -150,18 +192,22 @@ class SemanticInterestScorer:
# If fast-scorer mode is enabled, create a FastScorer
if self._use_fast_scorer:
from src.chat.semantic_interest.optimized_scorer import FastScorer, FastScorerConfig
config = FastScorerConfig(
ngram_range=self.vectorizer.get_config().get("ngram_range", (2, 3)),
weight_prune_threshold=1e-4,
)
self._fast_scorer = FastScorer.from_sklearn_model(
self.vectorizer, self.model, config
)
logger.info(
f"[FastScorer] Enabled; vocabulary pruned from {self.vectorizer.get_vocabulary_size()} "
f"to {len(self._fast_scorer.token_weights)}"
)
try:
self._fast_scorer = FastScorer.from_sklearn_model(
self.vectorizer, self.model, config
)
logger.info(
f"[FastScorer] Enabled; vocabulary pruned from {self.vectorizer.get_vocabulary_size()} "
f"to {len(self._fast_scorer.token_weights)}"
)
except Exception as e:
self._fast_scorer = None
logger.warning(f"[FastScorer] Initialization failed; falling back to the sklearn scoring path: {e}")
self.is_loaded = True
load_time = time.time() - start_time
@@ -173,7 +219,7 @@ class SemanticInterestScorer:
if self.meta:
logger.info(f"模型元信息: {self.meta}")
# 预热模型
await self._warmup_async()
@@ -186,7 +232,7 @@ class SemanticInterestScorer:
logger.info("重新加载模型...")
self.is_loaded = False
self.load()
async def reload_async(self):
"""异步重新加载模型"""
logger.info("异步重新加载模型...")
@@ -219,8 +265,7 @@ class SemanticInterestScorer:
# Predict probabilities
proba = self.model.predict_proba(X)[0]
# proba order is [-1, 0, 1]
p_neg, p_neu, p_pos = proba
p_neg, p_neu, p_pos = self._proba_to_three(proba)
# Interest score strategy:
# interest = P(1) + 0.5 * P(0)
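# Worked example (illustrative numbers): with P(-1)=0.2, P(0)=0.3, P(1)=0.5
# the score is 0.5 + 0.5*0.3 = 0.65; neutral probability mass pulls the
# result toward 0.5 instead of the extremes.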
@@ -283,7 +328,7 @@ class SemanticInterestScorer:
# Prefer the FastScorer
if self._fast_scorer is not None:
interests = self._fast_scorer.score_batch(texts)
# Stats
self.total_scores += len(texts)
self.total_time += time.time() - start_time
@@ -298,7 +343,8 @@ class SemanticInterestScorer:
# Compute interest scores
interests = []
for p_neg, p_neu, p_pos in proba:
for row in proba:
_, p_neu, p_pos = self._proba_to_three(row)
interest = float(p_pos + 0.5 * p_neu)
interest = max(0.0, min(1.0, interest))
interests.append(interest)
@@ -325,11 +371,11 @@ class SemanticInterestScorer:
"""
if not texts:
return []
# Compute a dynamic timeout
if timeout is None:
timeout = DEFAULT_SCORE_TIMEOUT * len(texts)
# Use the global thread pool
executor = _get_global_executor()
loop = asyncio.get_running_loop()
@@ -341,7 +387,7 @@ class SemanticInterestScorer:
except asyncio.TimeoutError:
logger.warning(f"批量兴趣度计算超时({timeout}秒),批次大小: {len(texts)}")
return [0.5] * len(texts)
def _warmup(self, sample_texts: list[str] | None = None):
"""预热模型(执行几次推理以优化性能)
@@ -350,26 +396,26 @@ class SemanticInterestScorer:
"""
if not self.is_loaded:
return
if sample_texts is None:
sample_texts = [
"你好",
"今天天气怎么样?",
"我对这个话题很感兴趣"
]
logger.debug(f"开始预热模型,样本数: {len(sample_texts)}")
start_time = time.time()
for text in sample_texts:
try:
self.score(text)
except Exception:
pass  # ignore warmup errors
warmup_time = time.time() - start_time
logger.debug(f"Model warmup finished in {warmup_time:.3f}s")
async def _warmup_async(self, sample_texts: list[str] | None = None):
"""异步预热模型"""
loop = asyncio.get_event_loop()
@@ -391,7 +437,7 @@ class SemanticInterestScorer:
proba = self.model.predict_proba(X)[0]
pred_label = self.model.predict(X)[0]
p_neg, p_neu, p_pos = proba
p_neg, p_neu, p_pos = self._proba_to_three(proba)
interest = float(p_pos + 0.5 * p_neu)
return {
@@ -429,11 +475,11 @@ class SemanticInterestScorer:
"fast_scorer_enabled": self._fast_scorer is not None,
"meta": self.meta,
}
# If the FastScorer is enabled, include its statistics
if self._fast_scorer is not None:
stats["fast_scorer_stats"] = self._fast_scorer.get_statistics()
return stats
def __repr__(self) -> str:
@@ -465,7 +511,7 @@ class ModelManager:
self.current_version: str | None = None
self.current_persona_info: dict[str, Any] | None = None
self._lock = asyncio.Lock()
# Auto-trainer integration
self._auto_trainer = None
self._auto_training_started = False  # prevents duplicate auto-training startup
@@ -495,7 +541,7 @@ class ModelManager:
# Get the scorer via the singleton
scorer = await get_semantic_scorer(model_path, force_reload=False, use_async=use_async)
self.current_scorer = scorer
self.current_version = version
self.current_persona_info = persona_info
@@ -550,30 +596,30 @@ class ModelManager:
try:
# Lazy import to avoid circular dependencies
from src.chat.semantic_interest.auto_trainer import get_auto_trainer
if self._auto_trainer is None:
self._auto_trainer = get_auto_trainer()
# Check whether training is needed
trained, model_path = await self._auto_trainer.auto_train_if_needed(
persona_info=persona_info,
days=7,
max_samples=1000,  # initial training uses 1000 messages
)
if trained and model_path:
logger.info(f"[ModelManager] Using the newly trained model: {model_path.name}")
return model_path
# Get an existing persona model
model_path = self._auto_trainer.get_model_for_persona(persona_info)
if model_path:
return model_path
# Fall back to latest
logger.warning("[ModelManager] No persona model found, using latest")
return self._get_latest_model()
except Exception as e:
logger.error(f"[ModelManager] Failed to get the persona model: {e}")
return self._get_latest_model()
@@ -590,9 +636,9 @@ class ModelManager:
# Check whether the persona changed
if self.current_persona_info == persona_info:
return False
logger.info("[ModelManager] Persona change detected, reloading the model...")
try:
await self.load_model(version="auto", persona_info=persona_info)
return True
@@ -611,25 +657,25 @@ class ModelManager:
async with self._lock:
# Check whether it has already been started
if self._auto_training_started:
logger.debug(f"[ModelManager] Auto-training task already started, skipping")
logger.debug("[ModelManager] Auto-training task already started, skipping")
return
try:
from src.chat.semantic_interest.auto_trainer import get_auto_trainer
if self._auto_trainer is None:
self._auto_trainer = get_auto_trainer()
logger.info(f"[ModelManager] Starting the auto-training task, interval: {interval_hours}h")
# Mark as started
self._auto_training_started = True
# Run as a background task
asyncio.create_task(
self._auto_trainer.scheduled_train(persona_info, interval_hours)
)
except Exception as e:
logger.error(f"[ModelManager] Failed to start auto-training: {e}")
self._auto_training_started = False  # reset the flag on failure
@@ -659,7 +705,7 @@ async def get_semantic_scorer(
"""
model_path = Path(model_path)
path_key = str(model_path.resolve())  # use the absolute path as the key
async with _instance_lock:
# Check for an existing instance
if not force_reload and path_key in _scorer_instances:
@@ -669,7 +715,7 @@ async def get_semantic_scorer(
return scorer
else:
logger.info(f"[单例] 评分器未加载,重新加载: {model_path.name}")
# 创建或重新加载实例
if path_key not in _scorer_instances:
logger.info(f"[单例] 创建新的评分器实例: {model_path.name}")
@@ -678,13 +724,13 @@ async def get_semantic_scorer(
else:
scorer = _scorer_instances[path_key]
logger.info(f"[单例] 强制重新加载评分器: {model_path.name}")
# 加载模型
if use_async:
await scorer.load_async()
else:
scorer.load()
return scorer
@@ -705,14 +751,14 @@ def get_semantic_scorer_sync(
"""
model_path = Path(model_path)
path_key = str(model_path.resolve())
# Check for an existing instance
if not force_reload and path_key in _scorer_instances:
scorer = _scorer_instances[path_key]
if scorer.is_loaded:
logger.debug(f"[Singleton] Reusing the loaded scorer: {model_path.name}")
return scorer
# Create or reload the instance
if path_key not in _scorer_instances:
logger.info(f"[Singleton] Creating a new scorer instance: {model_path.name}")
@@ -721,7 +767,7 @@ def get_semantic_scorer_sync(
else:
scorer = _scorer_instances[path_key]
logger.info(f"[单例] 强制重新加载评分器: {model_path.name}")
# 加载模型
scorer.load()
return scorer

View File

@@ -3,16 +3,15 @@
Unified entry point for the training pipeline: data sampling, annotation, training, and evaluation
"""
import asyncio
from datetime import datetime
from pathlib import Path
from typing import Any
import joblib
from src.common.logger import get_logger
from src.chat.semantic_interest.dataset import DatasetGenerator, generate_training_dataset
from src.chat.semantic_interest.model_lr import train_semantic_model
from src.common.logger import get_logger
logger = get_logger("semantic_interest.trainer")
@@ -110,7 +109,6 @@ class SemanticInterestTrainer:
logger.info(f"开始训练模型,数据集: {dataset_path}")
# 加载数据集
from src.chat.semantic_interest.dataset import DatasetGenerator
texts, labels = DatasetGenerator.load_dataset(dataset_path)
# Train the model

View File

@@ -13,7 +13,7 @@ from src.common.data_models.database_data_model import DatabaseUserInfo
# MessageRecv has been removed; DatabaseMessages is used now
from src.common.logger import get_logger
from src.common.message_repository import count_and_length_messages, count_messages, find_messages
from src.common.message_repository import count_and_length_messages, find_messages
from src.config.config import global_config, model_config
from src.llm_models.utils_model import LLMRequest
from src.person_info.person_info import PersonInfoManager, get_person_info_manager

View File

@@ -10,6 +10,7 @@ from typing import Any
import numpy as np
from src.config.config import model_config
from . import BaseDataModel

View File

@@ -9,11 +9,10 @@
import asyncio
import time
from collections import defaultdict
from collections import OrderedDict, defaultdict
from collections.abc import Awaitable, Callable
from dataclasses import dataclass, field
from typing import Any
from collections import OrderedDict
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

View File

@@ -0,0 +1,259 @@
"""
Log broadcasting system
Pushes logs in real time to multiple subscribers (e.g. WebSocket clients)
"""
import asyncio
import logging
from collections import deque
from collections.abc import Callable
from typing import Any
import orjson
class LogBroadcaster:
"""日志广播器,用于实时推送日志到订阅者"""
def __init__(self, max_buffer_size: int = 1000):
"""
Initialize the log broadcaster
Args:
max_buffer_size: maximum buffer size; older logs are dropped beyond it
"""
self.subscribers: set[Callable[[dict[str, Any]], None]] = set()
self.buffer: deque[dict[str, Any]] = deque(maxlen=max_buffer_size)
self._lock = asyncio.Lock()
async def subscribe(self, callback: Callable[[dict[str, Any]], None]) -> None:
"""
Subscribe to log pushes
Args:
callback: callback that receives logs; its argument is the log dict
"""
async with self._lock:
self.subscribers.add(callback)
async def unsubscribe(self, callback: Callable[[dict[str, Any]], None]) -> None:
"""
Unsubscribe
Args:
callback: the callback to remove
"""
async with self._lock:
self.subscribers.discard(callback)
async def broadcast(self, log_record: dict[str, Any]) -> None:
"""
Broadcast a log record to all subscribers
Args:
log_record: the log record dict
"""
# Append to the buffer
async with self._lock:
self.buffer.append(log_record)
# Copy the subscriber list to avoid mutation during iteration
subscribers = list(self.subscribers)
# Send to all subscribers asynchronously
tasks = []
for callback in subscribers:
try:
if asyncio.iscoroutinefunction(callback):
tasks.append(asyncio.create_task(callback(log_record)))
else:
# Run sync callbacks in a thread pool; wrap the coroutine returned
# by asyncio.to_thread() in a task so asyncio.wait() accepts it
tasks.append(asyncio.create_task(asyncio.to_thread(callback, log_record)))
except Exception:
# Ignore errors from individual subscribers
pass
# Wait for all sends to finish (without blocking too long)
if tasks:
await asyncio.wait(tasks, timeout=1.0)
def get_recent_logs(self, limit: int = 100) -> list[dict[str, Any]]:
"""
Get the most recent log records
Args:
limit: maximum number of logs to return
Returns:
List of log records
"""
return list(self.buffer)[-limit:]
def clear_buffer(self) -> None:
"""清空日志缓冲区"""
self.buffer.clear()
class BroadcastLogHandler(logging.Handler):
"""
Logging handler that pushes records to the broadcaster
"""
def __init__(self, broadcaster: LogBroadcaster):
"""
Initialize the handler
Args:
broadcaster: the log broadcaster instance
"""
super().__init__()
self.broadcaster = broadcaster
self.loop: asyncio.AbstractEventLoop | None = None
def _get_logger_metadata(self, logger_name: str) -> dict[str, str | None]:
"""
Get the logger's metadata (alias and color)
Args:
logger_name: the logger name
Returns:
Dict containing alias and color
"""
try:
# Import the logger metadata accessor
from src.common.logger import get_logger_meta
return get_logger_meta(logger_name)
except Exception:
# On failure, return empty metadata
return {"alias": None, "color": None}
def emit(self, record: logging.LogRecord) -> None:
"""
Handle a log record
Args:
record: the log record
"""
try:
# Get the logger metadata (alias and color)
logger_meta = self._get_logger_metadata(record.name)
# Convert the record into a dict
log_dict = {
"timestamp": self.format_time(record),
"level": record.levelname, # 保持大写,与前端筛选器一致
"logger_name": record.name, # 原始logger名称
"event": record.getMessage(),
}
# Add alias and color (if present)
if logger_meta["alias"]:
log_dict["alias"] = logger_meta["alias"]
if logger_meta["color"]:
log_dict["color"] = logger_meta["color"]
# Add extra fields
if hasattr(record, "__dict__"):
for key, value in record.__dict__.items():
if key not in (
"name",
"msg",
"args",
"created",
"filename",
"funcName",
"levelname",
"levelno",
"lineno",
"module",
"msecs",
"pathname",
"process",
"processName",
"relativeCreated",
"thread",
"threadName",
"exc_info",
"exc_text",
"stack_info",
):
try:
# Try serializing to ensure it can be converted to JSON
orjson.dumps(value)
log_dict[key] = value
except (TypeError, ValueError):
log_dict[key] = str(value)
# Get or create an event loop
try:
loop = asyncio.get_running_loop()
except RuntimeError:
# No running event loop; fall back to a lazily created private loop
if self.loop is None:
try:
self.loop = asyncio.new_event_loop()
except Exception:
return
loop = self.loop
# Broadcast asynchronously on the event loop
asyncio.run_coroutine_threadsafe(
self.broadcaster.broadcast(log_dict), loop
)
except Exception:
# Ignore broadcast errors to avoid affecting the logging system
pass
def format_time(self, record: logging.LogRecord) -> str:
"""
Format the timestamp
Args:
record: the log record
Returns:
Formatted time string
"""
from datetime import datetime
dt = datetime.fromtimestamp(record.created)
return dt.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
# Global broadcaster instance
_global_broadcaster: LogBroadcaster | None = None
def get_log_broadcaster() -> LogBroadcaster:
"""
Get the global log broadcaster instance
Returns:
The log broadcaster instance
"""
global _global_broadcaster
if _global_broadcaster is None:
_global_broadcaster = LogBroadcaster()
return _global_broadcaster
def setup_log_broadcasting() -> LogBroadcaster:
"""
Set up log broadcasting by attaching the handler to the root logger
Returns:
The log broadcaster instance
"""
broadcaster = get_log_broadcaster()
# Create the broadcast handler and attach it to the root logger
handler = BroadcastLogHandler(broadcaster)
handler.setLevel(logging.DEBUG)
# Attach to the root logger
root_logger = logging.getLogger()
root_logger.addHandler(handler)
return broadcaster
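A hedged usage sketch (coroutine subscriber; timing values are illustrative):
import asyncio
async def demo() -> None:
    broadcaster = setup_log_broadcasting()
    async def on_log(record: dict) -> None:
        print(record["level"], record["event"])
    await broadcaster.subscribe(on_log)
    logging.getLogger("demo").warning("hello")  # routed through BroadcastLogHandler
    await asyncio.sleep(0.1)  # give the broadcast task a moment to run
asyncio.run(demo())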

View File

@@ -100,7 +100,7 @@ _monitor_thread: threading.Thread | None = None
_stop_event: threading.Event = threading.Event()
# Controlled by an environment variable, so it is not enabled in every environment at once
MEM_MONITOR_ENABLED = True
MEM_MONITOR_ENABLED = False
# Thresholds that trigger detailed collection
MEM_ABSOLUTE_THRESHOLD_MB = 1024.0  # above 1 GiB
MEM_GROWTH_THRESHOLD_MB = 200.0  # single-step growth above 200 MiB

View File

@@ -34,7 +34,7 @@ def get_accurate_size(obj: Any, seen: set | None = None, max_depth: int = 3, _cu
# Depth limit: prevents recursion blowups
if _current_depth >= max_depth:
return sys.getsizeof(obj)
# Object-count limit: prevents memory blowups
if len(seen) > 10000:
return sys.getsizeof(obj)
@@ -55,7 +55,7 @@ def get_accurate_size(obj: Any, seen: set | None = None, max_depth: int = 3, _cu
if isinstance(obj, dict):
# Limit the number of key/value pairs processed
items = list(obj.items())[:1000]  # handle at most 1000 pairs
size += sum(get_accurate_size(k, seen, max_depth, _current_depth + 1) +
size += sum(get_accurate_size(k, seen, max_depth, _current_depth + 1) +
get_accurate_size(v, seen, max_depth, _current_depth + 1)
for k, v in items)
@@ -204,7 +204,7 @@ def estimate_cache_item_size(obj: Any) -> int:
if pickle_size > 0:
# pickle is usually slightly smaller than actual memory; multiply by 1.5 as a safety factor
return int(pickle_size * 1.5)
# Method 2: smart estimation (depth-limited, samples large containers)
try:
smart_size = estimate_size_smart(obj, max_depth=5, sample_large=True)

View File

@@ -59,6 +59,7 @@ class Server:
"http://127.0.0.1:11451",
"http://localhost:3001",
"http://127.0.0.1:3001",
"http://127.0.0.1:12138",
# In production you should add the actual frontend domains
]

View File

@@ -597,7 +597,7 @@ class OpenaiClient(BaseClient):
"""
client = self._create_client()
is_batch_request = isinstance(embedding_input, list)
# Key fix: request encoding_format="base64" to avoid the SDK's automatic tolist() conversion
# Without encoding_format, the OpenAI SDK calls np.frombuffer().tolist(),
# which creates huge numbers of Python float objects and leaks memory badly
@@ -643,14 +643,14 @@ class OpenaiClient(BaseClient):
# Fallback: if the SDK did not return base64 (older versions or other cases),
# convert to a NumPy array
embeddings.append(np.array(item.embedding, dtype=np.float32))
response.embedding = embeddings if is_batch_request else embeddings[0]
else:
raise RespParseException(
raw_response,
"Response parsing failed: embedding data missing.",
)
# Trigger garbage collection after large batch requests (batch_size > 8)
if is_batch_request and len(embedding_input) > 8:
gc.collect()
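A sketch of the base64 decode path described above (the payload layout is assumed to follow the OpenAI embeddings response: base64-packed float32):
import base64
import numpy as np
def decode_embedding(b64_data: str) -> np.ndarray:
    raw = base64.b64decode(b64_data)
    # One contiguous float32 array; no intermediate list of Python floats.
    return np.frombuffer(raw, dtype=np.float32)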

View File

@@ -29,7 +29,6 @@ from enum import Enum
from typing import Any, ClassVar, Literal
import numpy as np
from rich.traceback import install
from src.common.logger import get_logger

View File

@@ -7,7 +7,7 @@ import time
import traceback
from collections.abc import Callable, Coroutine
from random import choices
from typing import Any, cast
from typing import Any
from rich.traceback import install
@@ -386,6 +386,14 @@ class MainSystem:
await mood_manager.start()
logger.debug("情绪管理器初始化成功")
# 初始化日志广播系统
try:
from src.common.log_broadcaster import setup_log_broadcasting
setup_log_broadcasting()
logger.debug("日志广播系统初始化成功")
except Exception as e:
logger.error(f"日志广播系统初始化失败: {e}")
# 启动聊天管理器的自动保存任务
from src.chat.message_receive.chat_stream import get_chat_manager
task = asyncio.create_task(get_chat_manager()._auto_save_task())

View File

@@ -57,6 +57,15 @@ class LongTermMemoryManager:
# State
self._initialized = False
# Batched embedding-generation queue
self._pending_embeddings: list[tuple[str, str]] = []  # (node_id, content)
self._embedding_batch_size = 10
self._embedding_lock = asyncio.Lock()
# Similar-memory cache (stm_id -> memories)
self._similar_memory_cache: dict[str, list[Memory]] = {}
self._cache_max_size = 100
logger.info(
f"Long-term memory manager created (batch_size={batch_size}, "
f"search_top_k={search_top_k}, decay_factor={long_term_decay_factor:.2f})"
@@ -150,7 +159,7 @@ class LongTermMemoryManager:
async def _process_batch(self, batch: list[ShortTermMemory]) -> dict[str, Any]:
"""
Process a batch of short-term memories
Process a batch of short-term memories (in parallel)
Args:
batch: the batch of short-term memories
@@ -167,57 +176,89 @@ class LongTermMemoryManager:
"transferred_memory_ids": [],
}
for stm in batch:
try:
# Step 1: search long-term memory for similar memories
similar_memories = await self._search_similar_long_term_memories(stm)
# Process all memories in the batch in parallel
tasks = [self._process_single_memory(stm) for stm in batch]
results = await asyncio.gather(*tasks, return_exceptions=True)
# Step 2: the LLM decides how to update the graph structure
operations = await self._decide_graph_operations(stm, similar_memories)
# Aggregate results
for stm, single_result in zip(batch, results):
if isinstance(single_result, Exception):
logger.error(f"Processing short-term memory {stm.id} failed: {single_result}")
result["failed_count"] += 1
elif single_result and isinstance(single_result, dict):
result["processed_count"] += 1
result["transferred_memory_ids"].append(stm.id)
# Step 3: execute the graph operations
success = await self._execute_graph_operations(operations, stm)
if success:
result["processed_count"] += 1
result["transferred_memory_ids"].append(stm.id)
# Tally operation types
for op in operations:
if op.operation_type == GraphOperationType.CREATE_MEMORY:
# Tally operation types
operations = single_result.get("operations", [])
if isinstance(operations, list):
for op_type in operations:
if op_type == GraphOperationType.CREATE_MEMORY:
result["created_count"] += 1
elif op.operation_type == GraphOperationType.UPDATE_MEMORY:
elif op_type == GraphOperationType.UPDATE_MEMORY:
result["updated_count"] += 1
elif op.operation_type == GraphOperationType.MERGE_MEMORIES:
elif op_type == GraphOperationType.MERGE_MEMORIES:
result["merged_count"] += 1
else:
result["failed_count"] += 1
except Exception as e:
logger.error(f"处理短期记忆 {stm.id} 失败: {e}")
else:
result["failed_count"] += 1
# 处理完批次后批量生成embeddings
await self._flush_pending_embeddings()
return result
async def _process_single_memory(self, stm: ShortTermMemory) -> dict[str, Any] | None:
"""
Process a single short-term memory
Args:
stm: the short-term memory
Returns:
The processing result, or None on failure
"""
try:
# Step 1: search long-term memory for similar memories
similar_memories = await self._search_similar_long_term_memories(stm)
# Step 2: the LLM decides how to update the graph structure
operations = await self._decide_graph_operations(stm, similar_memories)
# Step 3: execute the graph operations
success = await self._execute_graph_operations(operations, stm)
if success:
return {
"success": True,
"operations": [op.operation_type for op in operations]
}
return None
except Exception as e:
logger.error(f"处理短期记忆 {stm.id} 失败: {e}")
return None
async def _search_similar_long_term_memories(
self, stm: ShortTermMemory
) -> list[Memory]:
"""
Search long-term memory for memories similar to a short-term memory
Optimization: besides content-similar memories, use the graph structure to fetch context-related ones
Optimization: use a cache and reduce duplicate queries
"""
# Check the cache
if stm.id in self._similar_memory_cache:
logger.debug(f"Using cached similar memories: {stm.id}")
return self._similar_memory_cache[stm.id]
try:
from src.config.config import global_config
# Check whether the advanced path-expansion algorithm is enabled
use_path_expansion = getattr(global_config.memory, "enable_path_expansion", False)
# 1. Retrieve memories
# With path expansion enabled, search_memories uses PathScoreExpansion internally
# We only need to pass a suitable expand_depth
expand_depth = getattr(global_config.memory, "path_expansion_max_hops", 2) if use_path_expansion else 0
# 1. Retrieve memories
memories = await self.memory_manager.search_memories(
query=stm.content,
top_k=self.search_top_k,
@@ -226,53 +267,91 @@ class LongTermMemoryManager:
expand_depth=expand_depth
)
# 2. Graph structure expansion (Graph Expansion)
# If the advanced path expansion already ran, no extra manual expansion is needed
# 2. If advanced path expansion is enabled, return directly
if use_path_expansion:
logger.debug(f"Path expansion retrieved {len(memories)} memories")
self._cache_similar_memories(stm.id, memories)
return memories
# Without the advanced algorithm, fall back to a simple 1-hop neighbor expansion
expanded_memories = []
seen_ids = {m.id for m in memories}
# 3. Simplified graph expansion (only when the advanced algorithm is off)
if memories:
# Fetch related memory IDs in bulk to reduce per-item queries
related_ids_batch = await self._batch_get_related_memories(
[m.id for m in memories], max_depth=1, max_per_memory=2
)
for mem in memories:
expanded_memories.append(mem)
# Load related memories in bulk
seen_ids = {m.id for m in memories}
new_memories = []
for rid in related_ids_batch:
if rid not in seen_ids and len(new_memories) < self.search_top_k:
related_mem = await self.memory_manager.get_memory(rid)
if related_mem:
new_memories.append(related_mem)
seen_ids.add(rid)
# Get this memory's directly related memories (1-hop neighbors)
try:
# Use MemoryManager's underlying graph traversal
related_ids = self.memory_manager._get_related_memories(mem.id, max_depth=1)
memories.extend(new_memories)
# Limit neighbors expanded per memory to avoid context blowups
max_neighbors = 2
neighbor_count = 0
logger.debug(f"Found {len(memories)} long-term memories for short-term memory {stm.id}")
for rid in related_ids:
if rid not in seen_ids:
related_mem = await self.memory_manager.get_memory(rid)
if related_mem:
expanded_memories.append(related_mem)
seen_ids.add(rid)
neighbor_count += 1
if neighbor_count >= max_neighbors:
break
except Exception as e:
logger.warning(f"获取关联记忆失败: {e}")
# 总数限制
if len(expanded_memories) >= self.search_top_k * 2:
break
logger.debug(f"为短期记忆 {stm.id} 找到 {len(expanded_memories)} 个长期记忆 (含简单图扩展)")
return expanded_memories
# 缓存结果
self._cache_similar_memories(stm.id, memories)
return memories
except Exception as e:
logger.error(f"检索相似长期记忆失败: {e}")
return []
async def _batch_get_related_memories(
self, memory_ids: list[str], max_depth: int = 1, max_per_memory: int = 2
) -> set[str]:
"""
Fetch related memory IDs in bulk
Args:
memory_ids: list of memory IDs
max_depth: maximum traversal depth
max_per_memory: max related memories fetched per memory
Returns:
Set of related memory IDs
"""
all_related_ids = set()
try:
for mem_id in memory_ids:
if len(all_related_ids) >= max_per_memory * len(memory_ids):
break
try:
related_ids = self.memory_manager._get_related_memories(mem_id, max_depth=max_depth)
# Cap the number of related memories per source memory
for rid in list(related_ids)[:max_per_memory]:
all_related_ids.add(rid)
except Exception as e:
logger.warning(f"Failed to get related memories for {mem_id}: {e}")
except Exception as e:
logger.error(f"Bulk fetch of related memories failed: {e}")
return all_related_ids
def _cache_similar_memories(self, stm_id: str, memories: list[Memory]) -> None:
"""
Cache similar memories
Args:
stm_id: short-term memory ID
memories: list of similar memories
"""
# Simple FIFO-style eviction (approximate LRU): once the cache is full, drop the oldest entry
if len(self._similar_memory_cache) >= self._cache_max_size:
# Remove the first (oldest) entry
first_key = next(iter(self._similar_memory_cache))
del self._similar_memory_cache[first_key]
self._similar_memory_cache[stm_id] = memories
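Aside (not in this commit): the eviction above is FIFO by insertion order; a true LRU would also refresh entries on read, e.g. an OrderedDict sketch:
from collections import OrderedDict
class LRUCache(OrderedDict):
    def __init__(self, max_size: int = 100):
        super().__init__()
        self.max_size = max_size
    def get_item(self, key):
        if key in self:
            self.move_to_end(key)  # mark as recently used
            return self[key]
        return None
    def put(self, key, value):
        self[key] = value
        self.move_to_end(key)
        if len(self) > self.max_size:
            self.popitem(last=False)  # evict the least recently used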
async def _decide_graph_operations(
self, stm: ShortTermMemory, similar_memories: list[Memory]
) -> list[GraphOperation]:
@@ -587,17 +666,24 @@ class LongTermMemoryManager:
return temp_id_map.get(raw_id, raw_id)
def _resolve_value(self, value: Any, temp_id_map: dict[str, str]) -> Any:
if isinstance(value, str):
return self._resolve_id(value, temp_id_map)
if isinstance(value, list):
return [self._resolve_value(v, temp_id_map) for v in value]
if isinstance(value, dict):
return {k: self._resolve_value(v, temp_id_map) for k, v in value.items()}
"""优化的值解析,减少递归和类型检查"""
value_type = type(value)
if value_type is str:
return temp_id_map.get(value, value)
elif value_type is list:
return [temp_id_map.get(v, v) if isinstance(v, str) else v for v in value]
elif value_type is dict:
return {k: temp_id_map.get(v, v) if isinstance(v, str) else v
for k, v in value.items()}
return value
def _resolve_parameters(
self, params: dict[str, Any], temp_id_map: dict[str, str]
) -> dict[str, Any]:
"""优化的参数解析"""
if not temp_id_map:
return params
return {k: self._resolve_value(v, temp_id_map) for k, v in params.items()}
def _register_aliases_from_params(
@@ -643,7 +729,7 @@ class LongTermMemoryManager:
subject=params.get("subject", source_stm.subject or "未知"),
memory_type=params.get("memory_type", source_stm.memory_type or "fact"),
topic=params.get("topic", source_stm.topic or source_stm.content[:50]),
object=params.get("object", source_stm.object),
obj=params.get("object", source_stm.object),
attributes=params.get("attributes", source_stm.attributes),
importance=params.get("importance", source_stm.importance),
)
@@ -730,8 +816,10 @@ class LongTermMemoryManager:
importance=merged_importance,
)
# 3. Save asynchronously
asyncio.create_task(self.memory_manager._async_save_graph_store("merge memories"))
# 3. Save asynchronously (background task; no need to await)
asyncio.create_task(  # noqa: RUF006
self.memory_manager._async_save_graph_store("merge memories")
)
logger.info(f"Memory merge finished: {source_ids} -> {target_id}")
else:
logger.error(f"Memory merge failed: {source_ids}")
@@ -761,8 +849,8 @@ class LongTermMemoryManager:
)
if success:
# Try to generate an embedding for the new node (async)
asyncio.create_task(self._generate_node_embedding(node_id, content))
# Queue the embedding generation for batched processing
await self._queue_embedding_generation(node_id, content)
logger.info(f"Created node: {content} ({node_type}) -> {memory_id}")
# Force-register target_id, whether or not it matches the placeholder format
self._register_temp_id(op.target_id, node_id, temp_id_map, force=True)
@@ -820,7 +908,7 @@ class LongTermMemoryManager:
# Merge the other nodes into the target node
for source_id in sources:
self.memory_manager.graph_store.merge_nodes(source_id, target_id)
logger.info(f"Merged nodes: {sources} -> {target_id}")
async def _execute_create_edge(
@@ -901,20 +989,83 @@ class LongTermMemoryManager:
else:
logger.error(f"删除边失败: {edge_id}")
async def _generate_node_embedding(self, node_id: str, content: str) -> None:
"""为新节点生成 embedding 并存入向量库"""
async def _queue_embedding_generation(self, node_id: str, content: str) -> None:
"""将节点加入embedding生成队列"""
async with self._embedding_lock:
self._pending_embeddings.append((node_id, content))
# If the queue reached the batch size, flush immediately
if len(self._pending_embeddings) >= self._embedding_batch_size:
await self._flush_pending_embeddings()
async def _flush_pending_embeddings(self) -> None:
"""批量处理待生成的embeddings"""
async with self._embedding_lock:
if not self._pending_embeddings:
return
batch = self._pending_embeddings[:]
self._pending_embeddings.clear()
if not self.memory_manager.vector_store or not self.memory_manager.embedding_generator:
return
try:
# Generate embeddings in bulk
contents = [content for _, content in batch]
embeddings = await self.memory_manager.embedding_generator.generate_batch(contents)
if not embeddings or len(embeddings) != len(batch):
logger.warning("批量生成embedding失败或数量不匹配")
# 回退到单个生成
for node_id, content in batch:
await self._generate_node_embedding_single(node_id, content)
return
# Add to the vector store in bulk
from src.memory_graph.models import MemoryNode, NodeType
nodes = [
MemoryNode(
id=node_id,
content=content,
node_type=NodeType.OBJECT,
embedding=embedding
)
for (node_id, content), embedding in zip(batch, embeddings)
if embedding is not None
]
if nodes:
# Add the nodes in bulk
await self.memory_manager.vector_store.add_nodes_batch(nodes)
# Update the graph store in bulk
for node in nodes:
node.mark_vector_stored()
if self.memory_manager.graph_store.graph.has_node(node.id):
self.memory_manager.graph_store.graph.nodes[node.id]["has_vector"] = True
logger.debug(f"批量生成 {len(nodes)} 个节点的embedding")
except Exception as e:
logger.error(f"批量生成embedding失败: {e}")
# 回退到单个生成
for node_id, content in batch:
await self._generate_node_embedding_single(node_id, content)
async def _generate_node_embedding_single(self, node_id: str, content: str) -> None:
"""为单个节点生成 embedding 并存入向量库(回退方法)"""
try:
if not self.memory_manager.vector_store or not self.memory_manager.embedding_generator:
return
embedding = await self.memory_manager.embedding_generator.generate(content)
if embedding is not None:
# 需要构造一个 MemoryNode 对象来调用 add_node
from src.memory_graph.models import MemoryNode, NodeType
node = MemoryNode(
id=node_id,
content=content,
node_type=NodeType.OBJECT, # 默认
node_type=NodeType.OBJECT,
embedding=embedding
)
await self.memory_manager.vector_store.add_node(node)
@@ -926,7 +1077,7 @@ class LongTermMemoryManager:
async def apply_long_term_decay(self) -> dict[str, Any]:
"""
应用长期记忆的激活度衰减
应用长期记忆的激活度衰减(优化版)
长期记忆的衰减比短期记忆慢,使用更高的衰减因子。
@@ -941,6 +1092,12 @@ class LongTermMemoryManager:
all_memories = self.memory_manager.graph_store.get_all_memories()
decayed_count = 0
now = datetime.now()
# 预计算衰减因子的幂次方(缓存常用值)
decay_cache = {i: self.long_term_decay_factor ** i for i in range(1, 31)} # 缓存1-30天
memories_to_update = []
for memory in all_memories:
# 跳过已遗忘的记忆
@@ -954,27 +1111,34 @@ class LongTermMemoryManager:
if last_access:
try:
last_access_dt = datetime.fromisoformat(last_access)
days_passed = (datetime.now() - last_access_dt).days
days_passed = (now - last_access_dt).days
if days_passed > 0:
# 使用长期记忆的衰减因子
# 使用缓存的衰减因子或计算新值
decay_factor = decay_cache.get(
days_passed,
self.long_term_decay_factor ** days_passed
)
base_activation = activation_info.get("level", memory.activation)
new_activation = base_activation * (self.long_term_decay_factor ** days_passed)
new_activation = base_activation * decay_factor
# 更新激活度
memory.activation = new_activation
activation_info["level"] = new_activation
memory.metadata["activation"] = activation_info
memories_to_update.append(memory)
decayed_count += 1
except (ValueError, TypeError) as e:
logger.warning(f"解析时间失败: {e}")
# 保存更新
await self.memory_manager.persistence.save_graph_store(
self.memory_manager.graph_store
)
# 批量保存更新(如果有变化)
if memories_to_update:
await self.memory_manager.persistence.save_graph_store(
self.memory_manager.graph_store
)
logger.info(f"长期记忆衰减完成: {decayed_count} 条记忆已更新")
return {"decayed_count": decayed_count, "total_memories": len(all_memories)}
@@ -1002,6 +1166,12 @@ class LongTermMemoryManager:
try:
logger.info("正在关闭长期记忆管理器...")
# 清空待处理的embedding队列
await self._flush_pending_embeddings()
# 清空缓存
self._similar_memory_cache.clear()
# 长期记忆的保存由 MemoryManager 负责
self._initialized = False

View File

@@ -21,7 +21,7 @@ import numpy as np
from src.common.logger import get_logger
from src.memory_graph.models import MemoryBlock, PerceptualMemory
from src.memory_graph.utils.embeddings import EmbeddingGenerator
from src.memory_graph.utils.similarity import batch_cosine_similarity_async
from src.memory_graph.utils.similarity import _compute_similarities_sync
logger = get_logger(__name__)
@@ -208,6 +208,7 @@ class PerceptualMemoryManager:
# 生成向量
embedding = await self._generate_embedding(combined_text)
embedding_norm = float(np.linalg.norm(embedding)) if embedding is not None else 0.0
# 创建记忆块
block = MemoryBlock(
@@ -215,7 +216,10 @@ class PerceptualMemoryManager:
messages=messages,
combined_text=combined_text,
embedding=embedding,
metadata={"stream_id": stream_id} # 添加 stream_id 元数据
metadata={
"stream_id": stream_id,
"embedding_norm": embedding_norm,
}, # stream_id 便于调试,embedding_norm 用于快速相似度
)
# 添加到记忆堆顶部
@@ -395,6 +399,17 @@ class PerceptualMemoryManager:
logger.error(f"批量生成向量失败: {e}")
return [None] * len(texts)
async def _compute_similarities(
self,
query_embedding: np.ndarray,
block_embeddings: list[np.ndarray],
block_norms: list[float] | None = None,
) -> np.ndarray:
"""在后台线程中向量化计算相似度,避免阻塞事件循环。"""
return await asyncio.to_thread(
_compute_similarities_sync, query_embedding, block_embeddings, block_norms
)
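`asyncio.to_thread` pushes the vectorized numpy work onto a worker thread so the event loop keeps serving other coroutines; numpy releases the GIL inside its C kernels, so the computation genuinely overlaps. A self-contained sketch of the pattern:

import asyncio
import numpy as np

def heavy_similarity(query: np.ndarray, matrix: np.ndarray) -> np.ndarray:
    # CPU-bound numpy work; safe to run off the event loop.
    return matrix @ query

async def main() -> None:
    query = np.random.rand(384).astype(np.float32)
    matrix = np.random.rand(10_000, 384).astype(np.float32)
    scores = await asyncio.to_thread(heavy_similarity, query, matrix)
    print(scores.shape)  # (10000,)

asyncio.run(main())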
async def recall_blocks(
self,
query_text: str,
@@ -425,7 +440,7 @@ class PerceptualMemoryManager:
logger.warning("查询向量生成失败,返回空列表")
return []
# 批量计算所有块的相似度(使用异步版本
# 批量计算所有块的相似度(使用向量化计算 + 后台线程
blocks_with_embeddings = [
block for block in self.perceptual_memory.blocks
if block.embedding is not None
@@ -434,26 +449,39 @@ class PerceptualMemoryManager:
if not blocks_with_embeddings:
return []
# 批量计算相似度
block_embeddings = [block.embedding for block in blocks_with_embeddings]
similarities = await batch_cosine_similarity_async(query_embedding, block_embeddings)
block_embeddings: list[np.ndarray] = []
block_norms: list[float] = []
# 过滤和排序
scored_blocks = []
for block, similarity in zip(blocks_with_embeddings, similarities):
# 过滤低于阈值的块
if similarity >= similarity_threshold:
scored_blocks.append((block, similarity))
for block in blocks_with_embeddings:
block_embeddings.append(block.embedding)
norm = block.metadata.get("embedding_norm") if block.metadata else None
if norm is None and block.embedding is not None:
norm = float(np.linalg.norm(block.embedding))
block.metadata["embedding_norm"] = norm
block_norms.append(norm if norm is not None else 0.0)
# 按相似度降序排序
scored_blocks.sort(key=lambda x: x[1], reverse=True)
similarities = await self._compute_similarities(query_embedding, block_embeddings, block_norms)
similarities = np.asarray(similarities, dtype=np.float32)
# 取 TopK
top_blocks = scored_blocks[:top_k]
candidate_indices = np.nonzero(similarities >= similarity_threshold)[0]
if candidate_indices.size == 0:
return []
if candidate_indices.size > top_k:
# argpartition 以 O(n) 选出 TopK,避免 O(n log n) 的全量排序
top_indices = candidate_indices[
np.argpartition(similarities[candidate_indices], -top_k)[-top_k:]
]
else:
top_indices = candidate_indices
# 保持按相似度降序
top_indices = top_indices[np.argsort(similarities[top_indices])[::-1]]
# 更新召回计数和位置
recalled_blocks = []
for block, similarity in top_blocks:
for idx in top_indices[:top_k]:
block = blocks_with_embeddings[int(idx)]
block.increment_recall()
recalled_blocks.append(block)
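The TopK step above leans on `np.argpartition`, which moves the k largest scores into the last k slots without a full sort. A tiny illustration of why the extra `argsort` over just those k indices is still needed:

import numpy as np

similarities = np.array([0.2, 0.9, 0.4, 0.7, 0.1], dtype=np.float32)
top_k = 2
top = np.argpartition(similarities, -top_k)[-top_k:]  # O(n), order not guaranteed
top = top[np.argsort(similarities[top])[::-1]]        # order the k winners
print(top, similarities[top])  # [1 3] [0.9 0.7]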
@@ -663,6 +691,7 @@ class PerceptualMemoryManager:
for block, embedding in zip(blocks_to_process, embeddings):
if embedding is not None:
block.embedding = embedding
block.metadata["embedding_norm"] = float(np.linalg.norm(embedding))
success_count += 1
logger.debug(f"向量重新生成完成(成功: {success_count}/{len(blocks_to_process)}")

View File

@@ -11,10 +11,10 @@ import asyncio
import json
import re
import uuid
import json_repair
from pathlib import Path
from typing import Any
import json_repair
import numpy as np
from src.common.logger import get_logger
@@ -65,6 +65,10 @@ class ShortTermMemoryManager:
self.memories: list[ShortTermMemory] = []
self.embedding_generator: EmbeddingGenerator | None = None
# 优化:快速查找索引
self._memory_id_index: dict[str, ShortTermMemory] = {} # ID 快速查找
self._similarity_cache: dict[str, dict[str, float]] = {} # 相似度缓存 {query_id: {target_id: sim}}
# 状态
self._initialized = False
self._save_lock = asyncio.Lock()
@@ -366,6 +370,7 @@ class ShortTermMemoryManager:
if decision.operation == ShortTermOperation.CREATE_NEW:
# 创建新记忆
self.memories.append(new_memory)
self._memory_id_index[new_memory.id] = new_memory # 更新索引
logger.debug(f"创建新短期记忆: {new_memory.id}")
return new_memory
@@ -375,6 +380,7 @@ class ShortTermMemoryManager:
if not target:
logger.warning(f"目标记忆不存在,改为创建新记忆: {decision.target_memory_id}")
self.memories.append(new_memory)
self._memory_id_index[new_memory.id] = new_memory
return new_memory
# 更新内容
@@ -389,6 +395,9 @@ class ShortTermMemoryManager:
target.embedding = await self._generate_embedding(target.content)
target.update_access()
# 清除此记忆的缓存
self._similarity_cache.pop(target.id, None)
logger.debug(f"合并记忆到: {target.id}")
return target
@@ -398,6 +407,7 @@ class ShortTermMemoryManager:
if not target:
logger.warning(f"目标记忆不存在,改为创建新记忆: {decision.target_memory_id}")
self.memories.append(new_memory)
self._memory_id_index[new_memory.id] = new_memory
return new_memory
# 更新内容
@@ -412,6 +422,9 @@ class ShortTermMemoryManager:
target.source_block_ids.extend(new_memory.source_block_ids)
target.update_access()
# 清除此记忆的缓存
self._similarity_cache.pop(target.id, None)
logger.debug(f"更新记忆: {target.id}")
return target
@@ -423,12 +436,14 @@ class ShortTermMemoryManager:
elif decision.operation == ShortTermOperation.KEEP_SEPARATE:
# 保持独立
self.memories.append(new_memory)
self._memory_id_index[new_memory.id] = new_memory # 更新索引
logger.debug(f"保持独立记忆: {new_memory.id}")
return new_memory
else:
logger.warning(f"未知操作类型: {decision.operation},默认创建新记忆")
self.memories.append(new_memory)
self._memory_id_index[new_memory.id] = new_memory
return new_memory
except Exception as e:
@@ -439,7 +454,7 @@ class ShortTermMemoryManager:
self, memory: ShortTermMemory, top_k: int = 5
) -> list[tuple[ShortTermMemory, float]]:
"""
查找与给定记忆相似的现有记忆
查找与给定记忆相似的现有记忆(优化版:并发计算 + 缓存)
Args:
memory: 目标记忆
@@ -452,13 +467,35 @@ class ShortTermMemoryManager:
return []
try:
scored = []
# 检查缓存
if memory.id in self._similarity_cache:
cached = self._similarity_cache[memory.id]
scored = [(self._memory_id_index[mid], sim)
for mid, sim in cached.items()
if mid in self._memory_id_index]
scored.sort(key=lambda x: x[1], reverse=True)
return scored[:top_k]
# 并发计算所有相似度
tasks = []
for existing_mem in self.memories:
if existing_mem.embedding is None:
continue
tasks.append(cosine_similarity_async(memory.embedding, existing_mem.embedding))
similarity = await cosine_similarity_async(memory.embedding, existing_mem.embedding)
if not tasks:
return []
similarities = await asyncio.gather(*tasks)
# 构建结果并缓存
scored = []
cache_entry = {}
for existing_mem, similarity in zip([m for m in self.memories if m.embedding is not None], similarities):
scored.append((existing_mem, similarity))
cache_entry[existing_mem.id] = similarity
self._similarity_cache[memory.id] = cache_entry
# 按相似度降序排序
scored.sort(key=lambda x: x[1], reverse=True)
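Note that the `asyncio.gather` above still awaits one tiny coroutine per pair; when every embedding is already a numpy array, stacking them and doing a single matrix-vector product (as the perceptual manager's `_compute_similarities_sync` does) is usually faster. A hedged sketch of that alternative, not what this patch implements:

import numpy as np

def batch_cosine(query: np.ndarray, embeddings: list[np.ndarray]) -> np.ndarray:
    matrix = np.stack(embeddings).astype(np.float32)
    q = query.astype(np.float32)
    denom = np.linalg.norm(matrix, axis=1) * np.linalg.norm(q)
    sims = np.zeros(len(embeddings), dtype=np.float32)
    np.divide(matrix @ q, denom, out=sims, where=denom > 0)  # skip zero vectors
    return np.clip(sims, 0.0, 1.0)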
@@ -470,15 +507,12 @@ class ShortTermMemoryManager:
return []
def _find_memory_by_id(self, memory_id: str | None) -> ShortTermMemory | None:
"""根据ID查找记忆"""
"""根据ID查找记忆优化版O(1) 哈希表查找)"""
if not memory_id:
return None
for mem in self.memories:
if mem.id == memory_id:
return mem
return None
# 使用索引进行 O(1) 查找
return self._memory_id_index.get(memory_id)
async def _generate_embedding(self, text: str) -> np.ndarray | None:
"""生成文本向量"""
@@ -542,7 +576,7 @@ class ShortTermMemoryManager:
self, query_text: str, top_k: int = 5, similarity_threshold: float = 0.5
) -> list[ShortTermMemory]:
"""
检索相关的短期记忆
检索相关的短期记忆(优化版:并发计算相似度)
Args:
query_text: 查询文本
@@ -561,13 +595,23 @@ class ShortTermMemoryManager:
if query_embedding is None or len(query_embedding) == 0:
return []
# 计算相似度
scored = []
# 并发计算所有相似度
tasks = []
valid_memories = []
for memory in self.memories:
if memory.embedding is None:
continue
valid_memories.append(memory)
tasks.append(cosine_similarity_async(query_embedding, memory.embedding))
similarity = await cosine_similarity_async(query_embedding, memory.embedding)
if not tasks:
return []
similarities = await asyncio.gather(*tasks)
# 构建结果
scored = []
for memory, similarity in zip(valid_memories, similarities):
if similarity >= similarity_threshold:
scored.append((memory, similarity))
@@ -575,7 +619,7 @@ class ShortTermMemoryManager:
scored.sort(key=lambda x: x[1], reverse=True)
results = [mem for mem, _ in scored[:top_k]]
# 更新访问记录
# 批量更新访问记录
for mem in results:
mem.update_access()
@@ -588,19 +632,21 @@ class ShortTermMemoryManager:
def get_memories_for_transfer(self) -> list[ShortTermMemory]:
"""
获取需要转移到长期记忆的记忆
获取需要转移到长期记忆的记忆(优化版:单次遍历)
逻辑:
1. 优先选择重要性 >= 阈值的记忆
2. 如果剩余记忆数量仍超过 max_memories,直接清理最早的低重要性记忆,直到低于上限
"""
# 1. 正常筛选:重要性达标的记忆
candidates = [mem for mem in self.memories if mem.importance >= self.transfer_importance_threshold]
candidate_ids = {mem.id for mem in candidates}
# 单次遍历:同时分类高重要性和低重要性记忆
candidates = []
low_importance_memories = []
# 2. 检查低重要性记忆是否积压
# 剩余的都是低重要性记忆
low_importance_memories = [mem for mem in self.memories if mem.id not in candidate_ids]
for mem in self.memories:
if mem.importance >= self.transfer_importance_threshold:
candidates.append(mem)
else:
low_importance_memories.append(mem)
# 如果低重要性记忆数量超过了上限(说明积压严重)
# 我们需要清理掉一部分,而不是转移它们
@@ -614,9 +660,12 @@ class ShortTermMemoryManager:
low_importance_memories.sort(key=lambda x: x.created_at)
to_remove = low_importance_memories[:num_to_remove]
for mem in to_remove:
if mem in self.memories:
self.memories.remove(mem)
# 批量删除并更新索引
remove_ids = {mem.id for mem in to_remove}
self.memories = [mem for mem in self.memories if mem.id not in remove_ids]
for mem_id in remove_ids:
del self._memory_id_index[mem_id]
self._similarity_cache.pop(mem_id, None)
logger.info(
f"短期记忆清理: 移除了 {len(to_remove)} 条低重要性记忆 "
@@ -636,7 +685,14 @@ class ShortTermMemoryManager:
memory_ids: 已转移的记忆ID列表
"""
try:
self.memories = [mem for mem in self.memories if mem.id not in memory_ids]
remove_ids = set(memory_ids)
self.memories = [mem for mem in self.memories if mem.id not in remove_ids]
# 更新索引
for mem_id in remove_ids:
self._memory_id_index.pop(mem_id, None)
self._similarity_cache.pop(mem_id, None)
logger.info(f"清除 {len(memory_ids)} 条已转移的短期记忆")
# 异步保存
@@ -696,7 +752,11 @@ class ShortTermMemoryManager:
data = orjson.loads(load_path.read_bytes())
self.memories = [ShortTermMemory.from_dict(m) for m in data.get("memories", [])]
# 重新生成向量
# 重建索引
for mem in self.memories:
self._memory_id_index[mem.id] = mem
# 批量重新生成向量
await self._reload_embeddings()
logger.info(f"短期记忆已从 {load_path} 加载 ({len(self.memories)} 条)")
@@ -705,7 +765,7 @@ class ShortTermMemoryManager:
logger.error(f"加载短期记忆失败: {e}")
async def _reload_embeddings(self) -> None:
"""重新生成记忆的向量"""
"""重新生成记忆的向量(优化版:并发处理)"""
logger.info("重新生成短期记忆向量...")
memories_to_process = []
@@ -722,6 +782,7 @@ class ShortTermMemoryManager:
logger.info(f"开始批量生成 {len(memories_to_process)} 条短期记忆的向量...")
# 使用 gather 并发生成向量
embeddings = await self._generate_embeddings_batch(texts_to_process)
success_count = 0

View File

@@ -226,28 +226,23 @@ class UnifiedMemoryManager:
"judge_decision": None,
}
# 步骤1: 检索感知记忆和短期记忆
perceptual_blocks_task = asyncio.create_task(self.perceptual_manager.recall_blocks(query_text))
short_term_memories_task = asyncio.create_task(self.short_term_manager.search_memories(query_text))
# 步骤1: 并行检索感知记忆和短期记忆(优化:消除任务创建开销)
perceptual_blocks, short_term_memories = await asyncio.gather(
perceptual_blocks_task,
short_term_memories_task,
self.perceptual_manager.recall_blocks(query_text),
self.short_term_manager.search_memories(query_text),
)
# 步骤1.5: 检查需要转移的感知块,推迟到后台处理
blocks_to_transfer = [
block
for block in perceptual_blocks
if block.metadata.get("needs_transfer", False)
]
# 步骤1.5: 检查需要转移的感知块,推迟到后台处理(优化:单遍扫描与转移)
blocks_to_transfer = []
for block in perceptual_blocks:
if block.metadata.get("needs_transfer", False):
block.metadata["needs_transfer"] = False # 立即标记,避免重复
blocks_to_transfer.append(block)
if blocks_to_transfer:
logger.debug(
f"检测到 {len(blocks_to_transfer)} 个感知记忆需要转移,已交由后台后处理任务执行"
)
for block in blocks_to_transfer:
block.metadata["needs_transfer"] = False
self._schedule_perceptual_block_transfer(blocks_to_transfer)
result["perceptual_blocks"] = perceptual_blocks
@@ -412,12 +407,13 @@ class UnifiedMemoryManager:
)
def _schedule_perceptual_block_transfer(self, blocks: list[MemoryBlock]) -> None:
"""将感知记忆块转移到短期记忆,后台执行以避免阻塞"""
"""将感知记忆块转移到短期记忆,后台执行以避免阻塞(优化:避免不必要的列表复制)"""
if not blocks:
return
# 优化:直接传递 blocks 而不再 list(blocks)
task = asyncio.create_task(
self._transfer_blocks_to_short_term(list(blocks))
self._transfer_blocks_to_short_term(blocks)
)
self._attach_background_task_callback(task, "perceptual->short-term transfer")
@@ -440,7 +436,7 @@ class UnifiedMemoryManager:
self._transfer_wakeup_event.set()
def _calculate_auto_sleep_interval(self) -> float:
"""根据短期内存压力计算自适应等待间隔"""
"""根据短期内存压力计算自适应等待间隔(优化:查表法替代链式比较)"""
base_interval = self._auto_transfer_interval
if not getattr(self, "short_term_manager", None):
return base_interval
@@ -448,54 +444,63 @@ class UnifiedMemoryManager:
max_memories = max(1, getattr(self.short_term_manager, "max_memories", 1))
occupancy = len(self.short_term_manager.memories) / max_memories
# 优化:更激进的自适应间隔,加快高负载下的转移
if occupancy >= 0.8:
return max(2.0, base_interval * 0.1)
if occupancy >= 0.5:
return max(5.0, base_interval * 0.2)
if occupancy >= 0.3:
return max(10.0, base_interval * 0.4)
if occupancy >= 0.1:
return max(15.0, base_interval * 0.6)
# 优化:使用阈值表替代链式 if 判断,阈值集中定义、便于增改(仍是顺序扫描,并非 O(1))
occupancy_thresholds = [
(0.8, 2.0, 0.1),
(0.5, 5.0, 0.2),
(0.3, 10.0, 0.4),
(0.1, 15.0, 0.6),
]
for threshold, min_val, factor in occupancy_thresholds:
if occupancy >= threshold:
return max(min_val, base_interval * factor)
return base_interval
async def _transfer_blocks_to_short_term(self, blocks: list[MemoryBlock]) -> None:
"""实际转换逻辑在后台执行"""
"""实际转换逻辑在后台执行(优化:并行处理多个块,批量触发唤醒)"""
logger.debug(f"正在后台处理 {len(blocks)} 个感知记忆块")
for block in blocks:
# 优化:使用 asyncio.gather 并行处理转移
async def _transfer_single(block: MemoryBlock) -> tuple[MemoryBlock, bool]:
try:
stm = await self.short_term_manager.add_from_block(block)
if not stm:
continue
return block, False
await self.perceptual_manager.remove_block(block.id)
self._trigger_transfer_wakeup()
logger.debug(f"✓ 记忆块 {block.id} 已被转移到短期记忆 {stm.id}")
return block, True
except Exception as exc:
logger.error(f"后台转移失败,记忆块 {block.id}: {exc}")
return block, False
# 并行处理所有块
results = await asyncio.gather(*[_transfer_single(block) for block in blocks], return_exceptions=True)
# 统计成功的转移
success_count = sum(1 for result in results if isinstance(result, tuple) and result[1])
if success_count > 0:
self._trigger_transfer_wakeup()
logger.debug(f"✅ 后台转移: 成功 {success_count}/{len(blocks)} 个块")
def _build_manual_multi_queries(self, queries: list[str]) -> list[dict[str, float]]:
"""去重裁判查询并附加权重以进行多查询搜索"""
deduplicated: list[str] = []
"""去重裁判查询并附加权重以进行多查询搜索(优化:使用字典推导式)"""
# 优化:单遍去重(避免多次 strip 和 in 检查)
seen = set()
decay = 0.15
manual_queries: list[dict[str, Any]] = []
for raw in queries:
text = (raw or "").strip()
if not text or text in seen:
continue
deduplicated.append(text)
seen.add(text)
if text and text not in seen:
seen.add(text)
weight = max(0.3, 1.0 - len(manual_queries) * decay)
manual_queries.append({"text": text, "weight": round(weight, 2)})
if len(deduplicated) <= 1:
return []
manual_queries: list[dict[str, Any]] = []
decay = 0.15
for idx, text in enumerate(deduplicated):
weight = max(0.3, 1.0 - idx * decay)
manual_queries.append({"text": text, "weight": round(weight, 2)})
return manual_queries
# 过滤单条或空列表
return manual_queries if len(manual_queries) > 1 else []
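With `decay = 0.15` and a floor of 0.3, deduplicated queries are weighted 1.0, 0.85, 0.7, 0.55, ... and clamped to 0.3 from the sixth query onward. A worked example of the loop above:

decay = 0.15
raw = ["天气", "天气", " 出行 ", "", "路况"]
seen: set[str] = set()
manual_queries = []
for text in (q.strip() for q in raw):
    if text and text not in seen:
        seen.add(text)
        weight = max(0.3, 1.0 - len(manual_queries) * decay)
        manual_queries.append({"text": text, "weight": round(weight, 2)})
print(manual_queries)
# [{'text': '天气', 'weight': 1.0}, {'text': '出行', 'weight': 0.85}, {'text': '路况', 'weight': 0.7}]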
async def _retrieve_long_term_memories(
self,
@@ -503,36 +508,41 @@ class UnifiedMemoryManager:
queries: list[str],
recent_chat_history: str = "",
) -> list[Any]:
"""可一次性运行多查询搜索的集中式长期检索条目"""
"""可一次性运行多查询搜索的集中式长期检索条目(优化:减少中间对象创建)"""
manual_queries = self._build_manual_multi_queries(queries)
context: dict[str, Any] = {}
if recent_chat_history:
context["chat_history"] = recent_chat_history
if manual_queries:
context["manual_multi_queries"] = manual_queries
# 优化:仅在必要时创建 context 字典
search_params: dict[str, Any] = {
"query": base_query,
"top_k": self._config["long_term"]["search_top_k"],
"use_multi_query": bool(manual_queries),
}
if context:
if recent_chat_history or manual_queries:
context: dict[str, Any] = {}
if recent_chat_history:
context["chat_history"] = recent_chat_history
if manual_queries:
context["manual_multi_queries"] = manual_queries
search_params["context"] = context
memories = await self.memory_manager.search_memories(**search_params)
unique_memories = self._deduplicate_memories(memories)
len(manual_queries) if manual_queries else 1
return unique_memories
return self._deduplicate_memories(memories)
def _deduplicate_memories(self, memories: list[Any]) -> list[Any]:
"""通过 memory.id 去重"""
"""通过 memory.id 去重(优化:支持 dict 和 object单遍处理"""
seen_ids: set[str] = set()
unique_memories: list[Any] = []
for mem in memories:
mem_id = getattr(mem, "id", None)
# 支持两种 ID 访问方式
mem_id = None
if isinstance(mem, dict):
mem_id = mem.get("id")
else:
mem_id = getattr(mem, "id", None)
# 检查去重
if mem_id and mem_id in seen_ids:
continue
@@ -558,7 +568,7 @@ class UnifiedMemoryManager:
logger.debug("自动转移任务已启动")
async def _auto_transfer_loop(self) -> None:
"""自动转移循环(批量缓存模式)"""
"""自动转移循环(批量缓存模式,优化:更高效的缓存管理"""
transfer_cache: list[ShortTermMemory] = []
cached_ids: set[str] = set()
cache_size_threshold = max(1, self._config["long_term"].get("batch_size", 1))
@@ -582,28 +592,29 @@ class UnifiedMemoryManager:
memories_to_transfer = self.short_term_manager.get_memories_for_transfer()
if memories_to_transfer:
added = 0
# 优化:批量构建缓存而不是逐条添加
new_memories = []
for memory in memories_to_transfer:
mem_id = getattr(memory, "id", None)
if mem_id and mem_id in cached_ids:
continue
transfer_cache.append(memory)
if mem_id:
cached_ids.add(mem_id)
added += 1
if added:
if not (mem_id and mem_id in cached_ids):
new_memories.append(memory)
if mem_id:
cached_ids.add(mem_id)
if new_memories:
transfer_cache.extend(new_memories)
logger.debug(
f"自动转移缓存: 新增{added}条, 当前缓存{len(transfer_cache)}/{cache_size_threshold}"
f"自动转移缓存: 新增{len(new_memories)}条, 当前缓存{len(transfer_cache)}/{cache_size_threshold}"
)
max_memories = max(1, getattr(self.short_term_manager, "max_memories", 1))
occupancy_ratio = len(self.short_term_manager.memories) / max_memories
time_since_last_transfer = time.monotonic() - last_transfer_time
# 优化:优先级判断重构(早期 return
should_transfer = (
len(transfer_cache) >= cache_size_threshold
or occupancy_ratio >= 0.5 # 优化:降低触发阈值 (原为 0.85)
or occupancy_ratio >= 0.5
or (transfer_cache and time_since_last_transfer >= self._max_transfer_delay)
or len(self.short_term_manager.memories) >= self.short_term_manager.max_memories
)
@@ -613,13 +624,16 @@ class UnifiedMemoryManager:
f"准备批量转移: {len(transfer_cache)}条短期记忆到长期记忆 (占用率 {occupancy_ratio:.0%})"
)
result = await self.long_term_manager.transfer_from_short_term(list(transfer_cache))
# 优化:直接传递列表而不再复制
result = await self.long_term_manager.transfer_from_short_term(transfer_cache)
if result.get("transferred_memory_ids"):
transferred_ids = set(result["transferred_memory_ids"])
await self.short_term_manager.clear_transferred_memories(
result["transferred_memory_ids"]
)
transferred_ids = set(result["transferred_memory_ids"])
# 优化:使用生成器表达式保留未转移的记忆
transfer_cache = [
m
for m in transfer_cache

View File

@@ -5,12 +5,69 @@
"""
import asyncio
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
def _compute_similarities_sync(
query_embedding: "np.ndarray",
block_embeddings: "np.ndarray | list[np.ndarray] | list[Any]",
block_norms: "np.ndarray | list[float] | None" = None,
) -> "np.ndarray":
"""
计算 query 向量与一组向量的余弦相似度(同步/向量化实现)。
- 返回 float32 ndarray
- 输出范围裁剪到 [0.0, 1.0]
- 支持可选的 block_norms 以减少重复 norm 计算
"""
import numpy as np
if block_embeddings is None:
return np.zeros(0, dtype=np.float32)
query = np.asarray(query_embedding, dtype=np.float32)
if isinstance(block_embeddings, (list, tuple)) and len(block_embeddings) == 0:
return np.zeros(0, dtype=np.float32)
blocks = np.asarray(block_embeddings, dtype=np.float32)
if blocks.dtype == object:
blocks = np.stack(
[np.asarray(vec, dtype=np.float32) for vec in block_embeddings],
axis=0,
)
if blocks.size == 0:
return np.zeros(0, dtype=np.float32)
if blocks.ndim == 1:
blocks = blocks.reshape(1, -1)
query_norm = float(np.linalg.norm(query))
if query_norm == 0.0:
return np.zeros(blocks.shape[0], dtype=np.float32)
if block_norms is None:
block_norms_array = np.linalg.norm(blocks, axis=1).astype(np.float32, copy=False)
else:
block_norms_array = np.asarray(block_norms, dtype=np.float32)
if block_norms_array.shape[0] != blocks.shape[0]:
block_norms_array = np.linalg.norm(blocks, axis=1).astype(np.float32, copy=False)
dot_products = blocks @ query
denom = block_norms_array * np.float32(query_norm)
similarities = np.zeros(blocks.shape[0], dtype=np.float32)
valid_mask = denom > 0
if valid_mask.any():
np.divide(dot_products, denom, out=similarities, where=valid_mask)
return np.clip(similarities, 0.0, 1.0)
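A quick sanity check of the helper above, passing precomputed norms to skip the per-call `np.linalg.norm`; zero vectors score 0.0 instead of raising:

import numpy as np

query = np.array([1.0, 0.0], dtype=np.float32)
blocks = [np.array([1.0, 0.0]), np.array([0.0, 1.0]), np.array([0.0, 0.0])]
norms = [1.0, 1.0, 0.0]
print(_compute_similarities_sync(query, blocks, norms))  # [1. 0. 0.]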
def cosine_similarity(vec1: "np.ndarray", vec2: "np.ndarray") -> float:
"""
计算两个向量的余弦相似度
@@ -25,23 +82,16 @@ def cosine_similarity(vec1: "np.ndarray", vec2: "np.ndarray") -> float:
try:
import numpy as np
# 确保是numpy数组
if not isinstance(vec1, np.ndarray):
vec1 = np.array(vec1)
if not isinstance(vec2, np.ndarray):
vec2 = np.array(vec2)
vec1 = np.asarray(vec1, dtype=np.float32)
vec2 = np.asarray(vec2, dtype=np.float32)
# 归一化
vec1_norm = np.linalg.norm(vec1)
vec2_norm = np.linalg.norm(vec2)
vec1_norm = float(np.linalg.norm(vec1))
vec2_norm = float(np.linalg.norm(vec2))
if vec1_norm == 0 or vec2_norm == 0:
if vec1_norm == 0.0 or vec2_norm == 0.0:
return 0.0
# 余弦相似度
similarity = np.dot(vec1, vec2) / (vec1_norm * vec2_norm)
# 确保在 [0, 1] 范围内(处理浮点误差)
similarity = float(np.dot(vec1, vec2) / (vec1_norm * vec2_norm))
return float(np.clip(similarity, 0.0, 1.0))
except Exception:
@@ -74,43 +124,10 @@ def batch_cosine_similarity(vec1: "np.ndarray", vec_list: list["np.ndarray"]) ->
相似度列表
"""
try:
import numpy as np
if not vec_list:
return []
# 确保是numpy数组
if not isinstance(vec1, np.ndarray):
vec1 = np.array(vec1)
# 批量转换为numpy数组
vec_list = [np.array(vec) for vec in vec_list]
# 计算归一化
vec1_norm = np.linalg.norm(vec1)
if vec1_norm == 0:
return [0.0] * len(vec_list)
# 计算所有向量的归一化
vec_norms = np.array([np.linalg.norm(vec) for vec in vec_list])
# 避免除以0
valid_mask = vec_norms != 0
similarities = np.zeros(len(vec_list))
if np.any(valid_mask):
# 批量计算点积
valid_vecs = np.array(vec_list)[valid_mask]
dot_products = np.dot(valid_vecs, vec1)
# 计算相似度
valid_norms = vec_norms[valid_mask]
valid_similarities = dot_products / (vec1_norm * valid_norms)
# 确保在 [0, 1] 范围内
valid_similarities = np.clip(valid_similarities, 0.0, 1.0)
# 填充结果
similarities[valid_mask] = valid_similarities
return similarities.tolist()
return _compute_similarities_sync(vec1, vec_list).tolist()
except Exception:
return [0.0] * len(vec_list)
@@ -134,5 +151,5 @@ __all__ = [
"batch_cosine_similarity",
"batch_cosine_similarity_async",
"cosine_similarity",
"cosine_similarity_async"
"cosine_similarity_async",
]

View File

@@ -241,7 +241,6 @@ class PersonInfoManager:
return person_id
@staticmethod
@staticmethod
async def first_knowing_some_one(platform: str, user_id: str, user_nickname: str, user_cardname: str):
"""判断是否认识某人"""
@@ -697,6 +696,18 @@ class PersonInfoManager:
try:
value = getattr(record, field_name)
if value is not None:
# 对 JSON 序列化字段进行反序列化
if field_name in JSON_SERIALIZED_FIELDS:
try:
# 确保 value 是字符串类型
if isinstance(value, str):
return orjson.loads(value)
else:
# 如果不是字符串,可能已经是解析后的数据,直接返回
return value
except Exception as e:
logger.warning(f"反序列化字段 {field_name} 失败: {e}, value={value}, 使用默认值")
return copy.deepcopy(person_info_default.get(field_name))
return value
else:
return copy.deepcopy(person_info_default.get(field_name))
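The guard mirrors the write path: fields in `JSON_SERIALIZED_FIELDS` are stored as JSON strings but may already arrive deserialized depending on the driver. A minimal sketch of the tolerant read; `defaults` is a hypothetical stand-in for `person_info_default`:

import copy

import orjson

defaults = {"points": []}  # hypothetical stand-in for person_info_default

def read_field(value, field_name: str, json_fields: set[str]):
    if value is None:
        return copy.deepcopy(defaults.get(field_name))
    if field_name in json_fields and isinstance(value, str):
        try:
            return orjson.loads(value)
        except orjson.JSONDecodeError:
            return copy.deepcopy(defaults.get(field_name))
    return value  # already a parsed object, or not a JSON field

print(read_field('[["a", 1, 2]]', "points", {"points"}))  # [['a', 1, 2]]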
@@ -737,7 +748,20 @@ class PersonInfoManager:
try:
value = getattr(record, field_name)
if value is not None:
result[field_name] = value
# 对 JSON 序列化字段进行反序列化
if field_name in JSON_SERIALIZED_FIELDS:
try:
# 确保 value 是字符串类型
if isinstance(value, str):
result[field_name] = orjson.loads(value)
else:
# 如果不是字符串,可能已经是解析后的数据,直接使用
result[field_name] = value
except Exception as e:
logger.warning(f"反序列化字段 {field_name} 失败: {e}, value={value}, 使用默认值")
result[field_name] = copy.deepcopy(person_info_default.get(field_name))
else:
result[field_name] = value
else:
result[field_name] = copy.deepcopy(person_info_default.get(field_name))
except Exception as e:

View File

@@ -182,10 +182,10 @@ class RelationshipFetcher:
kw_lower = kw.lower()
# 排除聊天互动、情感需求等不是真实兴趣的词汇
if not any(excluded in kw_lower for excluded in [
'亲亲', '撒娇', '被宠', '被夸', '聊天', '互动', '关心', '专注', '需要'
"亲亲", "撒娇", "被宠", "被夸", "聊天", "互动", "关心", "专注", "需要"
]):
filtered_keywords.append(kw)
if filtered_keywords:
keywords_str = "".join(filtered_keywords)
relation_parts.append(f"\n{person_name}的兴趣爱好:{keywords_str}")

View File

@@ -50,7 +50,6 @@ from .base import (
ToolParamType,
create_plus_command_adapter,
)
from .utils.dependency_config import configure_dependency_settings, get_dependency_config
# 导入依赖管理模块
from .utils.dependency_manager import configure_dependency_manager, get_dependency_manager

View File

@@ -12,6 +12,7 @@ from src.plugin_system.apis import (
config_api,
database_api,
emoji_api,
expression_api,
generator_api,
llm_api,
message_api,
@@ -38,6 +39,7 @@ __all__ = [
"context_api",
"database_api",
"emoji_api",
"expression_api",
"generator_api",
"get_logger",
"llm_api",

File diff suppressed because it is too large

View File

@@ -116,8 +116,24 @@ async def get_person_points(person_id: str, limit: int = 5) -> list[tuple]:
if not points:
return []
# 验证 points 是列表类型
if not isinstance(points, list):
logger.warning(f"[PersonAPI] 用户记忆点数据类型错误: person_id={person_id}, type={type(points)}, value={points}")
return []
# 过滤掉格式不正确的记忆点 (应该是包含至少3个元素的元组或列表)
valid_points = []
for point in points:
if isinstance(point, list | tuple) and len(point) >= 3:
valid_points.append(point)
else:
logger.warning(f"[PersonAPI] 跳过格式错误的记忆点: person_id={person_id}, point={point}")
if not valid_points:
return []
# 按权重和时间排序,返回最重要的几个点
sorted_points = sorted(points, key=lambda x: (x[1], x[2]), reverse=True)
sorted_points = sorted(valid_points, key=lambda x: (x[1], x[2]), reverse=True)
return sorted_points[:limit]
except Exception as e:
logger.error(f"[PersonAPI] 获取用户记忆点失败: person_id={person_id}, error={e}")

View File

@@ -1,83 +0,0 @@
from src.common.logger import get_logger
logger = get_logger("dependency_config")
class DependencyConfig:
"""依赖管理配置类 - 现在使用全局配置"""
def __init__(self, global_config=None):
self._global_config = global_config
def _get_config(self):
"""获取全局配置对象"""
if self._global_config is not None:
return self._global_config
# 延迟导入以避免循环依赖
try:
from src.config.config import global_config
return global_config
except ImportError:
logger.warning("无法导入全局配置,使用默认设置")
return None
@property
def auto_install(self) -> bool:
"""是否启用自动安装"""
config = self._get_config()
if config and hasattr(config, "dependency_management"):
return config.dependency_management.auto_install
return True
@property
def use_mirror(self) -> bool:
"""是否使用PyPI镜像源"""
config = self._get_config()
if config and hasattr(config, "dependency_management"):
return config.dependency_management.use_mirror
return False
@property
def mirror_url(self) -> str:
"""PyPI镜像源URL"""
config = self._get_config()
if config and hasattr(config, "dependency_management"):
return config.dependency_management.mirror_url
return ""
@property
def install_timeout(self) -> int:
"""安装超时时间(秒)"""
config = self._get_config()
if config and hasattr(config, "dependency_management"):
return config.dependency_management.auto_install_timeout
return 300
@property
def prompt_before_install(self) -> bool:
"""安装前是否提示用户"""
config = self._get_config()
if config and hasattr(config, "dependency_management"):
return config.dependency_management.prompt_before_install
return False
# 全局配置实例
_global_dependency_config: DependencyConfig | None = None
def get_dependency_config() -> DependencyConfig:
"""获取全局依赖配置实例"""
global _global_dependency_config
if _global_dependency_config is None:
_global_dependency_config = DependencyConfig()
return _global_dependency_config
def configure_dependency_settings(**kwargs) -> None:
"""配置依赖管理设置 - 注意这个函数现在仅用于兼容性实际配置需要修改bot_config.toml"""
logger.info("依赖管理设置现在通过 bot_config.toml 的 [dependency_management] 节进行配置")
logger.info(f"请求的配置更改: {kwargs}")
logger.warning("configure_dependency_settings 函数仅用于兼容性,配置更改不会持久化")

View File

@@ -1,7 +1,10 @@
import importlib
import importlib.util
import os
import shutil
import subprocess
import sys
from pathlib import Path
from typing import Any
from packaging import version
@@ -14,8 +17,89 @@ from src.plugin_system.utils.dependency_alias import INSTALL_NAME_TO_IMPORT_NAME
logger = get_logger("dependency_manager")
class VenvDetector:
"""虚拟环境检测器"""
@staticmethod
def detect_venv_type() -> str | None:
"""
检测虚拟环境类型
返回: 'uv' | 'venv' | 'conda' | None
"""
# 检查是否在虚拟环境中
in_venv = hasattr(sys, "real_prefix") or (
hasattr(sys, "base_prefix") and sys.base_prefix != sys.prefix
)
if not in_venv:
logger.warning("当前不在虚拟环境中")
return None
venv_path = Path(sys.prefix)
# 1. 检测 uv (优先检查 pyvenv.cfg 文件)
pyvenv_cfg = venv_path / "pyvenv.cfg"
if pyvenv_cfg.exists():
try:
with open(pyvenv_cfg, encoding="utf-8") as f:
content = f.read()
if "uv = " in content:
logger.info("检测到 uv 虚拟环境")
return "uv"
except Exception as e:
logger.warning(f"读取 pyvenv.cfg 失败: {e}")
# 2. 检测 conda (检查环境变量和路径)
if "CONDA_DEFAULT_ENV" in os.environ or "CONDA_PREFIX" in os.environ:
logger.info("检测到 conda 虚拟环境")
return "conda"
# 通过路径特征检测 conda
if "conda" in str(venv_path).lower() or "anaconda" in str(venv_path).lower():
logger.info(f"检测到 conda 虚拟环境 (路径: {venv_path})")
return "conda"
# 3. 默认为 venv (标准 Python 虚拟环境)
logger.info(f"检测到标准 venv 虚拟环境 (路径: {venv_path})")
return "venv"
@staticmethod
def get_install_command(venv_type: str | None) -> list[str]:
"""
根据虚拟环境类型获取安装命令
Args:
venv_type: 虚拟环境类型 ('uv' | 'venv' | 'conda' | None)
Returns:
安装命令列表 (不包括包名)
"""
if venv_type == "uv":
# 检查 uv 是否可用
uv_path = shutil.which("uv")
if uv_path:
logger.debug("使用 uv pip 安装")
return [uv_path, "pip", "install"]
else:
logger.warning("未找到 uv 命令,回退到标准 pip")
return [sys.executable, "-m", "pip", "install"]
elif venv_type == "conda":
# 获取当前 conda 环境名
conda_env = os.environ.get("CONDA_DEFAULT_ENV")
if conda_env:
logger.debug(f"使用 conda 在环境 {conda_env} 中安装")
return ["conda", "install", "-n", conda_env, "-y"]
else:
logger.warning("未找到 conda 环境名,回退到 pip")
return [sys.executable, "-m", "pip", "install"]
else:
# 默认使用 pip
logger.debug("使用标准 pip 安装")
return [sys.executable, "-m", "pip", "install"]
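End to end, the detector yields a command prefix that `_install_single_package` appends the package (and optionally a mirror flag) to. A sketch of the flow; actual output depends on which of uv/conda/pip the host provides:

venv_type = VenvDetector.detect_venv_type()        # e.g. "uv", "venv", "conda" or None
cmd = VenvDetector.get_install_command(venv_type)  # e.g. ['/usr/bin/uv', 'pip', 'install']
cmd.append("aiohttp>=3.8.0")
print(" ".join(cmd))  # e.g. /usr/bin/uv pip install aiohttp>=3.8.0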
class DependencyManager:
"""Python包依赖管理器
"""Python包依赖管理器 (整合配置和虚拟环境检测)
负责检查和自动安装插件的Python包依赖
"""
@@ -30,15 +114,15 @@ class DependencyManager:
"""
# 延迟导入配置以避免循环依赖
try:
from src.plugin_system.utils.dependency_config import get_dependency_config
config = get_dependency_config()
from src.config.config import global_config
dep_config = global_config.dependency_management
# 优先使用配置文件中的设置,参数作为覆盖
self.auto_install = config.auto_install if auto_install is True else auto_install
self.use_mirror = config.use_mirror if use_mirror is False else use_mirror
self.mirror_url = config.mirror_url if mirror_url is None else mirror_url
self.install_timeout = config.install_timeout
self.auto_install = dep_config.auto_install if auto_install is True else auto_install
self.use_mirror = dep_config.use_mirror if use_mirror is False else use_mirror
self.mirror_url = dep_config.mirror_url if mirror_url is None else mirror_url
self.install_timeout = dep_config.auto_install_timeout
self.prompt_before_install = dep_config.prompt_before_install
except Exception as e:
logger.warning(f"无法加载依赖配置,使用默认设置: {e}")
@@ -46,6 +130,15 @@ class DependencyManager:
self.use_mirror = use_mirror or False
self.mirror_url = mirror_url or ""
self.install_timeout = 300
self.prompt_before_install = False
# 检测虚拟环境类型
self.venv_type = VenvDetector.detect_venv_type()
if self.venv_type:
logger.info(f"依赖管理器初始化完成,虚拟环境类型: {self.venv_type}")
else:
logger.warning("依赖管理器初始化完成,但未检测到虚拟环境")
# ========== 依赖检查和安装核心方法 ==========
def check_dependencies(self, dependencies: Any, plugin_name: str = "") -> tuple[bool, list[str], list[str]]:
"""检查依赖包是否满足要求
@@ -250,23 +343,36 @@ class DependencyManager:
return False
def _install_single_package(self, package: str, plugin_name: str = "") -> bool:
"""安装单个包"""
"""安装单个包 (支持虚拟环境自动检测)"""
try:
cmd = [sys.executable, "-m", "pip", "install", package]
log_prefix = f"[Plugin:{plugin_name}] " if plugin_name else ""
# 添加镜像源设置
if self.use_mirror and self.mirror_url:
# 根据虚拟环境类型构建安装命令
cmd = VenvDetector.get_install_command(self.venv_type)
cmd.append(package)
# 添加镜像源设置 (仅对 pip/uv 有效)
if self.use_mirror and self.mirror_url and "pip" in cmd:
cmd.extend(["-i", self.mirror_url])
logger.debug(f"[Plugin:{plugin_name}] 使用PyPI镜像源: {self.mirror_url}")
logger.debug(f"{log_prefix}使用PyPI镜像源: {self.mirror_url}")
logger.debug(f"[Plugin:{plugin_name}] 执行安装命令: {' '.join(cmd)}")
logger.info(f"{log_prefix}执行安装命令: {' '.join(cmd)}")
result = subprocess.run(cmd, capture_output=True, text=True, timeout=self.install_timeout, check=False)
result = subprocess.run(
cmd,
capture_output=True,
text=True,
encoding="utf-8",
errors="ignore",
timeout=self.install_timeout,
check=False,
)
if result.returncode == 0:
logger.info(f"{log_prefix}安装成功: {package}")
return True
else:
logger.error(f"[Plugin:{plugin_name}] pip安装失败: {result.stderr}")
logger.error(f"{log_prefix}安装失败: {result.stderr}")
return False
except subprocess.TimeoutExpired:

View File

@@ -11,7 +11,6 @@ from inspect import iscoroutinefunction
from src.chat.message_receive.chat_stream import ChatStream
from src.plugin_system.apis.logging_api import get_logger
from src.plugin_system.apis.permission_api import permission_api
from src.plugin_system.apis.send_api import text_to_stream
logger = get_logger(__name__)

View File

@@ -53,7 +53,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
self.use_semantic_scoring = True # 必须启用
self._semantic_initialized = False # 防止重复初始化
self.model_manager = None
# 评分阈值
self.reply_threshold = affinity_config.reply_action_interest_threshold # 回复动作兴趣阈值
self.mention_threshold = affinity_config.mention_bot_adjustment_threshold # 提及bot后的调整阈值
@@ -286,15 +286,15 @@ class AffinityInterestCalculator(BaseInterestCalculator):
if self._semantic_initialized:
logger.debug("[语义评分] 评分器已初始化,跳过")
return
if not self.use_semantic_scoring:
logger.debug("[语义评分] 未启用语义兴趣度评分")
return
# 防止并发初始化(使用锁)
if not hasattr(self, '_init_lock'):
if not hasattr(self, "_init_lock"):
self._init_lock = asyncio.Lock()
async with self._init_lock:
# 双重检查
if self._semantic_initialized:
@@ -315,15 +315,15 @@ class AffinityInterestCalculator(BaseInterestCalculator):
if self.model_manager is None:
self.model_manager = ModelManager(model_dir)
logger.debug("[语义评分] 模型管理器已创建")
# 获取人设信息
persona_info = self._get_current_persona_info()
# 先检查是否已有可用模型
from src.chat.semantic_interest.auto_trainer import get_auto_trainer
auto_trainer = get_auto_trainer()
existing_model = auto_trainer.get_model_for_persona(persona_info)
# 加载模型(自动选择合适的版本,使用单例 + FastScorer
try:
if existing_model and existing_model.exists():
@@ -336,14 +336,14 @@ class AffinityInterestCalculator(BaseInterestCalculator):
version="auto", # 自动选择或训练
persona_info=persona_info
)
self.semantic_scorer = scorer
logger.info("[语义评分] 语义兴趣度评分器初始化成功FastScorer优化 + 单例)")
# 设置初始化标志
self._semantic_initialized = True
# 启动自动训练任务每24小时检查一次- 只在没有模型时或明确需要时启动
if not existing_model or not existing_model.exists():
await self.model_manager.start_auto_training(
@@ -352,9 +352,9 @@ class AffinityInterestCalculator(BaseInterestCalculator):
)
else:
logger.debug("[语义评分] 已有模型,跳过自动训练启动")
except FileNotFoundError:
logger.warning(f"[语义评分] 未找到训练模型,将自动训练...")
logger.warning("[语义评分] 未找到训练模型,将自动训练...")
# 触发首次训练
trained, model_path = await auto_trainer.auto_train_if_needed(
persona_info=persona_info,
@@ -447,7 +447,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
try:
score = await self.semantic_scorer.score_async(content, timeout=2.0)
logger.debug(f"[语义评分] 内容: '{content[:50]}...' -> 分数: {score:.3f}")
return score
@@ -462,14 +462,14 @@ class AffinityInterestCalculator(BaseInterestCalculator):
return
logger.info("[语义评分] 开始重新加载模型...")
# 检查人设是否变化
if hasattr(self, 'model_manager') and self.model_manager:
if hasattr(self, "model_manager") and self.model_manager:
persona_info = self._get_current_persona_info()
reloaded = await self.model_manager.check_and_reload_for_persona(persona_info)
if reloaded:
self.semantic_scorer = self.model_manager.get_scorer()
logger.info("[语义评分] 模型重载完成(人设已更新)")
else:
logger.info("[语义评分] 人设未变化,无需重载")
@@ -524,4 +524,4 @@ class AffinityInterestCalculator(BaseInterestCalculator):
f"[回复后机制] 未回复消息,剩余降低次数: {self.post_reply_boost_remaining}"
)
afc_interest_calculator = AffinityInterestCalculator()
afc_interest_calculator = AffinityInterestCalculator()

View File

@@ -196,12 +196,12 @@ class UserProfileTool(BaseTool):
# 🎯 核心使用relationship_tracker模型生成印象并决定好感度变化
final_impression = existing_profile.get("relationship_text", "")
affection_change = 0.0 # 好感度变化量
# 只有在LLM明确提供impression_hint时才更新印象更严格
if impression_hint and impression_hint.strip():
# 获取最近的聊天记录用于上下文
chat_history_text = await self._get_recent_chat_history(target_user_id)
impression_result = await self._generate_impression_with_affection(
target_user_name=target_user_name,
impression_hint=impression_hint,
@@ -282,7 +282,7 @@ class UserProfileTool(BaseTool):
valid_types = ["birthday", "job", "location", "dream", "family", "pet", "other"]
if info_type not in valid_types:
info_type = "other"
# 🎯 信息质量判断:过滤掉模糊的描述性内容
low_quality_patterns = [
# 原有的模糊描述
@@ -296,7 +296,7 @@ class UserProfileTool(BaseTool):
"感觉", "心情", "状态", "最近", "今天", "现在"
]
info_value_lower = info_value.lower().strip()
# 如果值太短或包含低质量模式,跳过
if len(info_value_lower) < 2:
logger.warning(f"关键信息值太短,跳过: {info_value}")
@@ -640,7 +640,7 @@ class UserProfileTool(BaseTool):
affection_change = float(result.get("affection_change", 0))
result.get("change_reason", "")
detected_gender = result.get("gender", "unknown")
# 🎯 根据当前好感度阶段限制变化范围
if current_score < 0.3:
# 陌生→初识±0.03
@@ -657,7 +657,7 @@ class UserProfileTool(BaseTool):
else:
# 好友→挚友±0.01
max_change = 0.01
affection_change = max(-max_change, min(max_change, affection_change))
# 如果印象为空或太短回退到hint

View File

@@ -115,9 +115,9 @@ def build_custom_decision_module() -> str:
kfc_config = get_config()
custom_prompt = getattr(kfc_config, "custom_decision_prompt", "")
# 调试输出
logger.debug(f"[自定义决策提示词] 原始值: {repr(custom_prompt)}, 类型: {type(custom_prompt)}")
logger.debug(f"[自定义决策提示词] 原始值: {custom_prompt!r}, 类型: {type(custom_prompt)}")
if not custom_prompt or not custom_prompt.strip():
logger.debug("[自定义决策提示词] 为空或仅含空白字符,跳过")

View File

@@ -2,21 +2,28 @@
from __future__ import annotations
import asyncio
import base64
import time
from pathlib import Path
from typing import TYPE_CHECKING, Any
from mofox_wire import (
MessageBuilder,
SegPayload,
)
import orjson
from mofox_wire import MessageBuilder, SegPayload
from src.common.logger import get_logger
from src.plugin_system.apis import config_api
from ...event_models import ACCEPT_FORMAT, QQ_FACE, RealMessageType
from ..utils import *
from ..utils import (
get_forward_message,
get_group_info,
get_image_base64,
get_member_info,
get_message_detail,
get_record_detail,
get_self_info,
)
if TYPE_CHECKING:
from ....plugin import NapcatAdapter
@@ -300,8 +307,7 @@ class MessageHandler:
try:
if file_path and Path(file_path).exists():
# 本地文件处理
with open(file_path, "rb") as f:
video_data = f.read()
video_data = await asyncio.to_thread(Path(file_path).read_bytes)
video_base64 = base64.b64encode(video_data).decode("utf-8")
logger.debug(f"视频文件大小: {len(video_data) / (1024 * 1024):.2f} MB")

View File

@@ -22,6 +22,7 @@ class MetaEventHandler:
self.adapter = adapter
self.plugin_config: dict[str, Any] | None = None
self._interval_checking = False
self._heartbeat_task: asyncio.Task | None = None
def set_plugin_config(self, config: dict[str, Any]) -> None:
"""设置插件配置"""
@@ -41,7 +42,7 @@ class MetaEventHandler:
self_id = raw.get("self_id")
if not self._interval_checking and self_id:
# 第一次收到心跳包时才启动心跳检查
asyncio.create_task(self.check_heartbeat(self_id))
self._heartbeat_task = asyncio.create_task(self.check_heartbeat(self_id))
self.last_heart_beat = time.time()
interval = raw.get("interval")
if interval:

View File

@@ -7,6 +7,7 @@ import asyncio
import base64
import hashlib
from pathlib import Path
from typing import ClassVar
import aiohttp
import toml
@@ -139,25 +140,34 @@ class SiliconFlowIndexTTSAction(BaseAction):
action_description = "使用SiliconFlow API进行高质量的IndexTTS语音合成支持零样本语音克隆"
# 关键词配置
activation_keywords = ["克隆语音", "模仿声音", "语音合成", "indextts", "声音克隆", "语音生成", "仿声", "变声"]
activation_keywords: ClassVar[list[str]] = [
"克隆语音",
"模仿声音",
"语音合成",
"indextts",
"声音克隆",
"语音生成",
"仿声",
"变声",
]
keyword_case_sensitive = False
# 动作参数定义
action_parameters = {
action_parameters: ClassVar[dict[str, str]] = {
"text": "需要合成语音的文本内容,必填,应当清晰流畅",
"speed": "语速可选范围0.1-3.0默认1.0"
"speed": "语速可选范围0.1-3.0默认1.0",
}
# 动作使用场景
action_require = [
action_require: ClassVar[list[str]] = [
"当用户要求语音克隆或模仿某个声音时使用",
"当用户明确要求进行语音合成时使用",
"当需要高质量语音输出时使用",
"当用户要求变声或仿声时使用"
"当用户要求变声或仿声时使用",
]
# 关联类型 - 支持语音消息
associated_types = ["voice"]
associated_types: ClassVar[list[str]] = ["voice"]
async def execute(self) -> tuple[bool, str]:
"""执行SiliconFlow IndexTTS语音合成"""
@@ -258,11 +268,11 @@ class SiliconFlowTTSCommand(BaseCommand):
command_name = "sf_tts"
command_description = "使用SiliconFlow IndexTTS进行语音合成"
command_aliases = ["sftts", "sf语音", "硅基语音"]
command_aliases: ClassVar[list[str]] = ["sftts", "sf语音", "硅基语音"]
command_parameters = {
command_parameters: ClassVar[dict[str, dict[str, object]]] = {
"text": {"type": str, "required": True, "description": "要合成的文本"},
"speed": {"type": float, "required": False, "description": "语速 (0.1-3.0)"}
"speed": {"type": float, "required": False, "description": "语速 (0.1-3.0)"},
}
async def execute(self, text: str, speed: float = 1.0) -> tuple[bool, str]:
@@ -341,14 +351,14 @@ class SiliconFlowIndexTTSPlugin(BasePlugin):
# 必需的抽象属性
enable_plugin: bool = True
dependencies: list[str] = []
dependencies: ClassVar[list[str]] = []
config_file_name: str = "config.toml"
# Python依赖
python_dependencies = ["aiohttp>=3.8.0"]
python_dependencies: ClassVar[list[str]] = ["aiohttp>=3.8.0"]
# 配置描述
config_section_descriptions = {
config_section_descriptions: ClassVar[dict[str, str]] = {
"plugin": "插件基本配置",
"components": "组件启用配置",
"api": "SiliconFlow API配置",
@@ -356,7 +366,7 @@ class SiliconFlowIndexTTSPlugin(BasePlugin):
}
# 配置schema
config_schema = {
config_schema: ClassVar[dict[str, dict[str, ConfigField]]] = {
"plugin": {
"enabled": ConfigField(type=bool, default=False, description="是否启用插件"),
"config_version": ConfigField(type=str, default="2.0.0", description="配置文件版本"),

View File

@@ -43,8 +43,7 @@ class VoiceUploader:
raise FileNotFoundError(f"音频文件不存在: {audio_path}")
# 读取音频文件并转换为base64
with open(audio_path, "rb") as f:
audio_data = f.read()
audio_data = await asyncio.to_thread(audio_path.read_bytes)
audio_base64 = base64.b64encode(audio_data).decode("utf-8")
@@ -60,7 +59,7 @@ class VoiceUploader:
}
logger.info(f"正在上传音频文件: {audio_path}")
async with aiohttp.ClientSession() as session:
async with session.post(
self.upload_url,

View File

@@ -347,8 +347,10 @@ class SystemCommand(PlusCommand):
return
response_parts = [f"🧩 已注册的提示词组件 (共 {len(components)} 个):"]
for comp in components:
response_parts.append(f"• `{comp.name}` (来自: `{comp.plugin_name}`)")
response_parts.extend(
[f"• `{comp.name}` (来自: `{comp.plugin_name}`)" for comp in components]
)
await self._send_long_message("\n".join(response_parts))
@@ -586,8 +588,10 @@ class SystemCommand(PlusCommand):
for plugin_name, comps in by_plugin.items():
response_parts.append(f"🔌 **{plugin_name}**:")
for comp in comps:
response_parts.append(f" ❌ `{comp.name}` ({comp.component_type.value})")
response_parts.extend(
[f" ❌ `{comp.name}` ({comp.component_type.value})" for comp in comps]
)
await self._send_long_message("\n".join(response_parts))

View File

@@ -121,13 +121,17 @@ class SerperSearchEngine(BaseSearchEngine):
# 添加有机搜索结果
if "organic" in data:
for result in data["organic"][:num_results]:
results.append({
"title": result.get("title", "无标题"),
"url": result.get("link", ""),
"snippet": result.get("snippet", ""),
"provider": "Serper",
})
results.extend(
[
{
"title": result.get("title", "无标题"),
"url": result.get("link", ""),
"snippet": result.get("snippet", ""),
"provider": "Serper",
}
for result in data["organic"][:num_results]
]
)
logger.info(f"Serper搜索成功: 查询='{query}', 结果数={len(results)}")
return results

View File

@@ -4,6 +4,8 @@ Web Search Tool Plugin
一个功能强大的网络搜索和URL解析插件支持多种搜索引擎和解析策略。
"""
from typing import ClassVar
from src.common.logger import get_logger
from src.plugin_system import BasePlugin, ComponentInfo, ConfigField, register_plugin
from src.plugin_system.apis import config_api
@@ -30,7 +32,7 @@ class WEBSEARCHPLUGIN(BasePlugin):
# 插件基本信息
plugin_name: str = "web_search_tool" # 内部标识符
enable_plugin: bool = True
dependencies: list[str] = [] # 插件依赖列表
dependencies: ClassVar[list[str]] = [] # 插件依赖列表
def __init__(self, *args, **kwargs):
"""初始化插件,立即加载所有搜索引擎"""
@@ -80,11 +82,14 @@ class WEBSEARCHPLUGIN(BasePlugin):
config_file_name: str = "config.toml" # 配置文件名
# 配置节描述
config_section_descriptions = {"plugin": "插件基本信息", "proxy": "链接本地解析代理配置"}
config_section_descriptions: ClassVar[dict[str, str]] = {
"plugin": "插件基本信息",
"proxy": "链接本地解析代理配置",
}
# 配置Schema定义
# 注意:EXA配置和组件设置已迁移到主配置文件(bot_config.toml)的[exa]和[web_search]部分
config_schema: dict = {
config_schema: ClassVar[dict[str, dict[str, ConfigField]]] = {
"plugin": {
"name": ConfigField(type=str, default="WEB_SEARCH_PLUGIN", description="插件名称"),
"version": ConfigField(type=str, default="1.0.0", description="插件版本"),