feat: refactor memory system config, remove three-tier memory settings, streamline global memory management, support batch text embedding generation

Author: Windpicker-owo
Date: 2025-11-19 19:16:27 +08:00
parent 5231404852
commit cf48d02ed3
10 changed files with 164 additions and 139 deletions


@@ -138,20 +138,24 @@ class MemoryManager:
         )
         # Check config values
-        expand_depth = self.config.search_max_expand_depth
-        expand_semantic_threshold = self.config.search_expand_semantic_threshold
-        search_top_k = self.config.search_top_k
+        # Compatibility handling: if a config key is missing, fall back to a default or map to the new key
+        expand_depth = getattr(self.config, "path_expansion_max_hops", 2)
+        expand_semantic_threshold = getattr(self.config, "search_similarity_threshold", 0.5)
+        search_top_k = getattr(self.config, "search_top_k", 10)
         # Read weight config
-        search_vector_weight = self.config.search_vector_weight
-        search_importance_weight = self.config.search_importance_weight
-        search_recency_weight = self.config.search_recency_weight
+        search_vector_weight = getattr(self.config, "vector_weight", 0.65)
+        # context_weight maps approximately onto importance_weight
+        search_importance_weight = getattr(self.config, "context_weight", 0.25)
+        search_recency_weight = getattr(self.config, "recency_weight", 0.10)
         # Read threshold-filter config
-        search_min_importance = self.config.search_min_importance
-        search_similarity_threshold = self.config.search_similarity_threshold
+        search_min_importance = getattr(self.config, "search_min_importance", 0.3)
+        search_similarity_threshold = getattr(self.config, "search_similarity_threshold", 0.5)
         logger.info(
-            f"📊 Config check: search_max_expand_depth={expand_depth}, "
-            f"search_expand_semantic_threshold={expand_semantic_threshold}, "
+            f"📊 Config check: expand_depth={expand_depth}, "
+            f"expand_semantic_threshold={expand_semantic_threshold}, "
             f"search_top_k={search_top_k}"
         )
         logger.info(
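Note on the weight defaults above: vector 0.65, importance 0.25, and recency 0.10 sum to 1.0, which suggests a linear blend of per-memory scores. Below is a minimal sketch of such a blend, assuming each component is already normalized to [0, 1]; the actual combination code is not part of this diff, and `blended_score` is a hypothetical name.

```python
# Hypothetical sketch; the real scoring code is not shown in this diff.
def blended_score(
    vector_sim: float,      # embedding similarity, assumed in [0, 1]
    importance: float,      # stored importance, assumed in [0, 1]
    recency: float,         # recency score, assumed in [0, 1]
    vector_weight: float = 0.65,
    importance_weight: float = 0.25,
    recency_weight: float = 0.10,
) -> float:
    # With the default weights summing to 1.0, the blend stays in [0, 1].
    return (vector_weight * vector_sim
            + importance_weight * importance
            + recency_weight * recency)

print(blended_score(0.8, 0.5, 0.9))  # 0.735
```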
@@ -422,7 +426,7 @@ class MemoryManager:
             "query": query,
             "top_k": top_k,
             "use_multi_query": use_multi_query,
-            "expand_depth": expand_depth or global_config.memory.search_max_expand_depth,  # pass graph-expansion depth
+            "expand_depth": expand_depth or getattr(global_config.memory, "path_expansion_max_hops", 2),  # pass graph-expansion depth
             "context": context,
             "prefer_node_types": prefer_node_types or [],  # 🆕 pass preferred node types
         }
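Both hunks in this file lean on the same compatibility idiom: `getattr(config, key, default)` keeps older configs working when a key has been renamed or removed. A self-contained sketch of the idiom; the `SimpleNamespace` stand-ins below are illustrative, not the project's real config objects:

```python
from types import SimpleNamespace

# Stand-in for a config written against the old schema:
# it has none of the renamed keys, so every getattr falls back.
old_style_config = SimpleNamespace()
assert getattr(old_style_config, "path_expansion_max_hops", 2) == 2
assert getattr(old_style_config, "vector_weight", 0.65) == 0.65

# A config that already uses the new schema overrides the defaults.
new_style_config = SimpleNamespace(path_expansion_max_hops=4, vector_weight=0.5)
assert getattr(new_style_config, "path_expansion_max_hops", 2) == 4
assert getattr(new_style_config, "vector_weight", 0.65) == 0.5
```

One trade-off worth noting: `getattr` also silently swallows typos in key names, so the hard-coded defaults need to stay in sync with the documented schema.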


@@ -139,29 +139,29 @@ async def initialize_unified_memory_manager():
     from src.memory_graph.unified_manager import UnifiedMemoryManager
     # Check whether the three-tier memory system is enabled
-    if not hasattr(global_config, "three_tier_memory") or not getattr(
-        global_config.three_tier_memory, "enable", False
+    if not hasattr(global_config, "memory") or not getattr(
+        global_config.memory, "enable", False
     ):
         logger.warning("Three-tier memory system is not enabled, skipping initialization")
         return None
-    config = global_config.three_tier_memory
+    config = global_config.memory
     # Create the manager instance
     _unified_memory_manager = UnifiedMemoryManager(
-        data_dir=Path(getattr(config, "data_dir", "data/memory_graph/three_tier")),
+        data_dir=Path(getattr(config, "data_dir", "data/memory_graph")),
         # Perceptual memory config
         perceptual_max_blocks=getattr(config, "perceptual_max_blocks", 50),
         perceptual_block_size=getattr(config, "perceptual_block_size", 5),
         perceptual_activation_threshold=getattr(config, "perceptual_activation_threshold", 3),
-        perceptual_recall_top_k=getattr(config, "perceptual_recall_top_k", 5),
-        perceptual_recall_threshold=getattr(config, "perceptual_recall_threshold", 0.55),
+        perceptual_recall_top_k=getattr(config, "perceptual_topk", 5),
+        perceptual_recall_threshold=getattr(config, "perceptual_similarity_threshold", 0.55),
         # Short-term memory config
         short_term_max_memories=getattr(config, "short_term_max_memories", 30),
         short_term_transfer_threshold=getattr(config, "short_term_transfer_threshold", 0.6),
         # Long-term memory config
         long_term_batch_size=getattr(config, "long_term_batch_size", 10),
-        long_term_search_top_k=getattr(config, "long_term_search_top_k", 5),
+        long_term_search_top_k=getattr(config, "search_top_k", 5),
         long_term_decay_factor=getattr(config, "long_term_decay_factor", 0.95),
         long_term_auto_transfer_interval=getattr(config, "long_term_auto_transfer_interval", 600),
         # Intelligent retrieval config
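The guard at the top of this hunk treats a missing `memory` section or a missing `enable` flag as "disabled". A minimal sketch of that gating pattern, using a hypothetical `should_initialize` helper and `SimpleNamespace` stubs in place of the real `global_config`:

```python
from types import SimpleNamespace

def should_initialize(global_config) -> bool:
    # Mirrors the guard above: the section must exist AND be explicitly
    # enabled; an absent "enable" flag counts as disabled.
    return hasattr(global_config, "memory") and getattr(
        global_config.memory, "enable", False
    )

assert should_initialize(SimpleNamespace()) is False                          # no memory section
assert should_initialize(SimpleNamespace(memory=SimpleNamespace())) is False  # section, no flag
assert should_initialize(SimpleNamespace(memory=SimpleNamespace(enable=True))) is True
```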


@@ -279,6 +279,28 @@ class PerceptualMemoryManager:
             logger.error(f"Failed to generate embedding: {e}", exc_info=True)
             return None
 
+    async def _generate_embeddings_batch(self, texts: list[str]) -> list[np.ndarray | None]:
+        """
+        Generate embeddings for a batch of texts.
+
+        Args:
+            texts: list of input texts
+
+        Returns:
+            list of embeddings, aligned one-to-one with the input
+        """
+        try:
+            if not self.embedding_generator:
+                logger.error("Embedding generator not initialized")
+                return [None] * len(texts)
+            embeddings = await self.embedding_generator.generate_batch(texts)
+            return embeddings
+        except Exception as e:
+            logger.error(f"Batch embedding generation failed: {e}", exc_info=True)
+            return [None] * len(texts)
+
     async def recall_blocks(
         self,
         query_text: str,
@@ -528,11 +550,29 @@ class PerceptualMemoryManager:
         logger.info("Regenerating memory-block embeddings...")
-        for block in self.perceptual_memory.blocks:
-            if block.embedding is None and block.combined_text:
-                block.embedding = await self._generate_embedding(block.combined_text)
-        logger.info(f"✅ Embedding regeneration finished ({len(self.perceptual_memory.blocks)} blocks)")
+        blocks_to_process = []
+        texts_to_process = []
+        for block in self.perceptual_memory.blocks:
+            if block.embedding is None and block.combined_text and block.combined_text.strip():
+                blocks_to_process.append(block)
+                texts_to_process.append(block.combined_text)
+        if not blocks_to_process:
+            logger.info("No blocks need embedding regeneration")
+            return
+        logger.info(f"Starting batch embedding generation for {len(blocks_to_process)} blocks...")
+        embeddings = await self._generate_embeddings_batch(texts_to_process)
+        success_count = 0
+        for block, embedding in zip(blocks_to_process, embeddings):
+            if embedding is not None:
+                block.embedding = embedding
+                success_count += 1
+        logger.info(f"✅ Embedding regeneration finished (succeeded: {success_count}/{len(blocks_to_process)})")
 
     async def shutdown(self) -> None:
         """Shut down the manager."""


@@ -492,6 +492,28 @@ class ShortTermMemoryManager:
             logger.error(f"Failed to generate embedding: {e}", exc_info=True)
             return None
 
+    async def _generate_embeddings_batch(self, texts: list[str]) -> list[np.ndarray | None]:
+        """
+        Generate embeddings for a batch of texts.
+
+        Args:
+            texts: list of input texts
+
+        Returns:
+            list of embeddings, aligned one-to-one with the input
+        """
+        try:
+            if not self.embedding_generator:
+                logger.error("Embedding generator not initialized")
+                return [None] * len(texts)
+            embeddings = await self.embedding_generator.generate_batch(texts)
+            return embeddings
+        except Exception as e:
+            logger.error(f"Batch embedding generation failed: {e}", exc_info=True)
+            return [None] * len(texts)
+
     def _parse_json_response(self, response: str) -> dict[str, Any] | None:
         """Parse the LLM's JSON response."""
         try:
@@ -684,11 +706,29 @@ class ShortTermMemoryManager:
         """Regenerate embeddings for stored memories."""
         logger.info("Regenerating short-term memory embeddings...")
-        for memory in self.memories:
-            if memory.embedding is None and memory.content:
-                memory.embedding = await self._generate_embedding(memory.content)
-        logger.info(f"✅ Embedding regeneration finished ({len(self.memories)} memories)")
+        memories_to_process = []
+        texts_to_process = []
+        for memory in self.memories:
+            if memory.embedding is None and memory.content and memory.content.strip():
+                memories_to_process.append(memory)
+                texts_to_process.append(memory.content)
+        if not memories_to_process:
+            logger.info("No short-term memories need embedding regeneration")
+            return
+        logger.info(f"Starting batch embedding generation for {len(memories_to_process)} short-term memories...")
+        embeddings = await self._generate_embeddings_batch(texts_to_process)
+        success_count = 0
+        for memory, embedding in zip(memories_to_process, embeddings):
+            if embedding is not None:
+                memory.embedding = embedding
+                success_count += 1
+        logger.info(f"✅ Embedding regeneration finished (succeeded: {success_count}/{len(memories_to_process)})")
 
     async def shutdown(self) -> None:
         """Shut down the manager."""