Optimize logging (优化日志)

This commit is contained in:
Windpicker-owo
2025-11-26 21:16:16 +08:00
parent e0157256b1
commit 0908fb50a0
100 changed files with 493 additions and 574 deletions
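The change is mechanical and repeated across the touched files: routine progress logs are downgraded from INFO to DEBUG, verbose multi-line status logs are removed or condensed, and `exc_info=True` is dropped from error logs. A minimal sketch of the before/after pattern, assuming only Python's standard `logging` module (the logger name and function below are illustrative, not code from this commit):

    import logging

    logger = logging.getLogger("memory")  # hypothetical stand-in for the project's get_logger()

    def transfer(short_term_memories: list) -> None:
        # Before: logger.info(f"开始转移 {len(short_term_memories)} 条短期记忆到长期记忆...")
        # After: routine progress moves to DEBUG, so it is silent at the default INFO level.
        logger.debug(f"开始转移 {len(short_term_memories)} 条短期记忆到长期记忆...")
        try:
            ...  # the actual transfer work
        except Exception as e:
            # Before: logger.error(f"转移短期记忆失败: {e}", exc_info=True)  # message + traceback
            # After: only the exception message is recorded; the traceback is no longer attached.
            logger.error(f"转移短期记忆失败: {e}")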

View File

@@ -159,7 +159,7 @@ class MemoryBuilder:
            return memory
        except Exception as e:
-           logger.error(f"记忆构建失败: {e}", exc_info=True)
+           logger.error(f"记忆构建失败: {e}")
            raise RuntimeError(f"记忆构建失败: {e}")

    async def _create_or_reuse_node(

@@ -551,7 +551,7 @@ class MemoryBuilder:
            return edge
        except Exception as e:
-           logger.error(f"记忆关联失败: {e}", exc_info=True)
+           logger.error(f"记忆关联失败: {e}")
            raise RuntimeError(f"记忆关联失败: {e}")

    def _find_topic_node(self, memory: Memory) -> MemoryNode | None:

View File

@@ -83,7 +83,7 @@ class MemoryExtractor:
            return extracted
        except Exception as e:
-           logger.error(f"记忆提取失败: {e}", exc_info=True)
+           logger.error(f"记忆提取失败: {e}")
            raise ValueError(f"记忆提取失败: {e}")

    def _validate_required_params(self, params: dict[str, Any]) -> None:

@@ -260,7 +260,7 @@ class MemoryExtractor:
            return extracted
        except Exception as e:
-           logger.error(f"关联参数提取失败: {e}", exc_info=True)
+           logger.error(f"关联参数提取失败: {e}")
            raise ValueError(f"关联参数提取失败: {e}")

    def validate_relation_type(self, relation_type: str) -> str:

View File

@@ -96,7 +96,7 @@ class NodeMerger:
            return similar_nodes
        except Exception as e:
-           logger.error(f"查找相似节点失败: {e}", exc_info=True)
+           logger.error(f"查找相似节点失败: {e}")
            return []

    async def should_merge(

@@ -244,7 +244,7 @@ class NodeMerger:
            return True
        except Exception as e:
-           logger.error(f"节点合并失败: {e}", exc_info=True)
+           logger.error(f"节点合并失败: {e}")
            return False

    def _update_memory_references(self, old_node_id: str, new_node_id: str) -> None:

@@ -325,7 +325,7 @@ class NodeMerger:
                    progress_callback(i + 1, stats["total"], stats)
            except Exception as e:
-               logger.error(f"处理节点 {node.id} 时失败: {e}", exc_info=True)
+               logger.error(f"处理节点 {node.id} 时失败: {e}")
                stats["skipped"] += 1

        logger.info(

View File

@@ -71,17 +71,17 @@ class LongTermMemoryManager:
            return
        try:
-           logger.info("开始初始化长期记忆管理器...")
+           logger.debug("开始初始化长期记忆管理器...")

            # 确保底层 MemoryManager 已初始化
            if not self.memory_manager._initialized:
                await self.memory_manager.initialize()

            self._initialized = True
-           logger.info("长期记忆管理器初始化完成")
+           logger.debug("长期记忆管理器初始化完成")
        except Exception as e:
-           logger.error(f"长期记忆管理器初始化失败: {e}", exc_info=True)
+           logger.error(f"长期记忆管理器初始化失败: {e}")
            raise

    async def transfer_from_short_term(

@@ -107,7 +107,7 @@ class LongTermMemoryManager:
            await self.initialize()
        try:
-           logger.info(f"开始转移 {len(short_term_memories)} 条短期记忆到长期记忆...")
+           logger.debug(f"开始转移 {len(short_term_memories)} 条短期记忆到长期记忆...")

            result = {
                "processed_count": 0,

@@ -143,11 +143,11 @@ class LongTermMemoryManager:
                # 让出控制权
                await asyncio.sleep(0.01)

-           logger.info(f"短期记忆转移完成: {result}")
+           logger.debug(f"短期记忆转移完成: {result}")
            return result
        except Exception as e:
-           logger.error(f"转移短期记忆失败: {e}", exc_info=True)
+           logger.error(f"转移短期记忆失败: {e}")
            return {"error": str(e), "processed_count": 0}

    async def _process_batch(self, batch: list[ShortTermMemory]) -> dict[str, Any]:

@@ -196,7 +196,7 @@ class LongTermMemoryManager:
                    result["failed_count"] += 1
            except Exception as e:
-               logger.error(f"处理短期记忆 {stm.id} 失败: {e}", exc_info=True)
+               logger.error(f"处理短期记忆 {stm.id} 失败: {e}")
                result["failed_count"] += 1

        return result

@@ -272,7 +272,7 @@ class LongTermMemoryManager:
            return expanded_memories
        except Exception as e:
-           logger.error(f"检索相似长期记忆失败: {e}", exc_info=True)
+           logger.error(f"检索相似长期记忆失败: {e}")
            return []

    async def _decide_graph_operations(

@@ -310,11 +310,11 @@ class LongTermMemoryManager:
            # 解析图操作指令
            operations = self._parse_graph_operations(response)
-           logger.info(f"LLM 生成 {len(operations)} 个图操作指令")
+           logger.debug(f"LLM 生成 {len(operations)} 个图操作指令")
            return operations
        except Exception as e:
-           logger.error(f"LLM 决策图操作失败: {e}", exc_info=True)
+           logger.error(f"LLM 决策图操作失败: {e}")
            # 默认创建新记忆
            return [
                GraphOperation(

@@ -553,13 +553,13 @@ class LongTermMemoryManager:
                        logger.warning(f"未实现的操作类型: {op.operation_type}")
                except Exception as e:
-                   logger.error(f"执行图操作失败: {op}, 错误: {e}", exc_info=True)
+                   logger.error(f"执行图操作失败: {op}, 错误: {e}")

-           logger.info(f"执行了 {success_count}/{len(operations)} 个图操作")
+           logger.debug(f"执行了 {success_count}/{len(operations)} 个图操作")
            return success_count > 0
        except Exception as e:
-           logger.error(f"执行图操作失败: {e}", exc_info=True)
+           logger.error(f"执行图操作失败: {e}")
            return False

    @staticmethod

@@ -984,7 +984,7 @@ class LongTermMemoryManager:
            return {"decayed_count": decayed_count, "total_memories": len(all_memories)}
        except Exception as e:
-           logger.error(f"应用长期记忆衰减失败: {e}", exc_info=True)
+           logger.error(f"应用长期记忆衰减失败: {e}")
            return {"error": str(e), "decayed_count": 0}

    def get_statistics(self) -> dict[str, Any]:

@@ -1012,7 +1012,7 @@ class LongTermMemoryManager:
            logger.info("✅ 长期记忆管理器已关闭")
        except Exception as e:
-           logger.error(f"关闭长期记忆管理器失败: {e}", exc_info=True)
+           logger.error(f"关闭长期记忆管理器失败: {e}")


# 全局单例
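What `exc_info=True` buys, and what its removal gives up, in a minimal standalone sketch (standard `logging` only; this is not code from the repository):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    try:
        1 / 0
    except Exception as e:
        logger.error(f"操作失败: {e}")                 # one line: "操作失败: division by zero"
        logger.error(f"操作失败: {e}", exc_info=True)  # same line, followed by the full traceback
        logger.exception("操作失败")                   # shorthand: ERROR level with exc_info=True

Where a handler re-raises (as several hunks above do), the traceback can still be logged once at an outer boundary instead of at every layer.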

View File

@@ -95,7 +95,7 @@ class MemoryManager:
            return
        try:
-           logger.info("开始初始化记忆管理器...")
+           logger.debug("开始初始化记忆管理器...")

            # 1. 初始化存储层
            self.data_dir.mkdir(parents=True, exist_ok=True)

@@ -153,21 +153,6 @@ class MemoryManager:
            search_min_importance = getattr(self.config, "search_min_importance", 0.3)
            search_similarity_threshold = getattr(self.config, "search_similarity_threshold", 0.5)
-           logger.info(
-               f"📊 配置检查: expand_depth={expand_depth}, "
-               f"expand_semantic_threshold={expand_semantic_threshold}, "
-               f"search_top_k={search_top_k}"
-           )
-           logger.info(
-               f"📊 权重配置: vector={search_vector_weight}, "
-               f"importance={search_importance_weight}, "
-               f"recency={search_recency_weight}"
-           )
-           logger.info(
-               f"📊 阈值过滤: min_importance={search_min_importance}, "
-               f"similarity_threshold={search_similarity_threshold}"
-           )

            self.tools = MemoryTools(
                vector_store=self.vector_store,
                graph_store=self.graph_store,

@@ -190,7 +175,7 @@ class MemoryManager:
            self._start_maintenance_task()
        except Exception as e:
-           logger.error(f"记忆管理器初始化失败: {e}", exc_info=True)
+           logger.error(f"记忆管理器初始化失败: {e}")
            raise

    async def shutdown(self) -> None:

@@ -226,7 +211,7 @@ class MemoryManager:
            logger.info("✅ 记忆管理器已关闭")
        except Exception as e:
-           logger.error(f"关闭记忆管理器失败: {e}", exc_info=True)
+           logger.error(f"关闭记忆管理器失败: {e}")

    # ==================== 记忆 CRUD 操作 ====================

@@ -279,7 +264,7 @@ class MemoryManager:
                return None
        except Exception as e:
-           logger.error(f"创建记忆时发生异常: {e}", exc_info=True)
+           logger.error(f"创建记忆时发生异常: {e}")
            return None

    async def get_memory(self, memory_id: str) -> Memory | None:

@@ -336,7 +321,7 @@ class MemoryManager:
            return True
        except Exception as e:
-           logger.error(f"更新记忆失败: {e}", exc_info=True)
+           logger.error(f"更新记忆失败: {e}")
            return False

    async def delete_memory(self, memory_id: str) -> bool:

@@ -376,7 +361,7 @@ class MemoryManager:
            return True
        except Exception as e:
-           logger.error(f"删除记忆失败: {e}", exc_info=True)
+           logger.error(f"删除记忆失败: {e}")
            return False

    # ==================== 记忆检索操作 ====================

@@ -487,7 +472,7 @@ class MemoryManager:
            return filtered_memories[:top_k]
        except Exception as e:
-           logger.error(f"搜索记忆失败: {e}", exc_info=True)
+           logger.error(f"搜索记忆失败: {e}")
            return []

    async def link_memories(

@@ -531,7 +516,7 @@ class MemoryManager:
                return False
        except Exception as e:
-           logger.error(f"关联记忆失败: {e}", exc_info=True)
+           logger.error(f"关联记忆失败: {e}")
            return False

    # ==================== 记忆生命周期管理 ====================

@@ -608,7 +593,7 @@ class MemoryManager:
            return True
        except Exception as e:
-           logger.error(f"激活记忆失败: {e}", exc_info=True)
+           logger.error(f"激活记忆失败: {e}")
            return False

    async def _auto_activate_searched_memories(self, memories: list[Memory]) -> None:

@@ -938,7 +923,7 @@ class MemoryManager:
                return False
        except Exception as e:
-           logger.error(f"遗忘记忆失败: {e}", exc_info=True)
+           logger.error(f"遗忘记忆失败: {e}")
            return False

    async def auto_forget_memories(self, threshold: float = 0.1) -> int:

@@ -1037,7 +1022,7 @@ class MemoryManager:
            return forgotten_count
        except Exception as e:
-           logger.error(f"自动遗忘失败: {e}", exc_info=True)
+           logger.error(f"自动遗忘失败: {e}")
            return 0

    async def _cleanup_orphan_nodes_and_edges(self) -> tuple[int, int]:

@@ -1098,7 +1083,7 @@ class MemoryManager:
            return orphan_nodes_count, orphan_edges_count
        except Exception as e:
-           logger.error(f"清理孤立节点和边失败: {e}", exc_info=True)
+           logger.error(f"清理孤立节点和边失败: {e}")
            return 0, 0

    # ==================== 统计与维护 ====================

@@ -1185,7 +1170,7 @@ class MemoryManager:
            return result
        except Exception as e:
-           logger.error(f"记忆整理失败: {e}", exc_info=True)
+           logger.error(f"记忆整理失败: {e}")
            return {"error": str(e), "forgotten_count": 0}

    async def _consolidate_memories_background(

@@ -1322,7 +1307,7 @@ class MemoryManager:
            return result
        except Exception as e:
-           logger.error(f"❌ 维护失败: {e}", exc_info=True)
+           logger.error(f"❌ 维护失败: {e}")
            return {"error": str(e), "total_time": 0}

    async def _lightweight_auto_link_memories(  # 已废弃

@@ -1394,7 +1379,7 @@ class MemoryManager:
            )
        except Exception as e:
-           logger.error(f"启动维护后台任务失败: {e}", exc_info=True)
+           logger.error(f"启动维护后台任务失败: {e}")

    async def _stop_maintenance_task(self) -> None:
        """

@@ -1416,7 +1401,7 @@ class MemoryManager:
            self._maintenance_task = None
        except Exception as e:
-           logger.error(f"停止维护后台任务失败: {e}", exc_info=True)
+           logger.error(f"停止维护后台任务失败: {e}")

    async def _maintenance_loop(self) -> None:
        """

@@ -1447,7 +1432,7 @@ class MemoryManager:
                try:
                    await self.maintenance()
                except Exception as e:
-                   logger.error(f"维护任务执行失败: {e}", exc_info=True)
+                   logger.error(f"维护任务执行失败: {e}")

                # 后续执行使用相同间隔
                initial_delay = self._maintenance_interval_hours * 3600

@@ -1456,7 +1441,7 @@ class MemoryManager:
                logger.debug("维护循环被取消")
                break
            except Exception as e:
-               logger.error(f"维护循环发生异常: {e}", exc_info=True)
+               logger.error(f"维护循环发生异常: {e}")
                # 异常后等待较短时间再重试
                try:
                    await asyncio.sleep(300)  # 5分钟后重试

@@ -1466,7 +1451,7 @@ class MemoryManager:
        except asyncio.CancelledError:
            logger.debug("维护循环完全退出")
        except Exception as e:
-           logger.error(f"维护循环意外结束: {e}", exc_info=True)
+           logger.error(f"维护循环意外结束: {e}")
        finally:
            self._maintenance_running = False
            logger.debug("维护循环已清理完毕")

@@ -1493,5 +1478,5 @@ class MemoryManager:
            await self.persistence.save_graph_store(self.graph_store)
            logger.debug(f"异步保存图数据成功: {operation_name}")
        except Exception as e:
-           logger.error(f"异步保存图数据失败 ({operation_name}): {e}", exc_info=True)
+           logger.error(f"异步保存图数据失败 ({operation_name}): {e}")
            # 可以考虑添加重试机制或者通知机制

View File

@@ -62,8 +62,6 @@ async def initialize_memory_manager(
        if isinstance(data_dir, str):
            data_dir = Path(data_dir)
-       logger.info(f"正在初始化全局 MemoryManager (data_dir={data_dir})...")

        _memory_manager = MemoryManager(data_dir=data_dir)
        await _memory_manager.initialize()

@@ -73,7 +71,7 @@ async def initialize_memory_manager(
        return _memory_manager
    except Exception as e:
-       logger.error(f"初始化 MemoryManager 失败: {e}", exc_info=True)
+       logger.error(f"初始化 MemoryManager 失败: {e}")
        _initialized = False
        _memory_manager = None
        raise

@@ -99,11 +97,10 @@ async def shutdown_memory_manager():
    if _memory_manager:
        try:
-           logger.info("正在关闭全局 MemoryManager...")
            await _memory_manager.shutdown()
            logger.info("✅ 全局 MemoryManager 已关闭")
        except Exception as e:
-           logger.error(f"关闭 MemoryManager 时出错: {e}", exc_info=True)
+           logger.error(f"关闭 MemoryManager 时出错: {e}")
        finally:
            _memory_manager = None
            _initialized = False

@@ -175,12 +172,10 @@ async def initialize_unified_memory_manager():
        # 初始化
        await _unified_memory_manager.initialize()
-       logger.info("✅ 统一记忆管理器单例已初始化")

        return _unified_memory_manager
    except Exception as e:
-       logger.error(f"初始化统一记忆管理器失败: {e}", exc_info=True)
+       logger.error(f"初始化统一记忆管理器失败: {e}")
        raise

@@ -210,5 +205,5 @@ async def shutdown_unified_memory_manager() -> None:
        logger.info("✅ 统一记忆管理器已关闭")
    except Exception as e:
-       logger.error(f"关闭统一记忆管理器失败: {e}", exc_info=True)
+       logger.error(f"关闭统一记忆管理器失败: {e}")

View File

@@ -80,11 +80,6 @@ class PerceptualMemoryManager:
        self._initialized = False
        self._save_lock = asyncio.Lock()

-       logger.info(
-           f"感知记忆管理器已创建 (max_blocks={max_blocks}, "
-           f"block_size={block_size}, activation_threshold={activation_threshold})"
-       )

    @property
    def memory(self) -> PerceptualMemory:
        """获取感知记忆对象(保证非 None"""

@@ -99,7 +94,7 @@ class PerceptualMemoryManager:
            return
        try:
-           logger.info("开始初始化感知记忆管理器...")
+           logger.debug("开始初始化感知记忆管理器...")

            # 初始化嵌入生成器
            self.embedding_generator = EmbeddingGenerator()

@@ -109,7 +104,7 @@ class PerceptualMemoryManager:
            # 如果没有加载到数据,创建新的
            if not self.perceptual_memory:
-               logger.info("未找到现有数据,创建新的感知记忆堆")
+               logger.debug("未找到现有数据,创建新的感知记忆堆")
                self.perceptual_memory = PerceptualMemory(
                    max_blocks=self.max_blocks,
                    block_size=self.block_size,

@@ -119,12 +114,12 @@ class PerceptualMemoryManager:
            self._initialized = True
            logger.info(
-               f"感知记忆管理器初始化完成 "
+               f"感知记忆管理器初始化完成 "
                f"(已加载 {len(self.perceptual_memory.blocks)} 个记忆块)"
            )
        except Exception as e:
-           logger.error(f"感知记忆管理器初始化失败: {e}", exc_info=True)
+           logger.error(f"感知记忆管理器初始化失败: {e}")
            raise

@@ -179,7 +174,7 @@ class PerceptualMemoryManager:
                return None
        except Exception as e:
-           logger.error(f"添加消息失败: {e}", exc_info=True)
+           logger.error(f"添加消息失败: {e}")
            return None

    async def _create_memory_block(self, stream_id: str) -> MemoryBlock | None:

@@ -234,9 +229,9 @@ class PerceptualMemoryManager:
            if len(self.perceptual_memory.blocks) > self.max_blocks:
                removed_blocks = self.perceptual_memory.blocks[self.max_blocks :]
                self.perceptual_memory.blocks = self.perceptual_memory.blocks[: self.max_blocks]
-               logger.info(f"记忆堆已满,移除 {len(removed_blocks)} 个旧块")
+               logger.debug(f"记忆堆已满,移除 {len(removed_blocks)} 个旧块")

-           logger.info(
+           logger.debug(
                f"✅ 创建新记忆块: {block.id} (stream={stream_id[:8]}, "
                f"堆大小={len(self.perceptual_memory.blocks)}/{self.max_blocks})"
            )

@@ -247,7 +242,7 @@ class PerceptualMemoryManager:
            return block
        except Exception as e:
-           logger.error(f"创建记忆块失败: {e}", exc_info=True)
+           logger.error(f"创建记忆块失败: {e}")
            return None

    def _normalize_message_timestamp(self, message: dict[str, Any]) -> float:

@@ -375,7 +370,7 @@ class PerceptualMemoryManager:
            return embedding
        except Exception as e:
-           logger.error(f"生成向量失败: {e}", exc_info=True)
+           logger.error(f"生成向量失败: {e}")
            return None

    async def _generate_embeddings_batch(self, texts: list[str]) -> list[np.ndarray | None]:

@@ -397,7 +392,7 @@ class PerceptualMemoryManager:
            return embeddings
        except Exception as e:
-           logger.error(f"批量生成向量失败: {e}", exc_info=True)
+           logger.error(f"批量生成向量失败: {e}")
            return [None] * len(texts)

    async def recall_blocks(

@@ -464,8 +459,8 @@ class PerceptualMemoryManager:
                # 检查是否达到激活阈值
                if block.recall_count >= self.activation_threshold:
-                   logger.info(
-                       f"🔥 记忆块 {block.id} 被激活"
+                   logger.debug(
+                       f"记忆块 {block.id} 被激活 "
                        f"(召回次数={block.recall_count}, 阈值={self.activation_threshold})"
                    )

@@ -480,15 +475,11 @@ class PerceptualMemoryManager:
            ]
            if activated_blocks:
-               logger.info(
-                   f"检测到 {len(activated_blocks)} 个记忆块达到激活阈值 "
-                   f"(recall_count >= {self.activation_threshold}),需要转移到短期记忆"
-               )
                # 设置标记供 unified_manager 处理
                for block in activated_blocks:
                    block.metadata["needs_transfer"] = True

-           logger.info(
+           logger.debug(
                f"召回 {len(recalled_blocks)} 个记忆块 "
                f"(top_k={top_k}, threshold={similarity_threshold:.2f})"
            )

@@ -499,7 +490,7 @@ class PerceptualMemoryManager:
            return recalled_blocks
        except Exception as e:
-           logger.error(f"召回记忆块失败: {e}", exc_info=True)
+           logger.error(f"召回记忆块失败: {e}")
            return []

    async def _promote_blocks(self, blocks_to_promote: list[MemoryBlock]) -> None:

@@ -526,7 +517,7 @@ class PerceptualMemoryManager:
            logger.debug(f"提升 {len(blocks_to_promote)} 个块到堆顶")
        except Exception as e:
-           logger.error(f"提升块失败: {e}", exc_info=True)
+           logger.error(f"提升块失败: {e}")

    def get_activated_blocks(self) -> list[MemoryBlock]:
        """

@@ -569,8 +560,6 @@ class PerceptualMemoryManager:
                for j, b in enumerate(self.perceptual_memory.blocks):
                    b.position_in_stack = j
-               logger.info(f"移除记忆块: {block_id}")

                # 异步保存
                asyncio.create_task(self._save_to_disk())

@@ -580,7 +569,7 @@ class PerceptualMemoryManager:
            return False
        except Exception as e:
-           logger.error(f"移除记忆块失败: {e}", exc_info=True)
+           logger.error(f"移除记忆块失败: {e}")
            return False

    def get_statistics(self) -> dict[str, Any]:

@@ -628,7 +617,7 @@ class PerceptualMemoryManager:
            logger.debug(f"感知记忆已保存到 {save_path}")
        except Exception as e:
-           logger.error(f"保存感知记忆失败: {e}", exc_info=True)
+           logger.error(f"保存感知记忆失败: {e}")

    async def _load_from_disk(self) -> None:
        """从磁盘加载感知记忆"""

@@ -638,7 +627,7 @@ class PerceptualMemoryManager:
            load_path = self.data_dir / "perceptual_memory.json"
            if not load_path.exists():
-               logger.info("未找到感知记忆数据文件")
+               logger.debug("未找到感知记忆数据文件")
                return

            data = orjson.loads(load_path.read_bytes())

@@ -647,18 +636,14 @@ class PerceptualMemoryManager:
            # 重新加载向量数据
            await self._reload_embeddings()
-           logger.info(f"感知记忆已从 {load_path} 加载")
        except Exception as e:
-           logger.error(f"加载感知记忆失败: {e}", exc_info=True)
+           logger.error(f"加载感知记忆失败: {e}")

    async def _reload_embeddings(self) -> None:
        """重新生成记忆块的向量"""
        if not self.perceptual_memory:
            return

-       logger.info("重新生成记忆块向量...")
        blocks_to_process = []
        texts_to_process = []

@@ -668,10 +653,9 @@ class PerceptualMemoryManager:
                texts_to_process.append(block.combined_text)

        if not blocks_to_process:
-           logger.info("没有需要重新生成向量的块")
            return

-       logger.info(f"开始批量生成 {len(blocks_to_process)} 个块的向量...")
+       logger.debug(f"开始批量生成 {len(blocks_to_process)} 个块的向量...")
        embeddings = await self._generate_embeddings_batch(texts_to_process)

@@ -681,7 +665,7 @@ class PerceptualMemoryManager:
                block.embedding = embedding
                success_count += 1

-       logger.info(f"向量重新生成完成(成功: {success_count}/{len(blocks_to_process)}")
+       logger.debug(f"向量重新生成完成(成功: {success_count}/{len(blocks_to_process)}")

    async def shutdown(self) -> None:
        """关闭管理器"""

@@ -689,16 +673,15 @@ class PerceptualMemoryManager:
            return
        try:
-           logger.info("正在关闭感知记忆管理器...")
+           logger.debug("正在关闭感知记忆管理器...")

            # 最后一次保存
            await self._save_to_disk()

            self._initialized = False
            logger.info("✅ 感知记忆管理器已关闭")
        except Exception as e:
-           logger.error(f"关闭感知记忆管理器失败: {e}", exc_info=True)
+           logger.error(f"关闭感知记忆管理器失败: {e}")


# 全局单例

View File

@@ -130,7 +130,7 @@ class _DeprecatedCreateMemoryTool(BaseTool):
            }
        except Exception as e:
-           logger.error(f"[CreateMemoryTool] 执行失败: {e}", exc_info=True)
+           logger.error(f"[CreateMemoryTool] 执行失败: {e}")
            return {
                "name": self.name,
                "content": f"创建记忆时出错: {e!s}"

@@ -190,7 +190,7 @@ class _DeprecatedLinkMemoriesTool(BaseTool):
            }
        except Exception as e:
-           logger.error(f"[LinkMemoriesTool] 执行失败: {e}", exc_info=True)
+           logger.error(f"[LinkMemoriesTool] 执行失败: {e}")
            return {
                "name": self.name,
                "content": f"关联记忆时出错: {e!s}"

@@ -260,7 +260,7 @@ class _DeprecatedSearchMemoriesTool(BaseTool):
            }
        except Exception as e:
-           logger.error(f"[SearchMemoriesTool] 执行失败: {e}", exc_info=True)
+           logger.error(f"[SearchMemoriesTool] 执行失败: {e}")
            return {
                "name": self.name,
                "content": f"搜索记忆时出错: {e!s}"

View File

@@ -81,7 +81,7 @@ class ShortTermMemoryManager:
            return
        try:
-           logger.info("开始初始化短期记忆管理器...")
+           logger.debug("开始初始化短期记忆管理器...")

            # 初始化嵌入生成器
            self.embedding_generator = EmbeddingGenerator()

@@ -90,10 +90,10 @@ class ShortTermMemoryManager:
            await self._load_from_disk()

            self._initialized = True
-           logger.info(f"短期记忆管理器初始化完成 (已加载 {len(self.memories)} 条记忆)")
+           logger.debug(f"短期记忆管理器初始化完成 (已加载 {len(self.memories)} 条记忆)")
        except Exception as e:
-           logger.error(f"短期记忆管理器初始化失败: {e}", exc_info=True)
+           logger.error(f"短期记忆管理器初始化失败: {e}")
            raise

    async def add_from_block(self, block: MemoryBlock) -> ShortTermMemory | None:

@@ -116,7 +116,7 @@ class ShortTermMemoryManager:
            await self.initialize()
        try:
-           logger.info(f"开始处理记忆块: {block.id}")
+           logger.debug(f"开始处理记忆块: {block.id}")

            # 步骤1: 使用 LLM 提取结构化记忆
            extracted_memory = await self._extract_structured_memory(block)

@@ -126,7 +126,7 @@ class ShortTermMemoryManager:
            # 步骤2: 决策如何处理新记忆
            decision = await self._decide_memory_operation(extracted_memory)
-           logger.info(f"LLM 决策: {decision}")
+           logger.debug(f"LLM 决策: {decision}")

            # 步骤3: 执行决策
            result_memory = await self._execute_decision(extracted_memory, decision)

@@ -145,7 +145,7 @@ class ShortTermMemoryManager:
            return result_memory
        except Exception as e:
-           logger.error(f"添加短期记忆失败: {e}", exc_info=True)
+           logger.error(f"添加短期记忆失败: {e}")
            return None

    async def _extract_structured_memory(self, block: MemoryBlock) -> ShortTermMemory | None:

@@ -232,11 +232,11 @@ class ShortTermMemoryManager:
                attributes=data.get("attributes", {}),
            )

-           logger.info(f"提取结构化记忆: {memory.content[:50]}...")
+           logger.debug(f"提取结构化记忆: {memory.content[:50]}...")
            return memory
        except Exception as e:
-           logger.error(f"提取结构化记忆失败: {e}", exc_info=True)
+           logger.error(f"提取结构化记忆失败: {e}")
            return None

    async def _decide_memory_operation(self, new_memory: ShortTermMemory) -> ShortTermDecision:

@@ -337,11 +337,11 @@ class ShortTermMemoryManager:
                updated_importance=data.get("updated_importance"),
            )

-           logger.info(f"LLM 决策完成: {decision}")
+           logger.debug(f"LLM 决策完成: {decision}")
            return decision
        except Exception as e:
-           logger.error(f"LLM 决策失败: {e}", exc_info=True)
+           logger.error(f"LLM 决策失败: {e}")
            # 默认创建新记忆
            return ShortTermDecision(
                operation=ShortTermOperation.CREATE_NEW,

@@ -366,7 +366,7 @@ class ShortTermMemoryManager:
            if decision.operation == ShortTermOperation.CREATE_NEW:
                # 创建新记忆
                self.memories.append(new_memory)
-               logger.info(f"创建新短期记忆: {new_memory.id}")
+               logger.debug(f"创建新短期记忆: {new_memory.id}")
                return new_memory

            elif decision.operation == ShortTermOperation.MERGE:

@@ -389,7 +389,7 @@ class ShortTermMemoryManager:
                    target.embedding = await self._generate_embedding(target.content)
                target.update_access()
-               logger.info(f"合并记忆到: {target.id}")
+               logger.debug(f"合并记忆到: {target.id}")
                return target

            elif decision.operation == ShortTermOperation.UPDATE:

@@ -412,7 +412,7 @@ class ShortTermMemoryManager:
                target.source_block_ids.extend(new_memory.source_block_ids)
                target.update_access()
-               logger.info(f"更新记忆: {target.id}")
+               logger.debug(f"更新记忆: {target.id}")
                return target

            elif decision.operation == ShortTermOperation.DISCARD:

@@ -432,7 +432,7 @@ class ShortTermMemoryManager:
                return new_memory
        except Exception as e:
-           logger.error(f"执行决策失败: {e}", exc_info=True)
+           logger.error(f"执行决策失败: {e}")
            return None

    async def _find_similar_memories(

@@ -466,7 +466,7 @@ class ShortTermMemoryManager:
            return scored[:top_k]
        except Exception as e:
-           logger.error(f"查找相似记忆失败: {e}", exc_info=True)
+           logger.error(f"查找相似记忆失败: {e}")
            return []

    def _find_memory_by_id(self, memory_id: str | None) -> ShortTermMemory | None:

@@ -491,7 +491,7 @@ class ShortTermMemoryManager:
            return embedding
        except Exception as e:
-           logger.error(f"生成向量失败: {e}", exc_info=True)
+           logger.error(f"生成向量失败: {e}")
            return None

    async def _generate_embeddings_batch(self, texts: list[str]) -> list[np.ndarray | None]:

@@ -513,7 +513,7 @@ class ShortTermMemoryManager:
            return embeddings
        except Exception as e:
-           logger.error(f"批量生成向量失败: {e}", exc_info=True)
+           logger.error(f"批量生成向量失败: {e}")
            return [None] * len(texts)

    def _parse_json_response(self, response: str) -> dict[str, Any] | None:

@@ -583,7 +583,7 @@ class ShortTermMemoryManager:
            return results
        except Exception as e:
-           logger.error(f"检索短期记忆失败: {e}", exc_info=True)
+           logger.error(f"检索短期记忆失败: {e}")
            return []

    def get_memories_for_transfer(self) -> list[ShortTermMemory]:

@@ -643,7 +643,7 @@ class ShortTermMemoryManager:
            asyncio.create_task(self._save_to_disk())
        except Exception as e:
-           logger.error(f"清除已转移记忆失败: {e}", exc_info=True)
+           logger.error(f"清除已转移记忆失败: {e}")

    def get_statistics(self) -> dict[str, Any]:
        """获取短期记忆层统计信息"""

@@ -680,7 +680,7 @@ class ShortTermMemoryManager:
            logger.debug(f"短期记忆已保存到 {save_path}")
        except Exception as e:
-           logger.error(f"保存短期记忆失败: {e}", exc_info=True)
+           logger.error(f"保存短期记忆失败: {e}")

    async def _load_from_disk(self) -> None:
        """从磁盘加载短期记忆"""

@@ -702,7 +702,7 @@ class ShortTermMemoryManager:
            logger.info(f"短期记忆已从 {load_path} 加载 ({len(self.memories)} 条)")
        except Exception as e:
-           logger.error(f"加载短期记忆失败: {e}", exc_info=True)
+           logger.error(f"加载短期记忆失败: {e}")

    async def _reload_embeddings(self) -> None:
        """重新生成记忆的向量"""

@@ -747,7 +747,7 @@ class ShortTermMemoryManager:
            logger.info("✅ 短期记忆管理器已关闭")
        except Exception as e:
-           logger.error(f"关闭短期记忆管理器失败: {e}", exc_info=True)
+           logger.error(f"关闭短期记忆管理器失败: {e}")


# 全局单例

View File

@@ -127,7 +127,7 @@ class GraphStore:
            logger.debug(f"添加记忆到图: {memory}")
        except Exception as e:
-           logger.error(f"添加记忆失败: {e}", exc_info=True)
+           logger.error(f"添加记忆失败: {e}")
            raise

    def add_node(

@@ -208,7 +208,7 @@ class GraphStore:
            return True
        except Exception as e:
-           logger.error(f"添加节点失败: {e}", exc_info=True)
+           logger.error(f"添加节点失败: {e}")
            return False

    def update_node(

@@ -257,7 +257,7 @@ class GraphStore:
            return True
        except Exception as e:
-           logger.error(f"更新节点失败: {e}", exc_info=True)
+           logger.error(f"更新节点失败: {e}")
            return False

    def add_edge(

@@ -341,7 +341,7 @@ class GraphStore:
            return edge_id
        except Exception as e:
-           logger.error(f"添加边失败: {e}", exc_info=True)
+           logger.error(f"添加边失败: {e}")
            return None

    def update_edge(

@@ -405,7 +405,7 @@ class GraphStore:
            return True
        except Exception as e:
-           logger.error(f"更新边失败: {e}", exc_info=True)
+           logger.error(f"更新边失败: {e}")
            return False

    def remove_edge(self, edge_id: str) -> bool:

@@ -455,7 +455,7 @@ class GraphStore:
            return True
        except Exception as e:
-           logger.error(f"删除边失败: {e}", exc_info=True)
+           logger.error(f"删除边失败: {e}")
            return False

    def merge_memories(self, target_memory_id: str, source_memory_ids: list[str]) -> bool:

@@ -511,7 +511,7 @@ class GraphStore:
            return True
        except Exception as e:
-           logger.error(f"合并记忆失败: {e}", exc_info=True)
+           logger.error(f"合并记忆失败: {e}")
            return False

    def get_memory_by_id(self, memory_id: str) -> Memory | None:

@@ -697,7 +697,7 @@ class GraphStore:
        except nx.NetworkXNoPath:
            return None
        except Exception as e:
-           logger.error(f"查找路径失败: {e}", exc_info=True)
+           logger.error(f"查找路径失败: {e}")
            return None

    def bfs_expand(

@@ -787,7 +787,7 @@ class GraphStore:
            logger.info(f"节点合并: {source_id} → {target_id}")
        except Exception as e:
-           logger.error(f"合并节点失败: {e}", exc_info=True)
+           logger.error(f"合并节点失败: {e}")
            raise

    def get_node_degree(self, node_id: str) -> tuple[int, int]:

@@ -985,7 +985,7 @@ class GraphStore:
            return True
        except Exception as e:
-           logger.error(f"删除记忆失败 {memory_id}: {e}", exc_info=True)
+           logger.error(f"删除记忆失败 {memory_id}: {e}")
            return False

    def clear(self) -> None:

View File

@@ -170,8 +170,6 @@ class PersistenceManager:
        self._running = False
        self._file_lock = asyncio.Lock()  # 文件操作锁
-       logger.info(f"初始化持久化管理器: data_dir={data_dir}")

    async def save_graph_store(self, graph_store: GraphStore) -> None:
        """
        保存图存储到文件

@@ -211,7 +209,7 @@ class PersistenceManager:
            logger.debug(f"图数据已保存: {self.graph_file}, 大小: {len(json_data) / 1024:.2f} KB")
        except Exception as e:
-           logger.error(f"保存图数据失败: {e}", exc_info=True)
+           logger.error(f"保存图数据失败: {e}")
            raise

    async def load_graph_store(self) -> GraphStore | None:

@@ -222,7 +220,7 @@ class PersistenceManager:
            GraphStore 对象,如果文件不存在则返回 None
        """
        if not self.graph_file.exists():
-           logger.info("图数据文件不存在,返回空图")
+           logger.debug("图数据文件不存在,返回空图")
            return None

        # 使用全局文件锁防止多个系统同时读写同一文件

@@ -249,18 +247,14 @@ class PersistenceManager:
                logger.error("无法读取图数据文件")
                return await self._load_from_backup()

-           # 检查版本(未来可能需要数据迁移)
-           version = data.get("metadata", {}).get("version", "unknown")
-           logger.info(f"加载图数据: version={version}")

            # 恢复图存储
            graph_store = GraphStore.from_dict(data)
-           logger.info(f"图数据加载完成: {graph_store.get_statistics()}")
+           logger.debug(f"图数据加载完成: {graph_store.get_statistics()}")
            return graph_store
        except Exception as e:
-           logger.error(f"加载图数据失败: {e}", exc_info=True)
+           logger.error(f"加载图数据失败: {e}")
            # 尝试加载备份
            return await self._load_from_backup()

@@ -291,10 +285,8 @@ class PersistenceManager:
            # 使用安全的原子写入
            await safe_atomic_write(temp_file, self.staged_file)
-           logger.info(f"临时记忆已保存: {len(staged_memories)} 条")
        except Exception as e:
-           logger.error(f"保存临时记忆失败: {e}", exc_info=True)
+           logger.error(f"保存临时记忆失败: {e}")
            raise

    async def load_staged_memories(self) -> list[StagedMemory]:

@@ -305,7 +297,7 @@ class PersistenceManager:
            临时记忆列表
        """
        if not self.staged_file.exists():
-           logger.info("临时记忆文件不存在,返回空列表")
+           logger.debug("临时记忆文件不存在,返回空列表")
            return []

        async with self._file_lock:  # 使用文件锁防止并发访问

@@ -330,12 +322,10 @@ class PersistenceManager:
                return []

            staged_memories = [StagedMemory.from_dict(sm) for sm in data.get("staged_memories", [])]
-           logger.info(f"临时记忆加载完成: {len(staged_memories)} 条")
            return staged_memories
        except Exception as e:
-           logger.error(f"加载临时记忆失败: {e}", exc_info=True)
+           logger.error(f"加载临时记忆失败: {e}")
            return []

    async def create_backup(self) -> Path | None:

@@ -359,13 +349,13 @@ class PersistenceManager:
                # 清理旧备份只保留最近10个
                await self._cleanup_old_backups(keep=10)

-               logger.info(f"备份创建成功: {backup_file}")
+               logger.debug(f"备份创建成功: {backup_file}")
                return backup_file
            return None
        except Exception as e:
-           logger.error(f"创建备份失败: {e}", exc_info=True)
+           logger.error(f"创建备份失败: {e}")
            return None

    async def _load_from_backup(self) -> GraphStore | None:

@@ -401,12 +391,12 @@ class PersistenceManager:
                return None

            graph_store = GraphStore.from_dict(data)
-           logger.info(f"从备份恢复成功: {graph_store.get_statistics()}")
+           logger.debug(f"从备份恢复成功: {graph_store.get_statistics()}")
            return graph_store
        except Exception as e:
-           logger.error(f"从备份恢复失败: {e}", exc_info=True)
+           logger.error(f"从备份恢复失败: {e}")
            return None

    async def _cleanup_old_backups(self, keep: int = 10) -> None:

@@ -446,7 +436,7 @@ class PersistenceManager:
        self._running = True

        async def auto_save_loop():
-           logger.info(f"自动保存任务已启动,间隔: {self.auto_save_interval} 秒")
+           logger.debug(f"自动保存任务已启动,间隔: {self.auto_save_interval} 秒")

            while self._running:
                try:

@@ -470,9 +460,7 @@ class PersistenceManager:
                        await self.create_backup()
                except Exception as e:
-                   logger.error(f"自动保存失败: {e}", exc_info=True)
-           logger.info("自动保存任务已停止")
+                   logger.error(f"自动保存失败: {e}")

        self._auto_save_task = asyncio.create_task(auto_save_loop())

@@ -481,7 +469,7 @@ class PersistenceManager:
        self._running = False
        if self._auto_save_task:
            self._auto_save_task.cancel()
-           logger.info("自动保存任务已取消")
+           logger.debug("自动保存任务已取消")

    async def export_to_json(self, output_file: Path, graph_store: GraphStore) -> None:
        """

@@ -505,10 +493,8 @@ class PersistenceManager:
            json_str = json.dumps(data, ensure_ascii=False, indent=2)
            await f.write(json_str)
-           logger.info(f"图数据已导出: {output_file}")
        except Exception as e:
-           logger.error(f"导出图数据失败: {e}", exc_info=True)
+           logger.error(f"导出图数据失败: {e}")
            raise

    async def import_from_json(self, input_file: Path) -> GraphStore | None:

@@ -527,12 +513,10 @@ class PersistenceManager:
            data = json.loads(content)
            graph_store = GraphStore.from_dict(data)
-           logger.info(f"图数据已导入: {graph_store.get_statistics()}")
            return graph_store
        except Exception as e:
-           logger.error(f"导入图数据失败: {e}", exc_info=True)
+           logger.error(f"导入图数据失败: {e}")
            raise

    def get_data_size(self) -> dict[str, int]:
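The auto-save hunks above touch a long-running background task. A minimal sketch of that loop's shape under asyncio; the class and attribute names are illustrative, not the repository's:

    import asyncio
    import logging

    logger = logging.getLogger(__name__)

    class AutoSaver:
        """Illustrative periodic-save loop mirroring the auto_save_loop pattern above."""

        def __init__(self, interval: float = 60.0):
            self.interval = interval
            self._running = False
            self._task: asyncio.Task | None = None

        async def _save(self) -> None:
            ...  # placeholder for the real persistence call

        def start(self) -> None:
            # Must be called from inside a running event loop.
            self._running = True

            async def loop() -> None:
                logger.debug(f"自动保存任务已启动,间隔: {self.interval} 秒")
                while self._running:
                    try:
                        await asyncio.sleep(self.interval)
                        await self._save()
                    except Exception as e:
                        # Errors are logged and swallowed so one failed save
                        # does not kill the loop.
                        logger.error(f"自动保存失败: {e}")

            self._task = asyncio.create_task(loop())

        def stop(self) -> None:
            self._running = False
            if self._task:
                self._task.cancel()
                logger.debug("自动保存任务已取消")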

View File

@@ -47,8 +47,6 @@ class VectorStore:
        self.collection = None
        self.embedding_function = embedding_function
-       logger.info(f"初始化向量存储: collection={collection_name}, dir={self.data_dir}")

    async def initialize(self) -> None:
        """异步初始化 ChromaDB"""
        try:

@@ -70,10 +68,10 @@ class VectorStore:
                metadata={"description": "Memory graph node embeddings"},
            )
-           logger.info(f"ChromaDB 初始化完成,集合包含 {self.collection.count()} 个节点")
+           logger.debug(f"ChromaDB 初始化完成,集合包含 {self.collection.count()} 个节点")
        except Exception as e:
-           logger.error(f"初始化 ChromaDB 失败: {e}", exc_info=True)
+           logger.error(f"初始化 ChromaDB 失败: {e}")
            raise

    async def add_node(self, node: MemoryNode) -> None:

@@ -118,7 +116,7 @@ class VectorStore:
            logger.debug(f"添加节点到向量存储: {node}")
        except Exception as e:
-           logger.error(f"添加节点失败: {e}", exc_info=True)
+           logger.error(f"添加节点失败: {e}")
            raise

    async def add_nodes_batch(self, nodes: list[MemoryNode]) -> None:

@@ -164,10 +162,8 @@ class VectorStore:
                documents=[n.content for n in valid_nodes],
            )
-           logger.info(f"批量添加 {len(valid_nodes)} 个节点到向量存储")
        except Exception as e:
-           logger.error(f"批量添加节点失败: {e}", exc_info=True)
+           logger.error(f"批量添加节点失败: {e}")
            raise

    async def search_similar_nodes(

@@ -237,7 +233,7 @@ class VectorStore:
            return similar_nodes
        except Exception as e:
-           logger.error(f"相似节点搜索失败: {e}", exc_info=True)
+           logger.error(f"相似节点搜索失败: {e}")
            raise

    async def search_with_multiple_queries(

@@ -344,15 +340,10 @@ class VectorStore:
            fused_results.sort(key=lambda x: x[1], reverse=True)
            final_results = fused_results[:limit]

-           logger.info(
-               f"多查询融合搜索完成: {len(query_embeddings)} 个查询, "
-               f"融合后 {len(fused_results)} 个结果, 返回 {len(final_results)} 个"
-           )

            return final_results
        except Exception as e:
-           logger.error(f"多查询融合搜索失败: {e}", exc_info=True)
+           logger.error(f"多查询融合搜索失败: {e}")
            raise

    async def get_node_by_id(self, node_id: str) -> dict[str, Any] | None:

@@ -387,7 +378,7 @@ class VectorStore:
            return None
        except Exception as e:
-           logger.error(f"获取节点失败: {e}", exc_info=True)
+           logger.error(f"获取节点失败: {e}")
            return None

    async def delete_node(self, node_id: str) -> None:

@@ -405,7 +396,7 @@ class VectorStore:
            logger.debug(f"删除节点: {node_id}")
        except Exception as e:
-           logger.error(f"删除节点失败: {e}", exc_info=True)
+           logger.error(f"删除节点失败: {e}")
            raise

    async def update_node_embedding(self, node_id: str, embedding: np.ndarray) -> None:

@@ -424,7 +415,7 @@ class VectorStore:
            logger.debug(f"更新节点 embedding: {node_id}")
        except Exception as e:
-           logger.error(f"更新节点 embedding 失败: {e}", exc_info=True)
+           logger.error(f"更新节点 embedding 失败: {e}")
            raise

    def get_total_count(self) -> int:

@@ -448,5 +439,5 @@ class VectorStore:
            logger.warning(f"向量存储已清空: {self.collection_name}")
        except Exception as e:
-           logger.error(f"清空向量存储失败: {e}", exc_info=True)
+           logger.error(f"清空向量存储失败: {e}")
            raise

View File

@@ -5,6 +5,7 @@ LLM 工具接口:定义记忆系统的工具 schema 和执行逻辑
from __future__ import annotations

import asyncio
+import logging
from typing import Any

from src.common.logger import get_logger

@@ -82,13 +83,7 @@ class MemoryTools:
        self.search_min_importance = search_min_importance
        self.search_similarity_threshold = search_similarity_threshold

-       logger.info(
-           f"MemoryTools 初始化: max_expand_depth={max_expand_depth}, "
-           f"expand_semantic_threshold={expand_semantic_threshold}, "
-           f"search_top_k={search_top_k}, "
-           f"权重配置: vector={search_vector_weight}, importance={search_importance_weight}, recency={search_recency_weight}, "
-           f"阈值过滤: min_importance={search_min_importance}, similarity_threshold={search_similarity_threshold}"
-       )
+       logger.debug(f"MemoryTools 初始化完成")

        # 初始化组件
        self.extractor = MemoryExtractor()

@@ -362,7 +357,7 @@ class MemoryTools:
            执行结果
        """
        try:
-           logger.info(f"创建记忆: {params.get('subject')} - {params.get('topic')}")
+           logger.debug(f"创建记忆: {params.get('subject')} - {params.get('topic')}")

            # 0. 确保初始化
            await self._ensure_initialized()

@@ -379,7 +374,7 @@ class MemoryTools:
            # 4. 异步保存到磁盘(不阻塞当前操作)
            asyncio.create_task(self._async_save_graph_store())

-           logger.info(f"记忆创建成功: {memory.id}")
+           logger.debug(f"记忆创建成功: {memory.id}")

            return {
                "success": True,

@@ -390,7 +385,7 @@ class MemoryTools:
            }
        except Exception as e:
-           logger.error(f"记忆创建失败: {e}", exc_info=True)
+           logger.error(f"记忆创建失败: {e}")
            return {
                "success": False,
                "error": str(e),

@@ -408,7 +403,7 @@ class MemoryTools:
            执行结果
        """
        try:
-           logger.info(
+           logger.debug(
                f"关联记忆: {params.get('source_memory_description')} -> "
                f"{params.get('target_memory_description')}"
            )

@@ -459,7 +454,7 @@ class MemoryTools:
            # 5. 异步保存(不阻塞当前操作)
            asyncio.create_task(self._async_save_graph_store())

-           logger.info(f"记忆关联成功: {source_memory.id} -> {target_memory.id}")
+           logger.debug(f"记忆关联成功: {source_memory.id} -> {target_memory.id}")

            return {
                "success": True,

@@ -470,7 +465,7 @@ class MemoryTools:
            }
        except Exception as e:
-           logger.error(f"记忆关联失败: {e}", exc_info=True)
+           logger.error(f"记忆关联失败: {e}")
            return {
                "success": False,
                "error": str(e),

@@ -506,10 +501,7 @@ class MemoryTools:
        prefer_node_types = params.get("prefer_node_types", [])  # 🆕 优先节点类型
        context = params.get("context", None)

-       logger.info(
-           f"搜索记忆: {query} (top_k={top_k}, expand_depth={expand_depth}, "
-           f"multi_query={use_multi_query}, prefer_types={prefer_node_types})"
-       )
+       logger.info(f"搜索记忆: {query} (返回{top_k}条)")

        # 0. 确保初始化
        await self._ensure_initialized()

@@ -527,7 +519,7 @@ class MemoryTools:
        # 合并用户指定的偏好类型和LLM识别的偏好类型
        all_prefer_types = list(set(prefer_node_types + llm_prefer_types))
        if all_prefer_types:
-           logger.info(f"最终偏好节点类型: {all_prefer_types} (用户指定: {prefer_node_types}, LLM识别: {llm_prefer_types})")
+           logger.debug(f"最终偏好节点类型: {all_prefer_types} (用户指定: {prefer_node_types}, LLM识别: {llm_prefer_types})")
            # 更新prefer_node_types用于后续评分
            prefer_node_types = all_prefer_types

@@ -552,19 +544,20 @@ class MemoryTools:
                if mem_id not in memory_scores or similarity > memory_scores[mem_id]:
                    memory_scores[mem_id] = similarity

-       # 🔥 详细日志:检查初始召回情况
-       logger.info(
-           f"初始向量搜索: 返回{len(similar_nodes)}个节点 → "
-           f"提取{len(initial_memory_ids)}条记忆"
-       )
+       # 检查初始召回情况
+       if logger.isEnabledFor(logging.DEBUG):
+           logger.debug(
+               f"初始向量搜索: 返回{len(similar_nodes)}个节点 → "
+               f"提取{len(initial_memory_ids)}条记忆"
+           )

        if len(initial_memory_ids) == 0:
            logger.warning(
                "⚠️ 向量搜索未找到任何记忆!"
                "可能原因1) 嵌入模型理解问题 2) 记忆节点未建立索引 3) 查询表达与存储内容差异过大"
            )
            # 输出相似节点的详细信息用于调试
-           if similar_nodes:
-               logger.debug(f"向量搜索返回的节点元数据样例: {similar_nodes[0][2] if len(similar_nodes) > 0 else 'None'}")
+           if logger.isEnabledFor(logging.DEBUG) and similar_nodes:
+               logger.debug(f"向量搜索返回的节点元数据样例: {similar_nodes[0][2]}")
        elif len(initial_memory_ids) < 3:
            logger.warning(f"⚠️ 初始召回记忆数量较少({len(initial_memory_ids)}条),可能影响结果质量")

@@ -584,8 +577,9 @@ class MemoryTools:
        if query_embedding is not None:
            if use_path_expansion:
-               # 🆕 使用路径评分扩展算法
-               logger.info(f"🔬 使用路径评分扩展算法: 初始{len(similar_nodes)}个节点, 深度={expand_depth}")
+               # 使用路径评分扩展算法
+               if logger.isEnabledFor(logging.DEBUG):
+                   logger.debug(f"使用路径评分扩展算法: 初始{len(similar_nodes)}个节点, 深度={expand_depth}")

                # 延迟初始化路径扩展器
                if self.path_expander is None:

@@ -618,7 +612,7 @@ class MemoryTools:
                    # 路径扩展返回的是 [(Memory, final_score, paths), ...]
                    # 我们需要直接返回这些记忆,跳过后续的传统评分
-                   logger.info(f"✅ 路径扩展返回 {len(path_results)} 条记忆")
+                   logger.debug(f"✅ 路径扩展返回 {len(path_results)} 条记忆")

                    # 直接构建返回结果
                    path_memories = []

@@ -635,7 +629,7 @@ class MemoryTools:
                            }
                        })

-                   logger.info(f"🎯 路径扩展最终返回: {len(path_memories)} 条记忆")
+                   logger.debug(f"路径扩展最终返回: {len(path_memories)} 条记忆")

                    return {
                        "success": True,

@@ -645,7 +639,7 @@ class MemoryTools:
                    }
                except Exception as e:
-                   logger.error(f"路径扩展失败: {e}", exc_info=True)
+                   logger.error(f"路径扩展失败: {e}")
                    # 路径扩展失败,不再回退到旧的图扩展算法

        # 4. 合并初始记忆和扩展记忆

@@ -668,16 +662,10 @@ class MemoryTools:
            reverse=True
        )  # 🔥 不再提前截断,让所有候选参与详细评分

-       # 🔍 统计初始记忆的相似度分布(用于诊断)
-       if memory_scores:
+       # 统计初始记忆的相似度分布(用于诊断)
+       if logger.isEnabledFor(logging.DEBUG) and memory_scores:
            similarities = list(memory_scores.values())
-           logger.info(
-               f"📊 向量相似度分布: 最高={max(similarities):.3f}, "
-               f"最低={min(similarities):.3f}, "
-               f"平均={sum(similarities)/len(similarities):.3f}, "
-               f">0.3: {len([s for s in similarities if s > 0.3])}/{len(similarities)}, "
-               f">0.2: {len([s for s in similarities if s > 0.2])}/{len(similarities)}"
-           )
+           logger.debug(f"向量相似度分布: 最高={max(similarities):.3f}, 最低={min(similarities):.3f}, 平均={sum(similarities)/len(similarities):.3f}")

        # 5. 获取完整记忆并进行最终排序(优化后的动态权重系统)
        memories_with_scores = []

@@ -787,28 +775,24 @@ class MemoryTools:
            # if not is_initial_memory and some_score < threshold:
            #     continue

-           # 记录通过过滤的记忆(用于调试)
-           if is_initial_memory:
-               logger.debug(
-                   f"保留 {memory.id[:8]} [初始]: 相似度={true_similarity:.3f}, "
-                   f"重要性={memory.importance:.2f}, 综合分数={final_score:.4f}"
-               )
-           else:
-               logger.debug(
-                   f"✅ 保留 {memory.id[:8]} [扩展]: 重要性={memory.importance:.2f}, "
-                   f"综合分数={final_score:.4f}"
-               )
+           # 记录通过过滤的记忆(仅保留关键信息用于调试)
+           if logger.isEnabledFor(logging.DEBUG):
+               if is_initial_memory:
+                   logger.debug(f"保留记忆 {memory.id[:8]} [初始]: 相似度={true_similarity:.3f}, 综合分数={final_score:.4f}")
+               else:
+                   logger.debug(f"保留记忆 {memory.id[:8]} [扩展]: 综合分数={final_score:.4f}")

            # 🆕 节点类型加权对REFERENCE/ATTRIBUTE节点额外加分促进事实性信息召回
            if "REFERENCE" in node_types_count or "ATTRIBUTE" in node_types_count:
                final_score *= 1.1  # 10% 加成

-           # 🆕 用户指定的优先节点类型额外加权
+           # 用户指定的优先节点类型额外加权
            if prefer_node_types:
                for prefer_type in prefer_node_types:
                    if prefer_type in node_types_count:
                        final_score *= 1.15  # 15% 额外加成
-                       logger.debug(f"记忆 {memory.id[:8]} 包含优先节点类型 {prefer_type},加权后分数: {final_score:.4f}")
+                       if logger.isEnabledFor(logging.DEBUG):
+                           logger.debug(f"记忆 {memory.id[:8]} 包含优先节点类型 {prefer_type},加权后分数: {final_score:.4f}")
                        break

            memories_with_scores.append((memory, final_score, dominant_node_type))

@@ -834,13 +818,7 @@ class MemoryTools:
                }
                results.append(result)

-           logger.info(
-               f"搜索完成: 初始{len(initial_memory_ids)}个 → "
-               f"扩展{len(expanded_memory_scores)}个 → "
-               f"候选{total_candidates}个 → "
-               f"过滤{filtered_count}个 (重要性过滤) → "
-               f"最终返回{len(results)}条记忆"
-           )
+           logger.info(f"搜索完成: 初始{len(initial_memory_ids)}个 → 最终返回{len(results)}条记忆")

            # 如果过滤率过高,发出警告
            if total_candidates > 0:

@@ -863,7 +841,7 @@ class MemoryTools:
            }
        except Exception as e:
-           logger.error(f"记忆搜索失败: {e}", exc_info=True)
+           logger.error(f"记忆搜索失败: {e}")
            return {
                "success": False,
                "error": str(e),

@@ -1087,10 +1065,8 @@ class MemoryTools:
                prefer_node_types = [t for t in prefer_node_types if t in valid_types]

            if result_queries:
-               logger.info(
-                   f"生成查询: {[q for q, _ in result_queries]} "
-                   f"(偏好类型: {prefer_node_types if prefer_node_types else ''})"
-               )
+               if logger.isEnabledFor(logging.DEBUG):
+                   logger.debug(f"生成{len(result_queries)}个查询,偏好类型: {prefer_node_types if prefer_node_types else ''}")

            return result_queries, prefer_node_types

        except Exception as e:

@@ -1129,9 +1105,8 @@ class MemoryTools:
                min_similarity=0.0,  # 不在这里过滤,交给后续评分
            )

-           logger.debug(f"单查询向量搜索: 查询='{query}', 返回节点数={len(similar_nodes)}")
-           if similar_nodes:
-               logger.debug(f"Top 3相似度: {[f'{sim:.3f}' for _, sim, _ in similar_nodes[:3]]}")
+           if logger.isEnabledFor(logging.DEBUG):
+               logger.debug(f"单查询搜索: 返回{len(similar_nodes)}个节点")

            return similar_nodes

@@ -1160,7 +1135,8 @@ class MemoryTools:
            # 1. 使用小模型生成多个查询 + 节点类型识别
            multi_queries, prefer_node_types = await self._generate_multi_queries_simple(query, context)
-           logger.debug(f"生成 {len(multi_queries)} 个查询: {multi_queries}, 偏好类型: {prefer_node_types}")
+           if logger.isEnabledFor(logging.DEBUG):
+               logger.debug(f"多查询搜索: 生成{len(multi_queries)}个查询,偏好类型: {prefer_node_types}")

            # 2. 生成所有查询的嵌入
            if not self.builder.embedding_generator:

@@ -1193,14 +1169,13 @@ class MemoryTools:
                fusion_strategy="weighted_max",
            )

-           logger.info(f"多查询检索完成: {len(similar_nodes)} 个节点 (偏好类型: {prefer_node_types})")
-           if similar_nodes:
-               logger.debug(f"Top 5融合相似度: {[f'{sim:.3f}' for _, sim, _ in similar_nodes[:5]]}")
+           if logger.isEnabledFor(logging.DEBUG):
+               logger.debug(f"多查询检索完成: {len(similar_nodes)}个节点,偏好类型: {prefer_node_types}")

            return similar_nodes, prefer_node_types

        except Exception as e:
-           logger.warning(f"多查询搜索失败,回退到单查询模式: {e}", exc_info=True)
+           logger.warning(f"多查询搜索失败,回退到单查询模式: {e}")
            single_results = await self._single_query_search(query, top_k)
            return single_results, []

@@ -1234,7 +1209,8 @@ class MemoryTools:
        # 如果嵌入生成失败,无法进行语义搜索
        if query_embedding is None:
-           logger.debug("嵌入生成失败,跳过描述搜索")
+           if logger.isEnabledFor(logging.DEBUG):
+               logger.debug("嵌入生成失败,跳过描述搜索")
            return None

        # 搜索相似节点

@@ -1311,6 +1287,7 @@ class MemoryTools:
                return
            await self.persistence_manager.save_graph_store(self.graph_store)
-           logger.debug("异步保存图数据成功")
+           if logger.isEnabledFor(logging.DEBUG):
+               logger.debug("异步保存图数据成功")
        except Exception as e:
-           logger.error(f"异步保存图数据失败: {e}", exc_info=True)
+           logger.error(f"异步保存图数据失败: {e}")

View File

@@ -122,7 +122,7 @@ class UnifiedMemoryManager:
            return
        try:
-           logger.info("开始初始化统一记忆管理器...")
+           logger.debug("开始初始化统一记忆管理器...")

            # 初始化底层 MemoryManager长期记忆
            if self.memory_manager is None:

@@ -132,7 +132,7 @@ class UnifiedMemoryManager:
                self.memory_manager = MemoryManager(data_dir=self.data_dir)
                await self.memory_manager.initialize()
            else:
-               logger.info("使用外部提供的 MemoryManager")
+               logger.debug("使用外部提供的 MemoryManager")
                # 确保外部 MemoryManager 已初始化
                if not getattr(self.memory_manager, "_initialized", False):
                    await self.memory_manager.initialize()

@@ -165,7 +165,7 @@ class UnifiedMemoryManager:
            self._start_auto_transfer_task()
        except Exception as e:
-           logger.error(f"统一记忆管理器初始化失败: {e}", exc_info=True)
+           logger.error(f"统一记忆管理器初始化失败: {e}")
            raise

    async def add_message(self, message: dict[str, Any]) -> MemoryBlock | None:

@@ -244,7 +244,7 @@ class UnifiedMemoryManager:
            ]
            if blocks_to_transfer:
-               logger.info(
+               logger.debug(
                    f"检测到 {len(blocks_to_transfer)} 个感知记忆需要转移,已交由后台后处理任务执行"
                )
                for block in blocks_to_transfer:

@@ -254,11 +254,6 @@ class UnifiedMemoryManager:
            result["perceptual_blocks"] = perceptual_blocks
            result["short_term_memories"] = short_term_memories

-           logger.info(
-               f"初步检索: 感知记忆 {len(perceptual_blocks)} 块, "
-               f"短期记忆 {len(short_term_memories)} 条"
-           )

            # 步骤2: 裁判模型评估
            if use_judge:
                judge_decision = await self._judge_retrieval_sufficiency(

@@ -291,7 +286,7 @@ class UnifiedMemoryManager:
            return result
        except Exception as e:
-           logger.error(f"智能检索失败: {e}", exc_info=True)
+           logger.error(f"智能检索失败: {e}")
            return {
                "perceptual_blocks": [],
                "short_term_memories": [],

@@ -396,11 +391,10 @@ class UnifiedMemoryManager:
                missing_aspects=data.get("missing_aspects", []),
            )

-           logger.info(f"裁判决策: {decision}")
            return decision
        except Exception as e:
-           logger.error(f"裁判模型评估失败: {e}", exc_info=True)
+           logger.error(f"裁判模型评估失败: {e}")
            # 默认判定为不充足,需要检索长期记忆
            return JudgeDecision(
                is_sufficient=False,

@@ -428,7 +422,7 @@ class UnifiedMemoryManager:
            except asyncio.CancelledError:
                logger.info(f"{task_name} 后台任务已取消")
            except Exception as exc:
-               logger.error(f"{task_name} 后台任务失败: {exc}", exc_info=True)
+               logger.error(f"{task_name} 后台任务失败: {exc}")

        task.add_done_callback(_callback)

@@ -460,7 +454,7 @@ class UnifiedMemoryManager:
    async def _transfer_blocks_to_short_term(self, blocks: list[MemoryBlock]) -> None:
        """实际转换逻辑在后台执行"""
-       logger.info(f"正在后台处理 {len(blocks)} 个感知记忆块")
+       logger.debug(f"正在后台处理 {len(blocks)} 个感知记忆块")
        for block in blocks:
            try:
                stm = await self.short_term_manager.add_from_block(block)

@@ -469,9 +463,9 @@ class UnifiedMemoryManager:
                    await self.perceptual_manager.remove_block(block.id)
                    self._trigger_transfer_wakeup()
-                   logger.info(f"✓ 记忆块 {block.id} 已被转移到短期记忆 {stm.id}")
+                   logger.debug(f"✓ 记忆块 {block.id} 已被转移到短期记忆 {stm.id}")
            except Exception as exc:
-               logger.error(f"后台转移失败,记忆块 {block.id}: {exc}", exc_info=True)
+               logger.error(f"后台转移失败,记忆块 {block.id}: {exc}")

    def _build_manual_multi_queries(self, queries: list[str]) -> list[dict[str, float]]:
        """去重裁判查询并附加权重以进行多查询搜索"""

@@ -522,9 +516,6 @@ class UnifiedMemoryManager:
        unique_memories = self._deduplicate_memories(memories)
        query_count = len(manual_queries) if manual_queries else 1

-       logger.info(
-           f"Long-term retrieval done: {len(unique_memories)} hits (queries fused={query_count})"
-       )
        return unique_memories

    def _deduplicate_memories(self, memories: list[Any]) -> list[Any]:

@@ -556,7 +547,7 @@ class UnifiedMemoryManager:
        self._transfer_wakeup_event.clear()
        self._auto_transfer_task = asyncio.create_task(self._auto_transfer_loop())
-       logger.info("自动转移任务已启动")
+       logger.debug("自动转移任务已启动")

    async def _auto_transfer_loop(self) -> None:
        """自动转移循环(批量缓存模式)"""

@@ -594,7 +585,7 @@ class UnifiedMemoryManager:
                        added += 1
                if added:
-                   logger.info(
+                   logger.debug(
                        f"自动转移缓存: 新增{added}条, 当前缓存{len(transfer_cache)}/{cache_size_threshold}"
                    )

@@ -610,7 +601,7 @@ class UnifiedMemoryManager:
                )
                if should_transfer and transfer_cache:
-                   logger.info(
+                   logger.debug(
                        f"准备批量转移: {len(transfer_cache)}条短期记忆到长期记忆 (占用率 {occupancy_ratio:.0%})"
                    )

@@ -629,13 +620,13 @@ class UnifiedMemoryManager:
                    cached_ids.difference_update(transferred_ids)
                    last_transfer_time = time.monotonic()
-                   logger.info(f"✅ 批量转移完成: {result}")
+                   logger.debug(f"✅ 批量转移完成: {result}")

            except asyncio.CancelledError:
-               logger.info("自动转移循环被取消")
+               logger.debug("自动转移循环被取消")
                break
            except Exception as e:
-               logger.error(f"自动转移循环异常: {e}", exc_info=True)
+               logger.error(f"自动转移循环异常: {e}")

    async def manual_transfer(self) -> dict[str, Any]:
        """

@@ -651,7 +642,6 @@ class UnifiedMemoryManager:
            memories_to_transfer = self.short_term_manager.get_memories_for_transfer()
            if not memories_to_transfer:
-               logger.info("没有需要转移的短期记忆")
                return {"message": "没有需要转移的记忆", "transferred_count": 0}

            # 执行转移

@@ -667,7 +657,7 @@ class UnifiedMemoryManager:
            return result
        except Exception as e:
-           logger.error(f"手动转移失败: {e}", exc_info=True)
+           logger.error(f"手动转移失败: {e}")
            return {"error": str(e), "transferred_count": 0}

    def get_statistics(self) -> dict[str, Any]:

@@ -719,4 +709,4 @@ class UnifiedMemoryManager:
            logger.info("✅ 统一记忆管理器已关闭")
        except Exception as e:
-           logger.error(f"关闭统一记忆管理器失败: {e}", exc_info=True)
+           logger.error(f"关闭统一记忆管理器失败: {e}")

View File

@@ -101,7 +101,7 @@ class EmbeddingGenerator:
            return None
        except Exception as e:
-           logger.error(f"❌ 嵌入生成异常: {e}", exc_info=True)
+           logger.error(f"❌ 嵌入生成异常: {e}")
            return None

    async def _generate_with_api(self, text: str) -> np.ndarray | None:

@@ -171,7 +171,7 @@ class EmbeddingGenerator:
            return results
        except Exception as e:
-           logger.error(f"批量生成嵌入失败: {e}", exc_info=True)
+           logger.error(f"批量生成嵌入失败: {e}")
            return [None for _ in texts]

    async def _generate_batch_with_api(self, texts: list[str]) -> list[np.ndarray | None] | None:

View File

@@ -126,7 +126,7 @@ class PathScoreExpansion:
        self._neighbor_cache: dict[str, list[Any]] = {}
        self._node_score_cache: dict[str, float] = {}

-       logger.info(
+       logger.debug(
            f"PathScoreExpansion 初始化: max_hops={self.config.max_hops}, "
            f"damping={self.config.damping_factor}, "
            f"merge_strategy={self.config.path_merge_strategy}"

@@ -164,7 +164,7 @@ class PathScoreExpansion:
        # 保存偏好类型
        self.prefer_node_types = prefer_node_types or []
        if self.prefer_node_types:
-           logger.info(f"🎯 偏好节点类型: {self.prefer_node_types}")
+           logger.debug(f"偏好节点类型: {self.prefer_node_types}")

        # 1. 初始化路径
        active_paths = []

@@ -175,7 +175,7 @@ class PathScoreExpansion:
            active_paths.append(path)
            best_score_to_node[node_id] = score

-       logger.info(f"🚀 路径扩展开始: {len(active_paths)} 条初始路径")
+       logger.debug(f"路径扩展开始: {len(active_paths)} 条初始路径")

        # 2. 多跳扩展
        hop_stats = []  # 每跳统计信息

@@ -284,8 +284,8 @@ class PathScoreExpansion:
            if self.config.enable_early_stop and prev_path_count > 0:
                growth_rate = (len(active_paths) - prev_path_count) / prev_path_count
                if growth_rate < self.config.early_stop_growth_threshold:
-                   logger.info(
-                       f"⏸️ 早停触发: 路径增长率 {growth_rate:.2%} < {self.config.early_stop_growth_threshold:.0%}, "
+                   logger.debug(
+                       f"早停触发: 路径增长率 {growth_rate:.2%} < {self.config.early_stop_growth_threshold:.0%}, "
                        f"在第 {hop+1}/{self.config.max_hops} 跳停止"
                    )
                    hop_time = time.time() - hop_start

@@ -325,16 +325,16 @@ class PathScoreExpansion:
            # 早停:如果没有新路径
            if not active_paths:
-               logger.info(f"⏹️ 提前停止:第 {hop+1} 跳无新路径")
+               logger.debug(f"提前停止:第 {hop+1} 跳无新路径")
                break

        # 3. 提取叶子路径(最小子路径)
        leaf_paths = self._extract_leaf_paths(active_paths)
-       logger.info(f"📊 提取 {len(leaf_paths)} 条叶子路径")
+       logger.debug(f"提取 {len(leaf_paths)} 条叶子路径")

        # 4. 路径到记忆的映射
        memory_paths = await self._map_paths_to_memories(leaf_paths)
-       logger.info(f"🔗 映射到 {len(memory_paths)} 条候选记忆")
+       logger.debug(f"映射到 {len(memory_paths)} 条候选记忆")

        # 🚀 4.5. 粗排过滤:在详细评分前过滤掉低质量记忆
        if len(memory_paths) > self.config.max_candidate_memories:

@@ -357,8 +357,8 @@ class PathScoreExpansion:
                if mem_id in retained_mem_ids
            }
-           logger.info(
-               f"粗排过滤: {len(memory_scores_rough)} → {len(memory_paths)} 条候选记忆"
+           logger.debug(
+               f"粗排过滤: {len(memory_scores_rough)} → {len(memory_paths)} 条候选记忆"
            )

@@ -369,8 +369,8 @@ class PathScoreExpansion:
        result = scored_memories[:top_k]

        elapsed = time.time() - start_time
-       logger.info(
-           f"路径扩展完成: {len(initial_nodes)} 个初始节点 → "
+       logger.debug(
+           f"路径扩展完成: {len(initial_nodes)} 个初始节点 → "
            f"{len(result)} 条记忆 (耗时 {elapsed:.3f}s)"
        )