@@ -4,8 +4,6 @@
 
 from __future__ import annotations
 
-from typing import Callable
-
 from src.common.logger import get_logger
 from src.config.official_configs import MemoryConfig
 from src.memory_graph.models import MemoryNode, NodeType
@@ -74,7 +72,7 @@ class NodeMerger:
         try:
             # 在向量存储中搜索相似节点
             results = await self.vector_store.search_similar_nodes(
-                query_embedding=node.embedding,  # type: ignore[arg-type]
+                query_embedding=node.embedding,
                 limit=limit + 1,  # +1 因为可能包含节点自己
                 node_types=[node.node_type],  # 只搜索相同类型的节点
                 min_similarity=threshold,
@@ -82,7 +80,7 @@ class NodeMerger:
 
             # 过滤掉节点自己,并构建结果
             similar_nodes = []
-            for node_id, similarity, _ in results:
+            for node_id, similarity, metadata in results:
                 if node_id == node.id:
                     continue  # 跳过自己
 
@@ -97,7 +95,7 @@ class NodeMerger:
             logger.debug(f"找到 {len(similar_nodes)} 个相似节点 (阈值: {threshold})")
             return similar_nodes
 
-        except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+        except Exception as e:
            logger.error(f"查找相似节点失败: {e}")
            return []
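Note on the hunks above: `search_similar_nodes` returns `(node_id, similarity, metadata)` tuples and may include the query node itself, which is why the call asks for `limit + 1` and the loop skips `node.id`. A minimal standalone sketch of that filtering step, assuming only that tuple shape:

```python
# Sketch only: assumes results are (node_id, similarity, metadata) tuples,
# as in the hunk above, and that self_id identifies the query node.
def filter_similar(results, self_id, threshold, limit):
    similar = []
    for node_id, similarity, metadata in results:
        if node_id == self_id:
            continue  # the vector store may return the query node itself
        if similarity >= threshold:
            similar.append((node_id, similarity, metadata))
    return similar[:limit]
```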
@@ -181,13 +179,13 @@ class NodeMerger:
 
             # 3. 检查邻居内容是否有重叠
             source_neighbor_contents = set()
-            for neighbor_id, _ in source_neighbors:
+            for neighbor_id, edge_data in source_neighbors:
                 neighbor_node = self._get_node_content(neighbor_id)
                 if neighbor_node:
                     source_neighbor_contents.add(neighbor_node.lower())
 
             target_neighbor_contents = set()
-            for neighbor_id, _ in target_neighbors:
+            for neighbor_id, edge_data in target_neighbors:
                 neighbor_node = self._get_node_content(neighbor_id)
                 if neighbor_node:
                     target_neighbor_contents.add(neighbor_node.lower())
@@ -245,7 +243,7 @@ class NodeMerger:
             logger.debug(f"节点合并成功: {source.id} → {target.id}")
             return True
 
-        except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"节点合并失败: {e}")
             return False
 
@@ -278,7 +276,7 @@ class NodeMerger:
     async def batch_merge_similar_nodes(
         self,
         nodes: list[MemoryNode],
-        progress_callback: Callable[..., None] | None = None,
+        progress_callback: callable | None = None,
     ) -> dict:
         """
         批量处理节点合并
@@ -326,7 +324,7 @@ class NodeMerger:
                 if progress_callback:
                     progress_callback(i + 1, stats["total"], stats)
 
-            except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+            except Exception as e:
                 logger.error(f"处理节点 {node.id} 时失败: {e}")
                 stats["skipped"] += 1
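The looser `callable | None` annotation keeps the call shape unchanged: the callback is invoked as `callback(processed, total, stats)` after each node, per the hunk above. A hypothetical caller, assuming `batch_merge_similar_nodes` keeps that signature (the `merger` name is illustrative):

```python
# Hypothetical usage; assumes the callback is called as (processed, total, stats).
def log_progress(processed: int, total: int, stats: dict) -> None:
    print(f"merged {processed}/{total}, skipped={stats.get('skipped', 0)}")

# result = await merger.batch_merge_similar_nodes(nodes, progress_callback=log_progress)
```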
@@ -339,19 +337,19 @@ class NodeMerger:
 
     def get_merge_candidates(
         self,
-        _min_similarity: float = 0.85,
-        _limit: int = 100,
+        min_similarity: float = 0.85,
+        limit: int = 100,
     ) -> list[tuple[str, str, float]]:
         """
         获取待合并的候选节点对
 
         Args:
-            _min_similarity: 最小相似度
-            _limit: 最大返回数量
+            min_similarity: 最小相似度
+            limit: 最大返回数量
 
         Returns:
             List of (node_id_1, node_id_2, similarity)
         """
-        # 改进空间: 实现更智能的候选查找算法
+        # TODO: 实现更智能的候选查找算法
         # 目前返回空列表,后续可以基于向量存储进行批量查询
         return []
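`get_merge_candidates` is still a stub. Under the TODO above, a naive O(n²) baseline could look like the sketch below; it is not the project's implementation, and nodes with `.id`/`.embedding` attributes (embeddings as float sequences) are assumptions:

```python
import math

def cosine(a: list[float], b: list[float]) -> float:
    # Plain cosine similarity; zero vectors score 0.
    dot = sum(x * y for x, y in zip(a, b))
    na = math.sqrt(sum(x * x for x in a))
    nb = math.sqrt(sum(y * y for y in b))
    return dot / (na * nb) if na and nb else 0.0

def naive_candidates(nodes, min_similarity: float = 0.85, limit: int = 100):
    # O(n^2) pairwise scan; fine for small graphs. Production should use a
    # batched vector-store query instead, per the TODO above.
    pairs = []
    for i, a in enumerate(nodes):
        for b in nodes[i + 1:]:
            if a.embedding is None or b.embedding is None:
                continue
            sim = cosine(a.embedding, b.embedding)
            if sim >= min_similarity:
                pairs.append((a.id, b.id, sim))
    pairs.sort(key=lambda t: t[2], reverse=True)
    return pairs[:limit]
```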
@@ -72,13 +72,13 @@ class LongTermMemoryManager:
             logger.debug("开始初始化长期记忆管理器...")
 
             # 确保底层 MemoryManager 已初始化
-            if not self.memory_manager._initialized:  # type: ignore[attr-defined,misc]
+            if not self.memory_manager._initialized:
                 await self.memory_manager.initialize()
 
             self._initialized = True
             logger.debug("长期记忆管理器初始化完成")
 
-        except (OSError, RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"长期记忆管理器初始化失败: {e}")
             raise
 
@@ -144,7 +144,7 @@ class LongTermMemoryManager:
             logger.debug(f"短期记忆转移完成: {result}")
             return result
 
-        except (OSError, RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"转移短期记忆失败: {e}")
             return {"error": str(e), "processed_count": 0}
 
@@ -193,7 +193,7 @@ class LongTermMemoryManager:
                 else:
                     result["failed_count"] += 1
 
-            except (KeyError, TypeError, ValueError, AttributeError) as e:
+            except Exception as e:
                 logger.error(f"处理短期记忆 {stm.id} 失败: {e}")
                 result["failed_count"] += 1
 
@@ -211,12 +211,12 @@ class LongTermMemoryManager:
             from src.config.config import global_config
 
             # 检查是否启用了高级路径扩展算法
-            use_path_expansion = getattr(global_config.memory, "enable_path_expansion", False)  # type: ignore[union-attr]
+            use_path_expansion = getattr(global_config.memory, "enable_path_expansion", False)
 
             # 1. 检索记忆
             # 如果启用了路径扩展,search_memories 内部会自动使用 PathScoreExpansion
             # 我们只需要传入合适的 expand_depth
-            expand_depth = getattr(global_config.memory, "path_expansion_max_hops", 2) if use_path_expansion else 0  # type: ignore[union-attr]
+            expand_depth = getattr(global_config.memory, "path_expansion_max_hops", 2) if use_path_expansion else 0
 
             memories = await self.memory_manager.search_memories(
                 query=stm.content,
@@ -242,7 +242,7 @@ class LongTermMemoryManager:
                 # 获取该记忆的直接关联记忆(1跳邻居)
                 try:
                     # 利用 MemoryManager 的底层图遍历能力
-                    related_ids = self.memory_manager._get_related_memories(mem.id, max_depth=1)  # type: ignore[attr-defined,misc]
+                    related_ids = self.memory_manager._get_related_memories(mem.id, max_depth=1)
 
                     # 限制每个记忆扩展的邻居数量,避免上下文爆炸
                     max_neighbors = 2
@@ -259,7 +259,7 @@ class LongTermMemoryManager:
                         if neighbor_count >= max_neighbors:
                             break
 
-                except (KeyError, TypeError, ValueError, AttributeError) as e:
+                except Exception as e:
                     logger.warning(f"获取关联记忆失败: {e}")
 
             # 总数限制
@@ -269,7 +269,7 @@ class LongTermMemoryManager:
             logger.debug(f"为短期记忆 {stm.id} 找到 {len(expanded_memories)} 个长期记忆 (含简单图扩展)")
             return expanded_memories
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"检索相似长期记忆失败: {e}")
             return []
 
@@ -295,7 +295,7 @@ class LongTermMemoryManager:
 
             # 调用长期记忆构建模型
             llm = LLMRequest(
-                model_set=model_config.model_task_config.memory_long_term_builder,  # type: ignore[union-attr]
+                model_set=model_config.model_task_config.memory_long_term_builder,
                 request_type="long_term_memory.graph_operations",
             )
 
@@ -311,7 +311,7 @@ class LongTermMemoryManager:
             logger.debug(f"LLM 生成 {len(operations)} 个图操作指令")
             return operations
 
-        except (RuntimeError, ValueError, KeyError) as e:
+        except Exception as e:
             logger.error(f"LLM 决策图操作失败: {e}")
             # 默认创建新记忆
             return [
@@ -550,13 +550,13 @@ class LongTermMemoryManager:
                 else:
                     logger.warning(f"未实现的操作类型: {op.operation_type}")
 
-            except (KeyError, TypeError, ValueError, RuntimeError) as e:
+            except Exception as e:
                 logger.error(f"执行图操作失败: {op}, 错误: {e}")
 
            logger.debug(f"执行了 {success_count}/{len(operations)} 个图操作")
            return success_count > 0
 
-        except (KeyError, TypeError, ValueError, RuntimeError) as e:
+        except Exception as e:
            logger.error(f"执行图操作失败: {e}")
            return False
@@ -715,7 +715,7 @@ class LongTermMemoryManager:
             logger.info(f"开始智能合并记忆: {memories_to_merge} -> {target_id}")
 
             # 1. 调用 GraphStore 的合并功能(转移节点和边)
-            merge_success = self.memory_manager.graph_store.merge_memories(target_id, memories_to_merge)  # type: ignore[union-attr]
+            merge_success = self.memory_manager.graph_store.merge_memories(target_id, memories_to_merge)
 
             if merge_success:
                 # 2. 更新目标记忆的元数据
@@ -731,7 +731,7 @@ class LongTermMemoryManager:
                 )
 
                 # 3. 异步保存
-                asyncio.create_task(self.memory_manager._async_save_graph_store("合并记忆"))  # type: ignore[attr-defined,misc]
+                asyncio.create_task(self.memory_manager._async_save_graph_store("合并记忆"))
                 logger.info(f"合并记忆完成: {source_ids} -> {target_id}")
             else:
                 logger.error(f"合并记忆失败: {source_ids}")
@@ -752,7 +752,7 @@ class LongTermMemoryManager:
             import uuid
             node_id = str(uuid.uuid4())
 
-            success = self.memory_manager.graph_store.add_node(  # type: ignore[union-attr]
+            success = self.memory_manager.graph_store.add_node(
                 node_id=node_id,
                 content=content,
                 node_type=node_type,
@@ -788,7 +788,7 @@ class LongTermMemoryManager:
                 logger.warning("更新节点失败: 缺少 node_id")
                 return
 
-            success = self.memory_manager.graph_store.update_node(  # type: ignore[union-attr]
+            success = self.memory_manager.graph_store.update_node(
                 node_id=node_id,
                 content=updated_content
             )
@@ -815,11 +815,11 @@ class LongTermMemoryManager:
 
             # 更新目标节点内容
             if merged_content:
-                self.memory_manager.graph_store.update_node(target_id, content=merged_content)  # type: ignore[union-attr]
+                self.memory_manager.graph_store.update_node(target_id, content=merged_content)
 
             # 合并其他节点到目标节点
             for source_id in sources:
-                self.memory_manager.graph_store.merge_nodes(source_id, target_id)  # type: ignore[union-attr]
+                self.memory_manager.graph_store.merge_nodes(source_id, target_id)
 
             logger.info(f"合并节点: {sources} -> {target_id}")
 
@@ -873,7 +873,7 @@ class LongTermMemoryManager:
                 logger.warning("更新边失败: 缺少 edge_id")
                 return
 
-            success = self.memory_manager.graph_store.update_edge(  # type: ignore[union-attr]
+            success = self.memory_manager.graph_store.update_edge(
                 edge_id=edge_id,
                 relation=updated_relation,
                 importance=updated_importance
@@ -894,7 +894,7 @@ class LongTermMemoryManager:
                 logger.warning("删除边失败: 缺少 edge_id")
                 return
 
-            success = self.memory_manager.graph_store.remove_edge(edge_id)  # type: ignore[union-attr]
+            success = self.memory_manager.graph_store.remove_edge(edge_id)
 
             if success:
                 logger.info(f"删除边: {edge_id}")
@@ -919,9 +919,9 @@ class LongTermMemoryManager:
                 )
                 await self.memory_manager.vector_store.add_node(node)
                 node.mark_vector_stored()
-                if self.memory_manager.graph_store.graph.has_node(node_id):  # type: ignore[union-attr]
-                    self.memory_manager.graph_store.graph.nodes[node_id]["has_vector"] = True  # type: ignore[union-attr]
-        except (RuntimeError, ValueError, AttributeError) as e:
+                if self.memory_manager.graph_store.graph.has_node(node_id):
+                    self.memory_manager.graph_store.graph.nodes[node_id]["has_vector"] = True
+        except Exception as e:
             logger.warning(f"生成节点 embedding 失败: {e}")
 
     async def apply_long_term_decay(self) -> dict[str, Any]:
@@ -939,7 +939,7 @@ class LongTermMemoryManager:
         try:
             logger.info("开始应用长期记忆激活度衰减...")
 
-            all_memories = self.memory_manager.graph_store.get_all_memories()  # type: ignore[union-attr]
+            all_memories = self.memory_manager.graph_store.get_all_memories()
             decayed_count = 0
 
             for memory in all_memories:
@@ -972,14 +972,14 @@ class LongTermMemoryManager:
                     logger.warning(f"解析时间失败: {e}")
 
             # 保存更新
-            await self.memory_manager.persistence.save_graph_store(  # type: ignore[union-attr]
-                self.memory_manager.graph_store  # type: ignore[arg-type]
+            await self.memory_manager.persistence.save_graph_store(
+                self.memory_manager.graph_store
             )
 
             logger.info(f"长期记忆衰减完成: {decayed_count} 条记忆已更新")
             return {"decayed_count": decayed_count, "total_memories": len(all_memories)}
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
            logger.error(f"应用长期记忆衰减失败: {e}")
            return {"error": str(e), "decayed_count": 0}
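The per-memory decay computation itself is elided from this hunk. The usual shape for activation decay is exponential in elapsed time; a sketch under that assumption — the half-life constant and the `last_access_iso` field name are illustrative, not from this codebase:

```python
import math
from datetime import datetime

def decayed_activation(activation: float, last_access_iso: str,
                       half_life_hours: float = 72.0) -> float:
    # Exponential decay: the value halves every half_life_hours.
    elapsed_h = (datetime.now() - datetime.fromisoformat(last_access_iso)).total_seconds() / 3600
    return activation * math.exp(-math.log(2) * elapsed_h / half_life_hours)
```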
@@ -1007,7 +1007,7 @@ class LongTermMemoryManager:
             self._initialized = False
             logger.info("长期记忆管理器已关闭")
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"关闭长期记忆管理器失败: {e}")
 
 
@@ -1015,9 +1015,9 @@ class LongTermMemoryManager:
 _long_term_manager_instance: LongTermMemoryManager | None = None
 
 
-def get_long_term_manager() -> LongTermMemoryManager:  # type: ignore
+def get_long_term_manager() -> LongTermMemoryManager:
     """获取长期记忆管理器单例(需要先初始化记忆图系统)"""
-    global _long_term_manager_instance  # type: ignore[name-defined,misc]
+    global _long_term_manager_instance
     if _long_term_manager_instance is None:
         from src.memory_graph.manager_singleton import get_memory_manager
@@ -118,7 +118,7 @@ class PerceptualMemoryManager:
                 f"(已加载 {len(self.perceptual_memory.blocks)} 个记忆块)"
             )
 
-        except (OSError, RuntimeError, ValueError) as e:
+        except Exception as e:
             logger.error(f"感知记忆管理器初始化失败: {e}")
             raise
 
@@ -146,24 +146,24 @@ class PerceptualMemoryManager:
 
         try:
             if not hasattr(self.perceptual_memory, "pending_messages"):
-                self.perceptual_memory.pending_messages = []  # type: ignore[union-attr]
+                self.perceptual_memory.pending_messages = []
 
             self._cleanup_pending_messages()
 
             stream_id = message.get("stream_id", "unknown")
             self._normalize_message_timestamp(message)
-            self.perceptual_memory.pending_messages.append(message)  # type: ignore[union-attr]
+            self.perceptual_memory.pending_messages.append(message)
             self._enforce_pending_limits(stream_id)
 
             logger.debug(
                 f"消息已添加到待处理队列 (stream={stream_id[:8]}, "
-                f"总数={len(self.perceptual_memory.pending_messages)})"  # type: ignore[union-attr]
+                f"总数={len(self.perceptual_memory.pending_messages)})"
             )
 
             # 按 stream_id 检查是否达到创建块的条件
             stream_messages = [
                 msg
-                for msg in self.perceptual_memory.pending_messages  # type: ignore[union-attr]
+                for msg in self.perceptual_memory.pending_messages
                 if msg.get("stream_id") == stream_id
             ]
 
@@ -173,7 +173,7 @@ class PerceptualMemoryManager:
 
             return None
 
-        except (KeyError, TypeError, ValueError) as e:
+        except Exception as e:
             logger.error(f"添加消息失败: {e}")
             return None
 
@@ -190,7 +190,7 @@ class PerceptualMemoryManager:
         try:
             self._cleanup_pending_messages()
             # 只取出指定 stream_id 的 block_size 条消息
-            stream_messages = [msg for msg in self.perceptual_memory.pending_messages if msg.get("stream_id") == stream_id]  # type: ignore[union-attr]
+            stream_messages = [msg for msg in self.perceptual_memory.pending_messages if msg.get("stream_id") == stream_id]
 
             if len(stream_messages) < self.block_size:
                 logger.warning(f"stream {stream_id} 的消息不足 {self.block_size} 条,无法创建块")
@@ -201,7 +201,7 @@ class PerceptualMemoryManager:
 
             # 从 pending_messages 中移除这些消息
             for msg in messages:
-                self.perceptual_memory.pending_messages.remove(msg)  # type: ignore[union-attr]
+                self.perceptual_memory.pending_messages.remove(msg)
 
             # 合并消息文本
             combined_text = self._combine_messages(messages)
@@ -219,21 +219,21 @@ class PerceptualMemoryManager:
             )
 
             # 添加到记忆堆顶部
-            self.perceptual_memory.blocks.insert(0, block)  # type: ignore[union-attr]
+            self.perceptual_memory.blocks.insert(0, block)
 
             # 更新所有块的位置
-            for i, b in enumerate(self.perceptual_memory.blocks):  # type: ignore[union-attr]
+            for i, b in enumerate(self.perceptual_memory.blocks):
                 b.position_in_stack = i
 
             # FIFO 淘汰:如果超过最大容量,移除最旧的块
-            if len(self.perceptual_memory.blocks) > self.max_blocks:  # type: ignore[union-attr]
-                removed_blocks = self.perceptual_memory.blocks[self.max_blocks :]  # type: ignore[union-attr]
-                self.perceptual_memory.blocks = self.perceptual_memory.blocks[: self.max_blocks]  # type: ignore[union-attr]
+            if len(self.perceptual_memory.blocks) > self.max_blocks:
+                removed_blocks = self.perceptual_memory.blocks[self.max_blocks :]
+                self.perceptual_memory.blocks = self.perceptual_memory.blocks[: self.max_blocks]
                 logger.debug(f"记忆堆已满,移除 {len(removed_blocks)} 个旧块")
 
             logger.debug(
                 f"✅ 创建新记忆块: {block.id} (stream={stream_id[:8]}, "
-                f"堆大小={len(self.perceptual_memory.blocks)}/{self.max_blocks})"  # type: ignore[union-attr]
+                f"堆大小={len(self.perceptual_memory.blocks)}/{self.max_blocks})"
             )
 
             # 异步保存
@@ -241,7 +241,7 @@ class PerceptualMemoryManager:
 
             return block
 
-        except (KeyError, TypeError, ValueError, AttributeError) as e:
+        except Exception as e:
            logger.error(f"创建记忆块失败: {e}")
            return None
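The insert-at-0 plus slice-off-the-tail above implements a capped FIFO stack. For reference, `collections.deque(maxlen=...)` gives the same eviction behavior for free; a sketch that ignores the `position_in_stack` bookkeeping (the `50` stands in for `self.max_blocks`):

```python
from collections import deque

# maxlen makes appendleft evict from the right automatically (FIFO cap).
blocks: deque = deque(maxlen=50)
blocks.appendleft("new-block")

# Stack positions can then be derived on demand instead of stored:
positions = {b: i for i, b in enumerate(blocks)}
```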
@@ -249,7 +249,7 @@ class PerceptualMemoryManager:
         """确保消息包含 timestamp 字段并返回其值。"""
         raw_ts = message.get("timestamp", message.get("time"))
         try:
-            timestamp = float(raw_ts) if raw_ts is not None else time.time()
+            timestamp = float(raw_ts)
         except (TypeError, ValueError):
             timestamp = time.time()
         message["timestamp"] = timestamp
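The simplification above is safe because `float(None)` raises `TypeError`, which the `except` clause already catches, so the `is not None` guard was redundant; the `time.time()` fallback is unchanged. Reduced to its core:

```python
import time

def normalize_ts(raw_ts) -> float:
    try:
        return float(raw_ts)  # float(None) raises TypeError -> caught below
    except (TypeError, ValueError):
        return time.time()
```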
@@ -270,7 +270,7 @@ class PerceptualMemoryManager:
         for msg in pending:
             ts = msg.get("timestamp") or msg.get("time")
             try:
-                ts_value = float(ts) if ts is not None else time.time()
+                ts_value = float(ts)
             except (TypeError, ValueError):
                 ts_value = time.time()
             msg["timestamp"] = ts_value
@@ -369,7 +369,7 @@ class PerceptualMemoryManager:
             embedding = await self.embedding_generator.generate(text)
             return embedding
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"生成向量失败: {e}")
             return None
 
@@ -391,7 +391,7 @@ class PerceptualMemoryManager:
             embeddings = await self.embedding_generator.generate_batch(texts)
             return embeddings
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"批量生成向量失败: {e}")
             return [None] * len(texts)
 
@@ -425,18 +425,18 @@ class PerceptualMemoryManager:
                 logger.warning("查询向量生成失败,返回空列表")
                 return []
 
-            # 批量计算相似度(使用异步版本)
+            # 批量计算所有块的相似度(使用异步版本)
             blocks_with_embeddings = [
-                block for block in self.perceptual_memory.blocks  # type: ignore[union-attr]
+                block for block in self.perceptual_memory.blocks
                 if block.embedding is not None
             ]
 
             if not blocks_with_embeddings:
                 return []
 
-            # 批量计算相似度,过滤掉 None 向量
+            # 批量计算相似度
             block_embeddings = [block.embedding for block in blocks_with_embeddings]
-            similarities = await batch_cosine_similarity_async(query_embedding, block_embeddings)  # type: ignore[arg-type]
+            similarities = await batch_cosine_similarity_async(query_embedding, block_embeddings)
 
             # 过滤和排序
             scored_blocks = []
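`batch_cosine_similarity_async` is assumed here to score one query vector against many block vectors in a single pass. An equivalent synchronous sketch with numpy, shown only to make the math concrete:

```python
import numpy as np

def batch_cosine_similarity(query: np.ndarray, embeddings: list[np.ndarray]) -> list[float]:
    mat = np.stack(embeddings)                                            # (n, d)
    q = query / max(np.linalg.norm(query), 1e-12)                         # unit query
    m = mat / np.maximum(np.linalg.norm(mat, axis=1, keepdims=True), 1e-12)
    return (m @ q).tolist()                                               # cosine per row
```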
@@ -489,7 +489,7 @@ class PerceptualMemoryManager:
 
             return recalled_blocks
 
-        except (KeyError, TypeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"召回记忆块失败: {e}")
             return []
 
@@ -503,20 +503,20 @@ class PerceptualMemoryManager:
         try:
             # 从原位置移除这些块
             for block in blocks_to_promote:
-                if block in self.perceptual_memory.blocks:  # type: ignore[union-attr]
-                    self.perceptual_memory.blocks.remove(block)  # type: ignore[union-attr]
+                if block in self.perceptual_memory.blocks:
+                    self.perceptual_memory.blocks.remove(block)
 
             # 将它们插入到堆顶(保持原有的相对顺序)
             for block in reversed(blocks_to_promote):
-                self.perceptual_memory.blocks.insert(0, block)  # type: ignore[union-attr]
+                self.perceptual_memory.blocks.insert(0, block)
 
             # 更新所有块的位置
-            for i, block in enumerate(self.perceptual_memory.blocks):  # type: ignore[union-attr]
+            for i, block in enumerate(self.perceptual_memory.blocks):
                 block.position_in_stack = i
 
             logger.debug(f"提升 {len(blocks_to_promote)} 个块到堆顶")
 
-        except (ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"提升块失败: {e}")
 
     def get_activated_blocks(self) -> list[MemoryBlock]:
@@ -531,7 +531,7 @@ class PerceptualMemoryManager:
 
         activated = [
             block
-            for block in self.perceptual_memory.blocks  # type: ignore[union-attr]
+            for block in self.perceptual_memory.blocks
             if block.recall_count >= self.activation_threshold
         ]
 
@@ -552,12 +552,12 @@ class PerceptualMemoryManager:
 
         try:
             # 查找并移除块
-            for i, block in enumerate(self.perceptual_memory.blocks):  # type: ignore[union-attr]
+            for i, block in enumerate(self.perceptual_memory.blocks):
                 if block.id == block_id:
-                    self.perceptual_memory.blocks.pop(i)  # type: ignore[union-attr]
+                    self.perceptual_memory.blocks.pop(i)
 
                     # 更新剩余块的位置
-                    for j, b in enumerate(self.perceptual_memory.blocks):  # type: ignore[union-attr]
+                    for j, b in enumerate(self.perceptual_memory.blocks):
                         b.position_in_stack = j
 
                     # 异步保存
@@ -568,7 +568,7 @@ class PerceptualMemoryManager:
             logger.warning(f"记忆块不存在: {block_id}")
             return False
 
-        except (ValueError, AttributeError) as e:
+        except Exception as e:
            logger.error(f"移除记忆块失败: {e}")
            return False
@@ -607,22 +607,22 @@ class PerceptualMemoryManager:
             self._cleanup_pending_messages()
 
             # 保存到 JSON 文件
-            import json
+            import orjson
 
             save_path = self.data_dir / "perceptual_memory.json"
             data = self.perceptual_memory.to_dict()
 
-            save_path.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8")
+            save_path.write_bytes(orjson.dumps(data, option=orjson.OPT_INDENT_2))
 
             logger.debug(f"感知记忆已保存到 {save_path}")
 
-        except (OSError, TypeError, ValueError) as e:
+        except Exception as e:
             logger.error(f"保存感知记忆失败: {e}")
 
     async def _load_from_disk(self) -> None:
         """从磁盘加载感知记忆"""
         try:
-            import json
+            import orjson
 
             load_path = self.data_dir / "perceptual_memory.json"
 
@@ -630,13 +630,13 @@ class PerceptualMemoryManager:
                 logger.debug("未找到感知记忆数据文件")
                 return
 
-            data = json.loads(load_path.read_text(encoding="utf-8"))
+            data = orjson.loads(load_path.read_bytes())
             self.perceptual_memory = PerceptualMemory.from_dict(data)
 
             # 重新加载向量数据
             await self._reload_embeddings()
 
-        except (OSError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"加载感知记忆失败: {e}")
 
     async def _reload_embeddings(self) -> None:
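On the json → orjson swap above: `orjson.dumps` returns bytes (hence `write_bytes`/`read_bytes` replacing `write_text`/`read_text`), always emits UTF-8 so `ensure_ascii=False` is no longer needed, and `OPT_INDENT_2` keeps the on-disk file human-readable. A minimal round-trip sketch with an illustrative path and payload:

```python
import orjson
from pathlib import Path

path = Path("perceptual_memory.json")   # illustrative path
data = {"blocks": [], "pending_messages": []}

path.write_bytes(orjson.dumps(data, option=orjson.OPT_INDENT_2))  # bytes out
restored = orjson.loads(path.read_bytes())                        # bytes in
assert restored == data
```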
@@ -680,7 +680,7 @@ class PerceptualMemoryManager:
 
             self._initialized = False
 
-        except (OSError, RuntimeError, ValueError) as e:
+        except Exception as e:
             logger.error(f"关闭感知记忆管理器失败: {e}")
 
 
@@ -690,7 +690,7 @@ _perceptual_manager_instance: PerceptualMemoryManager | None = None
 
 def get_perceptual_manager() -> PerceptualMemoryManager:
     """获取感知记忆管理器单例"""
-    global _perceptual_manager_instance  # type: ignore
+    global _perceptual_manager_instance
     if _perceptual_manager_instance is None:
         _perceptual_manager_instance = PerceptualMemoryManager()
     return _perceptual_manager_instance
@@ -92,7 +92,7 @@ class ShortTermMemoryManager:
             self._initialized = True
             logger.debug(f"短期记忆管理器初始化完成 (已加载 {len(self.memories)} 条记忆)")
 
-        except (OSError, RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"短期记忆管理器初始化失败: {e}")
             raise
 
@@ -144,7 +144,7 @@ class ShortTermMemoryManager:
 
             return result_memory
 
-        except (RuntimeError, ValueError, KeyError) as e:
+        except Exception as e:
             logger.error(f"添加短期记忆失败: {e}")
             return None
 
@@ -198,7 +198,7 @@ class ShortTermMemoryManager:
 
             # 调用短期记忆构建模型
             llm = LLMRequest(
-                model_set=model_config.model_task_config.memory_short_term_builder,  # type: ignore[union-attr]
+                model_set=model_config.model_task_config.memory_short_term_builder,
                 request_type="short_term_memory.extract",
             )
 
@@ -235,7 +235,7 @@ class ShortTermMemoryManager:
             logger.debug(f"提取结构化记忆: {memory.content[:50]}...")
             return memory
 
-        except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"提取结构化记忆失败: {e}")
             return None
 
@@ -303,7 +303,7 @@ class ShortTermMemoryManager:
 
             # 调用短期记忆决策模型
             llm = LLMRequest(
-                model_set=model_config.model_task_config.memory_short_term_decider,  # type: ignore[union-attr]
+                model_set=model_config.model_task_config.memory_short_term_decider,
                 request_type="short_term_memory.decide",
             )
 
@@ -340,7 +340,7 @@ class ShortTermMemoryManager:
             logger.debug(f"LLM 决策完成: {decision}")
             return decision
 
-        except (RuntimeError, ValueError, KeyError) as e:
+        except Exception as e:
             logger.error(f"LLM 决策失败: {e}")
             # 默认创建新记忆
             return ShortTermDecision(
@@ -431,7 +431,7 @@ class ShortTermMemoryManager:
             self.memories.append(new_memory)
             return new_memory
 
-        except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"执行决策失败: {e}")
             return None
 
@@ -465,7 +465,7 @@ class ShortTermMemoryManager:
 
             return scored[:top_k]
 
-        except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"查找相似记忆失败: {e}")
             return []
 
@@ -490,7 +490,7 @@ class ShortTermMemoryManager:
             embedding = await self.embedding_generator.generate(text)
             return embedding
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"生成向量失败: {e}")
             return None
 
@@ -512,7 +512,7 @@ class ShortTermMemoryManager:
             embeddings = await self.embedding_generator.generate_batch(texts)
             return embeddings
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
            logger.error(f"批量生成向量失败: {e}")
            return [None] * len(texts)
@@ -532,11 +532,7 @@ class ShortTermMemoryManager:
             json_str = re.sub(r"/\*.*?\*/", "", json_str, flags=re.DOTALL)
 
             data = json_repair.loads(json_str)
-            if isinstance(data, dict):
-                return data
-            else:
-                logger.warning(f"JSON 解析返回非字典类型: {type(data)}")
-                return None
+            return data
 
         except json.JSONDecodeError as e:
             logger.warning(f"JSON 解析失败: {e}, 响应: {response[:200]}")
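Dropping the `isinstance` guard above means callers now see whatever `json_repair.loads` returns, which can be a list or scalar for non-object payloads. If the dict guarantee still matters at some call sites, a thin hypothetical wrapper restores it:

```python
import json_repair

def parse_json_dict(json_str: str) -> dict | None:
    # json_repair.loads can return list/str/int when the repaired payload
    # is not a JSON object; normalize that back to None here.
    data = json_repair.loads(json_str)
    return data if isinstance(data, dict) else None
```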
@@ -586,7 +582,7 @@ class ShortTermMemoryManager:
             logger.debug(f"检索到 {len(results)} 条短期记忆")
             return results
 
-        except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"检索短期记忆失败: {e}")
             return []
 
@@ -646,7 +642,7 @@ class ShortTermMemoryManager:
             # 异步保存
             asyncio.create_task(self._save_to_disk())
 
-        except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"清除已转移记忆失败: {e}")
 
     def get_statistics(self) -> dict[str, Any]:
@@ -670,6 +666,8 @@ class ShortTermMemoryManager:
         """保存短期记忆到磁盘"""
         async with self._save_lock:
             try:
+                import orjson
+
                 save_path = self.data_dir / "short_term_memory.json"
                 data = {
                     "memories": [mem.to_dict() for mem in self.memories],
@@ -677,23 +675,25 @@ class ShortTermMemoryManager:
                     "transfer_threshold": self.transfer_importance_threshold,
                 }
 
-                save_path.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8")
+                save_path.write_bytes(orjson.dumps(data, option=orjson.OPT_INDENT_2))
 
                 logger.debug(f"短期记忆已保存到 {save_path}")
 
-            except (OSError, TypeError, ValueError) as e:
+            except Exception as e:
                 logger.error(f"保存短期记忆失败: {e}")
 
     async def _load_from_disk(self) -> None:
         """从磁盘加载短期记忆"""
         try:
+            import orjson
+
             load_path = self.data_dir / "short_term_memory.json"
 
             if not load_path.exists():
-                logger.debug("未找到短期记忆数据文件")
+                logger.info("未找到短期记忆数据文件")
                 return
 
-            data = json.loads(load_path.read_text(encoding="utf-8"))
+            data = orjson.loads(load_path.read_bytes())
             self.memories = [ShortTermMemory.from_dict(m) for m in data.get("memories", [])]
 
             # 重新生成向量
@@ -701,7 +701,7 @@ class ShortTermMemoryManager:
 
             logger.info(f"短期记忆已从 {load_path} 加载 ({len(self.memories)} 条)")
 
-        except (OSError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"加载短期记忆失败: {e}")
 
     async def _reload_embeddings(self) -> None:
@@ -746,7 +746,7 @@ class ShortTermMemoryManager:
             self._initialized = False
             logger.info("短期记忆管理器已关闭")
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"关闭短期记忆管理器失败: {e}")
 
 
@@ -754,9 +754,9 @@ class ShortTermMemoryManager:
 _short_term_manager_instance: ShortTermMemoryManager | None = None
 
 
-def get_short_term_manager() -> ShortTermMemoryManager:  # type: ignore
+def get_short_term_manager() -> ShortTermMemoryManager:
     """获取短期记忆管理器单例"""
-    global _short_term_manager_instance  # type: ignore[name-defined,misc]
+    global _short_term_manager_instance
     if _short_term_manager_instance is None:
         _short_term_manager_instance = ShortTermMemoryManager()
     return _short_term_manager_instance
@@ -9,7 +9,7 @@ from collections.abc import Iterable
 import networkx as nx
 
 from src.common.logger import get_logger
-from src.memory_graph.models import EdgeType, Memory, MemoryEdge
+from src.memory_graph.models import Memory, MemoryEdge
 
 logger = get_logger(__name__)
 
@@ -204,7 +204,7 @@ class GraphStore:
             logger.debug(f"添加节点成功: {node_id} -> {memory_id}")
             return True
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"添加节点失败: {e}")
             return False
 
@@ -253,7 +253,7 @@ class GraphStore:
                     break
 
             return True
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"更新节点失败: {e}")
             return False
 
@@ -288,7 +288,8 @@ class GraphStore:
         import uuid
         from datetime import datetime
 
-        # EdgeType 已在模块顶部导入
+        from src.memory_graph.models import EdgeType, MemoryEdge
 
         edge_id = str(uuid.uuid4())
         created_at = datetime.now().isoformat()
 
@@ -314,7 +315,7 @@ class GraphStore:
 
         # 尝试转换 edge_type
         try:
-            edge_type_enum: EdgeType = EdgeType(edge_type)
+            edge_type_enum = EdgeType(edge_type)
         except ValueError:
             edge_type_enum = EdgeType.RELATION
 
@@ -337,7 +338,7 @@ class GraphStore:
             logger.debug(f"添加边成功: {source_id} -> {target_id} ({relation})")
             return edge_id
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"添加边失败: {e}")
             return None
 
@@ -401,7 +402,7 @@ class GraphStore:
                     break
 
             return True
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"更新边失败: {e}")
             return False
 
@@ -451,7 +452,7 @@ class GraphStore:
                 memory.edges = [e for e in memory.edges if e.id != edge_id]
 
             return True
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"删除边失败: {e}")
             return False
 
@@ -507,7 +508,7 @@ class GraphStore:
             logger.info(f"成功合并记忆: {source_memory_ids} -> {target_memory_id}")
             return True
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"合并记忆失败: {e}")
             return False
 
@@ -693,7 +694,7 @@ class GraphStore:
 
         except nx.NetworkXNoPath:
             return None
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"查找路径失败: {e}")
             return None
 
@@ -745,8 +746,7 @@ class GraphStore:
         Returns:
             NetworkX 子图
         """
-        subgraph = self.graph.subgraph(node_ids)
-        return subgraph.copy()  # type: ignore[return-value]
+        return self.graph.subgraph(node_ids).copy()
 
     def merge_nodes(self, source_id: str, target_id: str) -> None:
         """
@@ -870,7 +870,7 @@ class GraphStore:
         # 5. 同步图中的边到 Memory.edges(保证内存对象和图一致)
         try:
             store._sync_memory_edges_from_graph()
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError):
+        except Exception:
             logger.exception("同步图边到记忆.edges 失败")
 
         store._rebuild_node_edge_index()
@@ -924,7 +924,7 @@ class GraphStore:
                 try:
                     # 使用 MemoryEdge.from_dict 构建对象
                     mem_edge = MemoryEdge.from_dict(edge_dict)
-                except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError):
+                except Exception:
                     # 兼容性:直接构造对象
                     mem_edge = MemoryEdge(
                         id=edge_dict["id"] or "",
@@ -978,7 +978,7 @@ class GraphStore:
             logger.debug(f"成功删除记忆: {memory_id}")
             return True
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
            logger.error(f"删除记忆失败 {memory_id}: {e}")
            return False
@@ -13,6 +13,7 @@ from datetime import datetime
 from pathlib import Path
 
 import aiofiles
+import orjson
 
 from src.common.logger import get_logger
 from src.memory_graph.models import StagedMemory
@@ -67,6 +68,7 @@ async def safe_atomic_write(temp_path: Path, target_path: Path, max_retries: int
                 target_path.rename(old_file)
             except OSError:
                 # 策略3: 使用时间戳后缀
+                from datetime import datetime
                 backup_file = target_path.with_suffix(f".bak_{datetime.now().strftime('%H%M%S')}")
                 target_path.rename(backup_file)
                 # 标记稍后清理
@@ -125,7 +127,7 @@ async def _cleanup_backup_files(directory: Path, file_stem: str, keep_recent: in
             except OSError as e:
                 logger.debug(f"清理备份文件失败: {old_file.name}, {e}")
 
-    except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+    except Exception as e:
         logger.debug(f"清理备份文件任务失败: {e}")
 
 
@@ -190,12 +192,11 @@ class PersistenceManager:
                 "statistics": graph_store.get_statistics(),
             }
 
-            # 使用 json 序列化
-            json_data = json.dumps(
+            # 使用 orjson 序列化(更快)
+            json_data = orjson.dumps(
                 data,
-                indent=2,
-                ensure_ascii=False
-            ).encode("utf-8")
+                option=orjson.OPT_INDENT_2 | orjson.OPT_SERIALIZE_NUMPY,
+            )
 
             # 原子写入(先写临时文件,再重命名)
             temp_file = self.graph_file.with_suffix(".tmp")
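The serializer swap keeps the atomic-write pattern intact: bytes go to a `.tmp` file first, then replace the target, so readers never observe a half-written file. A minimal sketch of the same pattern; `safe_atomic_write` from this file adds the Windows-specific retry and backup strategies on top of the bare rename:

```python
import aiofiles
import orjson
from pathlib import Path

async def atomic_save(data: dict, target: Path) -> None:
    # orjson.dumps returns bytes; OPT_SERIALIZE_NUMPY handles numpy scalars/arrays.
    payload = orjson.dumps(data, option=orjson.OPT_INDENT_2 | orjson.OPT_SERIALIZE_NUMPY)
    tmp = target.with_suffix(".tmp")
    async with aiofiles.open(tmp, "wb") as f:
        await f.write(payload)
    tmp.replace(target)  # atomic on POSIX; safe_atomic_write adds retries for Windows
```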
@@ -207,7 +208,7 @@ class PersistenceManager:
 
             logger.debug(f"图数据已保存: {self.graph_file}, 大小: {len(json_data) / 1024:.2f} KB")
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"保存图数据失败: {e}")
             raise
 
@@ -234,7 +235,7 @@ class PersistenceManager:
             try:
                 async with aiofiles.open(self.graph_file, "rb") as f:
                     json_data = await f.read()
-                data = json.loads(json_data.decode("utf-8"))
+                data = orjson.loads(json_data)
                 break
             except OSError as e:
                 if attempt == max_retries - 1:
@@ -252,7 +253,7 @@ class PersistenceManager:
             logger.debug(f"图数据加载完成: {graph_store.get_statistics()}")
             return graph_store
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"加载图数据失败: {e}")
             # 尝试加载备份
             return await self._load_from_backup()
@@ -275,7 +276,7 @@ class PersistenceManager:
                 "staged_memories": [sm.to_dict() for sm in staged_memories],
             }
 
-            json_data = json.dumps(data, indent=2, ensure_ascii=False).encode("utf-8")
+            json_data = orjson.dumps(data, option=orjson.OPT_INDENT_2 | orjson.OPT_SERIALIZE_NUMPY)
 
             temp_file = self.staged_file.with_suffix(".tmp")
             async with aiofiles.open(temp_file, "wb") as f:
@@ -284,7 +285,7 @@ class PersistenceManager:
             # 使用安全的原子写入
             await safe_atomic_write(temp_file, self.staged_file)
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"保存临时记忆失败: {e}")
             raise
 
@@ -308,7 +309,7 @@ class PersistenceManager:
             try:
                 async with aiofiles.open(self.staged_file, "rb") as f:
                     json_data = await f.read()
-                data = json.loads(json_data.decode("utf-8"))
+                data = orjson.loads(json_data)
                 break
             except OSError as e:
                 if attempt == max_retries - 1:
@@ -323,7 +324,7 @@ class PersistenceManager:
             staged_memories = [StagedMemory.from_dict(sm) for sm in data.get("staged_memories", [])]
             return staged_memories
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"加载临时记忆失败: {e}")
             return []
 
@@ -353,7 +354,7 @@ class PersistenceManager:
 
             return None
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"创建备份失败: {e}")
             return None
 
@@ -377,7 +378,7 @@ class PersistenceManager:
             try:
                 async with aiofiles.open(latest_backup, "rb") as f:
                     json_data = await f.read()
-                data = json.loads(json_data.decode("utf-8"))
+                data = orjson.loads(json_data)
                 break
             except OSError as e:
                 if attempt == max_retries - 1:
@@ -394,7 +395,7 @@ class PersistenceManager:
 
             return graph_store
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"从备份恢复失败: {e}")
             return None
 
@@ -413,7 +414,7 @@ class PersistenceManager:
                 backup_file.unlink()
                 logger.debug(f"删除旧备份: {backup_file}")
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.warning(f"清理旧备份失败: {e}")
 
     async def start_auto_save(
@@ -458,7 +459,7 @@ class PersistenceManager:
                     if current_time.minute == 0:  # 每个整点
                         await self.create_backup()
 
-                except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+                except Exception as e:
                     logger.error(f"自动保存失败: {e}")
 
         self._auto_save_task = asyncio.create_task(auto_save_loop())
@@ -99,18 +99,16 @@ class VectorStore:
             # 处理额外的元数据,将 list 转换为 JSON 字符串
             for key, value in node.metadata.items():
                 if isinstance(value, list | dict):
-                    import json
-                    metadata[key] = json.dumps(value, ensure_ascii=False)
-                elif isinstance(value, str):
+                    import orjson
+                    metadata[key] = orjson.dumps(value, option=orjson.OPT_NON_STR_KEYS).decode("utf-8")
+                elif isinstance(value, str | int | float | bool) or value is None:
                     metadata[key] = value
-                elif isinstance(value, (int, float, bool)) or value is None:
-                    metadata[key] = str(value) if value is not None else ""
                 else:
                     metadata[key] = str(value)
 
             self.collection.add(
                 ids=[node.id],
-                embeddings=[node.embedding.tolist()] if node.embedding is not None else [[0.0]],  # type: ignore[union-attr]
+                embeddings=[node.embedding.tolist()],
                 metadatas=[metadata],
                 documents=[node.content],  # 文本内容用于检索
             )
@@ -140,6 +138,7 @@ class VectorStore:
 
         try:
             # 准备元数据
+            import orjson
             metadatas = []
             for n in valid_nodes:
                 metadata = {
@@ -149,24 +148,21 @@ class VectorStore:
                 }
                 for key, value in n.metadata.items():
                     if isinstance(value, list | dict):
-                        import json
-                        metadata[key] = json.dumps(value, ensure_ascii=False)
-                    elif isinstance(value, str):
-                        metadata[key] = value
-                    elif isinstance(value, (int, float, bool)) or value is None:
-                        metadata[key] = str(value) if value is not None else ""
+                        metadata[key] = orjson.dumps(value, option=orjson.OPT_NON_STR_KEYS).decode("utf-8")
+                    elif isinstance(value, str | int | float | bool) or value is None:
+                        metadata[key] = value  # type: ignore
                     else:
                         metadata[key] = str(value)
                 metadatas.append(metadata)
 
             self.collection.add(
                 ids=[n.id for n in valid_nodes],
-                embeddings=[n.embedding.tolist() if n.embedding is not None else [0.0] for n in valid_nodes],  # type: ignore[union-attr]
+                embeddings=[n.embedding.tolist() for n in valid_nodes],  # type: ignore
                 metadatas=metadatas,
                 documents=[n.content for n in valid_nodes],
             )
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
            logger.error(f"批量添加节点失败: {e}")
            raise
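Background for the two hunks above: Chroma metadata values must be scalars (str/int/float/bool or None), which is why lists and dicts are flattened to JSON strings before `collection.add`; `orjson.OPT_NON_STR_KEYS` additionally tolerates non-string dict keys. A standalone sketch of that sanitizer, extracted for clarity:

```python
import orjson

def sanitize_metadata(raw: dict) -> dict:
    clean = {}
    for key, value in raw.items():
        if isinstance(value, (list, dict)):
            # Chroma only stores scalars; stash structures as JSON strings.
            clean[key] = orjson.dumps(value, option=orjson.OPT_NON_STR_KEYS).decode("utf-8")
        elif isinstance(value, (str, int, float, bool)) or value is None:
            clean[key] = value
        else:
            clean[key] = str(value)  # last-resort stringification
    return clean
```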
@@ -206,6 +202,7 @@ class VectorStore:
             )
 
             # 解析结果
+            import orjson
             similar_nodes = []
             # 修复:检查 ids 列表长度而不是直接判断真值(避免 numpy 数组歧义)
             ids = results.get("ids")
@@ -226,9 +223,8 @@ class VectorStore:
                     for key, value in list(metadata.items()):
                         if isinstance(value, str) and (value.startswith("[") or value.startswith("{")):
                             try:
-                                import json
-                                metadata[key] = json.loads(value)
-                            except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError):
+                                metadata[key] = orjson.loads(value)
+                            except Exception:
                                 pass  # 保持原值
 
                     similar_nodes.append((node_id, similarity, metadata))
@@ -236,7 +232,7 @@ class VectorStore:
             logger.debug(f"相似节点搜索: 找到 {len(similar_nodes)} 个结果")
             return similar_nodes
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             logger.error(f"相似节点搜索失败: {e}")
             raise
 
@@ -288,7 +284,7 @@ class VectorStore:
         # 1. 对每个查询执行搜索
         all_results: dict[str, dict[str, Any]] = {}  # node_id -> {scores, metadata}
 
-        for _, (query_emb, weight) in enumerate(zip(query_embeddings, query_weights)):
+        for i, (query_emb, weight) in enumerate(zip(query_embeddings, query_weights)):
            # 搜索更多结果以提高融合质量
            search_limit = limit * 3
            results = await self.search_similar_nodes(
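The multi-query loop above over-fetches (`limit * 3`) per query and then fuses scores across queries into `all_results`. The fusion body is not shown in this hunk; a minimal sketch of the likely accumulation — weight-scaled similarity summed per node id — assuming the same `(node_id, similarity, metadata)` result shape:

```python
def fuse_scores(per_query_results: list[list[tuple]], weights: list[float]) -> dict[str, float]:
    # per_query_results[i] holds (node_id, similarity, metadata) for query i.
    fused: dict[str, float] = {}
    for results, weight in zip(per_query_results, weights):
        for node_id, similarity, _metadata in results:
            fused[node_id] = fused.get(node_id, 0.0) + weight * similarity
    return fused  # sort by value descending and cut to `limit` downstream
```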
@@ -381,7 +377,7 @@ class VectorStore:
 
             return None
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
             # 节点不存在是正常情况,降级为 debug
             logger.debug(f"获取节点失败(节点可能不存在): {e}")
             return None
@@ -436,14 +432,13 @@ class VectorStore:
 
         try:
             # 删除并重新创建集合
-            if self.client is not None:
-                self.client.delete_collection(self.collection_name)
-                self.collection = self.client.get_or_create_collection(
-                    name=self.collection_name,
-                    metadata={"description": "Memory graph node embeddings"},
-                )
+            self.client.delete_collection(self.collection_name)
+            self.collection = self.client.get_or_create_collection(
+                name=self.collection_name,
+                metadata={"description": "Memory graph node embeddings"},
+            )
             logger.warning(f"向量存储已清空: {self.collection_name}")
 
-        except (OSError, RuntimeError, ValueError, AttributeError, KeyError, TypeError) as e:
+        except Exception as e:
            logger.error(f"清空向量存储失败: {e}")
            raise
@@ -82,7 +82,7 @@ class UnifiedMemoryManager:
         self.long_term_manager: LongTermMemoryManager
 
         # 底层 MemoryManager(长期记忆)
-        self.memory_manager: MemoryManager = memory_manager  # type: ignore[assignment]
+        self.memory_manager: MemoryManager = memory_manager
 
         # 配置参数存储(用于初始化)
         self._config = {
@@ -163,7 +163,7 @@ class UnifiedMemoryManager:
             # 启动自动转移任务
             self._start_auto_transfer_task()
 
-        except (OSError, RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"统一记忆管理器初始化失败: {e}")
             raise
 
@@ -284,7 +284,7 @@ class UnifiedMemoryManager:
 
             return result
 
-        except (OSError, RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"智能检索失败: {e}")
             return {
                 "perceptual_blocks": [],
@@ -366,10 +366,10 @@ class UnifiedMemoryManager:
 请输出JSON:"""
 
         # 调用记忆裁判模型
-        if not model_config.model_task_config:  # type: ignore[union-attr]
+        if not model_config.model_task_config:
             raise ValueError("模型任务配置未加载")
         llm = LLMRequest(
-            model_set=model_config.model_task_config.memory_judge,  # type: ignore[union-attr]
+            model_set=model_config.model_task_config.memory_judge,
             request_type="unified_memory.judge",
         )
 
@@ -401,7 +401,7 @@ class UnifiedMemoryManager:
 
             return decision
 
-        except (RuntimeError, ValueError, KeyError) as e:
+        except Exception as e:
             logger.error(f"裁判模型评估失败: {e}")
             # 默认判定为不充足,需要检索长期记忆
             return JudgeDecision(
@@ -429,7 +429,7 @@ class UnifiedMemoryManager:
                 done_task.result()
             except asyncio.CancelledError:
                 logger.info(f"{task_name} 后台任务已取消")
-            except (RuntimeError, ValueError, KeyError) as exc:
+            except Exception as exc:
                 logger.error(f"{task_name} 后台任务失败: {exc}")
 
         task.add_done_callback(_callback)
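The broadened `except Exception` in `_callback` matters here: an exception that escapes a done-callback is only logged by the event loop's default handler, never re-raised to the caller, so narrowing the clause silently dropped unexpected failures. The pattern, reduced to its core as a self-contained sketch:

```python
import asyncio

def watch(task: asyncio.Task, task_name: str) -> None:
    def _callback(done_task: asyncio.Task) -> None:
        try:
            done_task.result()  # re-raises the task's exception, if any
        except asyncio.CancelledError:
            print(f"{task_name} cancelled")
        except Exception as exc:
            # Catch everything else, or the error vanishes into the loop's
            # default exception handler.
            print(f"{task_name} failed: {exc}")
    task.add_done_callback(_callback)
```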
@@ -472,7 +472,7 @@ class UnifiedMemoryManager:
                 await self.perceptual_manager.remove_block(block.id)
                 self._trigger_transfer_wakeup()
                 logger.debug(f"✓ 记忆块 {block.id} 已被转移到短期记忆 {stm.id}")
-            except (RuntimeError, ValueError, KeyError, AttributeError) as exc:
+            except Exception as exc:
                 logger.error(f"后台转移失败,记忆块 {block.id}: {exc}")
 
     def _build_manual_multi_queries(self, queries: list[str]) -> list[dict[str, float]]:
@@ -523,6 +523,7 @@ class UnifiedMemoryManager:
         memories = await self.memory_manager.search_memories(**search_params)
         unique_memories = self._deduplicate_memories(memories)
 
+        len(manual_queries) if manual_queries else 1
         return unique_memories
 
     def _deduplicate_memories(self, memories: list[Any]) -> list[Any]:
@@ -632,7 +633,7 @@ class UnifiedMemoryManager:
                 except asyncio.CancelledError:
                     logger.debug("自动转移循环被取消")
                     break
-                except (RuntimeError, ValueError, KeyError, AttributeError) as e:
+                except Exception as e:
                     logger.error(f"自动转移循环异常: {e}")
 
     async def manual_transfer(self) -> dict[str, Any]:
@@ -663,7 +664,7 @@ class UnifiedMemoryManager:
             logger.info(f"手动转移完成: {result}")
             return result
 
-        except (OSError, RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
             logger.error(f"手动转移失败: {e}")
             return {"error": str(e), "transferred_count": 0}
 
@@ -715,5 +716,5 @@ class UnifiedMemoryManager:
             self._initialized = False
             logger.info("统一记忆管理器已关闭")
 
-        except (RuntimeError, ValueError, AttributeError) as e:
+        except Exception as e:
            logger.error(f"关闭统一记忆管理器失败: {e}")