From 9a8a3214a72b50ceef4ffc581ec0ea29afebf392 Mon Sep 17 00:00:00 2001 From: minecraft1024a Date: Fri, 7 Nov 2025 22:03:09 +0800 Subject: [PATCH 1/3] =?UTF-8?q?Revert=20"fix(api):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E8=AE=B0=E5=BF=86=E5=8F=AF=E8=A7=86=E5=8C=96=E4=B8=AD=E9=87=8D?= =?UTF-8?q?=E5=A4=8D=E7=9A=84=E8=BE=B9"?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit f8e42f3348d7af88425aa299dd43e430e0005de9. --- src/api/memory_visualizer_router.py | 209 +++++++++------------------- src/api/templates/visualizer.html | 170 +++++----------------- 2 files changed, 101 insertions(+), 278 deletions(-) diff --git a/src/api/memory_visualizer_router.py b/src/api/memory_visualizer_router.py index 1f0eb27ee..b35c1c074 100644 --- a/src/api/memory_visualizer_router.py +++ b/src/api/memory_visualizer_router.py @@ -61,27 +61,15 @@ def find_available_data_files() -> List[Path]: return sorted(files, key=lambda f: f.stat().st_mtime, reverse=True) -def load_graph_data_from_file( - file_path: Optional[Path] = None, - nodes_page: Optional[int] = None, - nodes_per_page: Optional[int] = None, - edges_page: Optional[int] = None, - edges_per_page: Optional[int] = None, -) -> Dict[str, Any]: - """ - 从磁盘加载图数据, 支持分页。 - 如果不提供分页参数, 则加载并缓存所有数据。 - """ +def load_graph_data_from_file(file_path: Optional[Path] = None) -> Dict[str, Any]: + """从磁盘加载图数据""" global graph_data_cache, current_data_file - # 如果是请求分页数据, 则不使用缓存的全量数据 - is_paged_request = nodes_page is not None or edges_page is not None - if file_path and file_path != current_data_file: graph_data_cache = None current_data_file = file_path - if graph_data_cache and not is_paged_request: + if graph_data_cache: return graph_data_cache try: @@ -96,78 +84,53 @@ def load_graph_data_from_file( if not graph_file.exists(): return {"error": f"文件不存在: {graph_file}", "nodes": [], "edges": [], "stats": {}} - # 只有在没有缓存时才从磁盘读取和处理文件 - if not graph_data_cache: - with open(graph_file, "r", encoding="utf-8") as f: - data = orjson.loads(f.read()) + with open(graph_file, "r", encoding="utf-8") as f: + data = orjson.loads(f.read()) - nodes = data.get("nodes", []) - edges = data.get("edges", []) - metadata = data.get("metadata", {}) + nodes = data.get("nodes", []) + edges = data.get("edges", []) + metadata = data.get("metadata", {}) - nodes_dict = { - node["id"]: { - **node, - "label": node.get("content", ""), - "group": node.get("node_type", ""), - "title": f"{node.get('node_type', '')}: {node.get('content', '')}", - } - for node in nodes - if node.get("id") + nodes_dict = { + node["id"]: { + **node, + "label": node.get("content", ""), + "group": node.get("node_type", ""), + "title": f"{node.get('node_type', '')}: {node.get('content', '')}", } + for node in nodes + if node.get("id") + } - edges_list = [] - seen_edge_ids = set() - for edge in edges: - edge_id = edge.get("id") - if edge_id and edge_id not in seen_edge_ids: - edges_list.append( - { - **edge, - "from": edge.get("source", edge.get("source_id")), - "to": edge.get("target", edge.get("target_id")), - "label": edge.get("relation", ""), - "arrows": "to", - } - ) - seen_edge_ids.add(edge_id) - - stats = metadata.get("statistics", {}) - total_memories = stats.get("total_memories", 0) - - graph_data_cache = { - "nodes": list(nodes_dict.values()), - "edges": edges_list, - "memories": [], # TODO: 未来也可以考虑分页加载记忆 - "stats": { - "total_nodes": len(nodes_dict), - "total_edges": len(edges_list), - "total_memories": total_memories, - }, - "current_file": str(graph_file), - "file_size": 
graph_file.stat().st_size, - "file_modified": datetime.fromtimestamp(graph_file.stat().st_mtime).isoformat(), + edges_list = [ + { + **edge, + "from": edge.get("source", edge.get("source_id")), + "to": edge.get("target", edge.get("target_id")), + "label": edge.get("relation", ""), + "arrows": "to", } + for edge in edges + ] - # 如果是分页请求, 则从缓存中切片数据 - if is_paged_request: - paged_data = graph_data_cache.copy() # 浅拷贝一份, 避免修改缓存 - - # 分页节点 - if nodes_page is not None and nodes_per_page is not None: - node_start = (nodes_page - 1) * nodes_per_page - node_end = node_start + nodes_per_page - paged_data["nodes"] = graph_data_cache["nodes"][node_start:node_end] - - # 分页边 - if edges_page is not None and edges_per_page is not None: - edge_start = (edges_page - 1) * edges_per_page - edge_end = edge_start + edges_per_page - paged_data["edges"] = graph_data_cache["edges"][edge_start:edge_end] - - return paged_data + stats = metadata.get("statistics", {}) + total_memories = stats.get("total_memories", 0) + graph_data_cache = { + "nodes": list(nodes_dict.values()), + "edges": edges_list, + "memories": [], + "stats": { + "total_nodes": len(nodes_dict), + "total_edges": len(edges_list), + "total_memories": total_memories, + }, + "current_file": str(graph_file), + "file_size": graph_file.stat().st_size, + "file_modified": datetime.fromtimestamp(graph_file.stat().st_mtime).isoformat(), + } return graph_data_cache + except Exception as e: import traceback @@ -188,7 +151,7 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]: all_memories = memory_manager.graph_store.get_all_memories() nodes_dict = {} - edges_dict = {} + edges_list = [] memory_info = [] for memory in all_memories: @@ -210,8 +173,8 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]: "title": f"{node.node_type.value}: {node.content}", } for edge in memory.edges: - if edge.id not in edges_dict: - edges_dict[edge.id] = { + edges_list.append( # noqa: PERF401 + { "id": edge.id, "from": edge.source_id, "to": edge.target_id, @@ -219,8 +182,7 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]: "arrows": "to", "memory_id": memory.id, } - - edges_list = list(edges_dict.values()) + ) stats = memory_manager.get_statistics() return { @@ -235,67 +197,28 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]: "current_file": "memory_manager (实时数据)", } -@router.get("/api/graph/paged") -async def get_paged_graph( - nodes_page: int = 1, nodes_per_page: int = 100, edges_page: int = 1, edges_per_page: int = 200 -): - """获取分页的记忆图数据""" - try: - # 确保全量数据已加载到缓存 - full_data = load_graph_data_from_file() - if "error" in full_data: - raise HTTPException(status_code=404, detail=full_data["error"]) - - # 从缓存中获取全量数据 - all_nodes = full_data.get("nodes", []) - all_edges = full_data.get("edges", []) - total_nodes = len(all_nodes) - total_edges = len(all_edges) - - # 计算节点分页 - node_start = (nodes_page - 1) * nodes_per_page - node_end = node_start + nodes_per_page - paginated_nodes = all_nodes[node_start:node_end] - - # 计算边分页 - edge_start = (edges_page - 1) * edges_per_page - edge_end = edge_start + edges_per_page - paginated_edges = all_edges[edge_start:edge_end] - - return JSONResponse( - content={ - "success": True, - "data": { - "nodes": paginated_nodes, - "edges": paginated_edges, - "pagination": { - "nodes": { - "page": nodes_page, - "per_page": nodes_per_page, - "total": total_nodes, - "total_pages": (total_nodes + nodes_per_page - 1) // nodes_per_page, - }, - "edges": { - "page": edges_page, - 
"per_page": edges_per_page, - "total": total_edges, - "total_pages": (total_edges + edges_per_page - 1) // edges_per_page, - }, - }, - }, - } - ) - except Exception as e: - return JSONResponse(content={"success": False, "error": str(e)}, status_code=500) - @router.get("/api/graph/full") -async def get_full_graph_deprecated(): - """ - (已废弃) 获取完整记忆图数据。 - 此接口现在只返回第一页的数据, 请使用 /api/graph/paged 进行分页获取。 - """ - return await get_paged_graph(nodes_page=1, nodes_per_page=100, edges_page=1, edges_per_page=200) +async def get_full_graph(): + """获取完整记忆图数据""" + try: + from src.memory_graph.manager_singleton import get_memory_manager + + memory_manager = get_memory_manager() + + data = {} + if memory_manager and memory_manager._initialized: + data = _format_graph_data_from_manager(memory_manager) + else: + # 如果内存管理器不可用,则从文件加载 + data = load_graph_data_from_file() + + return JSONResponse(content={"success": True, "data": data}) + except Exception as e: + import traceback + + traceback.print_exc() + return JSONResponse(content={"success": False, "error": str(e)}, status_code=500) @router.get("/api/files") diff --git a/src/api/templates/visualizer.html b/src/api/templates/visualizer.html index 9c44a420f..47c105863 100644 --- a/src/api/templates/visualizer.html +++ b/src/api/templates/visualizer.html @@ -533,18 +533,11 @@ let network = null; let availableFiles = []; let graphData = { - nodes: new vis.DataSet([]) -, - edges: new vis.DataSet([]) + nodes: [], + edges: [], + memories: [] }; - let originalData = null; // 用于过滤器 - - // 分页状态 - let pagination = { - nodes: { page: 1, per_page: 200, total_pages: 1, total: 0 }, - edges: { page: 1, per_page: 500, total_pages: 1, total: 0 } - }; - let isLoading = false; + let originalData = null; // 节点颜色配置 const nodeColors = { @@ -660,93 +653,35 @@ }); } - // 重置并加载第一页数据 + // 加载图形数据 async function loadGraph() { - if (isLoading) return; - console.log('开始加载初始图数据...'); - - // 重置状态 - graphData.nodes.clear(); - graphData.edges.clear(); - pagination.nodes.page = 1; - pagination.edges.page = 1; - try { - // 先获取一次完整的统计信息 - const statsResponse = await fetch('/visualizer/api/stats'); - const statsResult = await statsResponse.json(); - if(statsResult.success) { - updateStats(statsResult.data); - pagination.nodes.total = statsResult.data.total_nodes; - pagination.edges.total = statsResult.data.total_edges; - pagination.nodes.total_pages = Math.ceil(statsResult.data.total_nodes / pagination.nodes.per_page); - pagination.edges.total_pages = Math.ceil(statsResult.data.total_edges / pagination.edges.per_page); - } else { - throw new Error('获取统计信息失败: ' + statsResult.error); - } - - // 加载第一页 - await loadMoreData(); - - } catch (error) { - console.error('初始加载失败:', error); - alert('初始加载失败: ' + error.message); - } - } - - // 加载更多数据(分页核心) - async function loadMoreData() { - if (isLoading) return; - - const canLoadNodes = pagination.nodes.page <= pagination.nodes.total_pages; - const canLoadEdges = pagination.edges.page <= pagination.edges.total_pages; - - if (!canLoadNodes && !canLoadEdges) { - console.log('所有数据已加载完毕'); - return; - } - - isLoading = true; - document.getElementById('loading').style.display = 'block'; - - try { - const url = `/visualizer/api/graph/paged?nodes_page=${pagination.nodes.page}&nodes_per_page=${pagination.nodes.per_page}&edges_page=${pagination.edges.page}&edges_per_page=${pagination.edges.per_page}`; - console.log(`正在请求: ${url}`); - const response = await fetch(url); + document.getElementById('loading').style.display = 'block'; + + const response = await 
fetch('/visualizer/api/graph/full'); const result = await response.json(); if (result.success) { - console.log(`成功获取 ${result.data.nodes.length} 个节点, ${result.data.edges.length} 个边`); - updateGraph(result.data); // 追加数据 - - // 更新分页信息 - if (result.data.pagination) { - pagination.nodes.page++; - pagination.edges.page++; - } + originalData = result.data; + updateGraph(result.data); + updateStats(result.data.stats); } else { - throw new Error('加载分页数据失败: ' + result.error); + alert('加载失败: ' + result.error); } } catch (error) { - console.error('加载更多数据失败:', error); + console.error('加载图形失败:', error); alert('加载失败: ' + error.message); } finally { - isLoading = false; document.getElementById('loading').style.display = 'none'; } } - // 更新图形显示(追加数据) - function updateGraph(data) { - // originalData 用于过滤器, 这里只追加, 不完全覆盖 - if (!originalData) { - originalData = { nodes: [], edges: [] }; - } - originalData.nodes.push(...data.nodes); - originalData.edges.push(...data.edges); + // 更新图形显示 + function updateGraph(data) { + graphData = data; // 处理节点数据 - const newNodes = data.nodes.map(node => ({ + const nodes = data.nodes.map(node => ({ id: node.id, label: node.label, title: node.title, @@ -756,31 +691,25 @@ })); // 处理边数据 - const newEdges = data.edges.map(edge => ({ + const edges = data.edges.map(edge => ({ id: edge.id, from: edge.from, to: edge.to, label: edge.label, title: edge.title, - width: (edge.importance || 0.5) * 2 + 1 + width: edge.importance * 3 + 1 })); - - // 追加数据到 DataSet - if (newNodes.length > 0) { - graphData.nodes.add(newNodes); - } - if (newEdges.length > 0) { - graphData.edges.add(newEdges); - } - - // 第一次加载时设置数据 - if (pagination.nodes.page === 2) { // 意味着第一页刚加载完 - network.setData({ - nodes: graphData.nodes, - edges: graphData.edges - }); - } + + // 更新网络 + network.setData({ + nodes: new vis.DataSet(nodes), + edges: new vis.DataSet(edges) + }); + + // 注意:setData 会自动触发物理引擎重新布局 + // stabilizationIterationsDone 事件监听器会自动停止物理引擎 } + // 更新统计信息 function updateStats(stats) { document.getElementById('statNodes').textContent = stats.total_nodes; @@ -1234,42 +1163,13 @@ closeFileSelector(); } } -// 页面加载完成后初始化 -window.addEventListener('load', function() { - initNetwork(); - loadGraph(); // 加载初始数据 - loadFileList(); - // 添加滚动加载监听器 - const graphContainer = document.getElementById('memory-graph'); - graphContainer.addEventListener('mousewheel', async (event) => { - if(network) { - const canvasHeight = network.canvas.body.height; - const viewPosition = network.getViewPosition(); - const scale = network.getScale(); - const viewHeight = canvasHeight / scale; - - // 简单的滚动到底部检测(可能需要根据实际情况微调) - if (event.deltaY > 0 && !isLoading) { - const isAtBottom = viewPosition.y > (canvasHeight/2 - viewHeight/2) * 0.8; - if (isAtBottom) { - console.log("滚动到底部,加载更多数据..."); - await loadMoreData(); - } - } - } - }); - // 添加一个按钮用于手动加载 - const loadMoreBtn = document.createElement('button'); - loadMoreBtn.textContent = '加载更多'; - loadMoreBtn.className = 'btn'; - loadMoreBtn.style.position = 'absolute'; - loadMoreBtn.style.bottom = '20px'; - loadMoreBtn.style.right = '20px'; - loadMoreBtn.style.zIndex = '10'; - loadMoreBtn.onclick = loadMoreData; - document.querySelector('.graph-container').appendChild(loadMoreBtn); -}); + // 页面加载完成后初始化 + window.addEventListener('load', function() { + initNetwork(); + loadGraph(); + loadFileList(); + }); From c22c6b72319849e4289c6e3dccf0b52b18bfbc0d Mon Sep 17 00:00:00 2001 From: minecraft1024a Date: Sat, 8 Nov 2025 09:53:44 +0800 Subject: [PATCH 2/3] =?UTF-8?q?fix(api):=20=E4=BF=AE=E5=A4=8D=E8=AE=B0?= 
 =?UTF-8?q?=E5=BF=86=E5=8F=AF=E8=A7=86=E5=8C=96=E4=B8=AD=E9=87=8D=E5=A4=8D?=
 =?UTF-8?q?=E7=9A=84=E8=BE=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When loading graph data from the memory manager or from a file, the edges
collected while iterating over all memories were not deduplicated. An edge
referenced by more than one memory therefore appeared multiple times in the
final graph, cluttering the front-end visualization.

To fix this, a set (or dict) now tracks the IDs of edges that have already
been processed. While iterating, an edge is appended to the final edge list
only if its ID has not been recorded yet. This ensures that every edge appears
exactly once in the graph data, keeping the visualization accurate.
---
 src/api/memory_visualizer_router.py | 34 ++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

diff --git a/src/api/memory_visualizer_router.py b/src/api/memory_visualizer_router.py
index b35c1c074..e80e8ec0e 100644
--- a/src/api/memory_visualizer_router.py
+++ b/src/api/memory_visualizer_router.py
@@ -102,16 +102,21 @@ def load_graph_data_from_file(file_path: Optional[Path] = None) -> Dict[str, Any
         if node.get("id")
     }
 
-    edges_list = [
-        {
-            **edge,
-            "from": edge.get("source", edge.get("source_id")),
-            "to": edge.get("target", edge.get("target_id")),
-            "label": edge.get("relation", ""),
-            "arrows": "to",
-        }
-        for edge in edges
-    ]
+    edges_list = []
+    seen_edge_ids = set()
+    for edge in edges:
+        edge_id = edge.get("id")
+        if edge_id and edge_id not in seen_edge_ids:
+            edges_list.append(
+                {
+                    **edge,
+                    "from": edge.get("source", edge.get("source_id")),
+                    "to": edge.get("target", edge.get("target_id")),
+                    "label": edge.get("relation", ""),
+                    "arrows": "to",
+                }
+            )
+            seen_edge_ids.add(edge_id)
 
     stats = metadata.get("statistics", {})
     total_memories = stats.get("total_memories", 0)
@@ -151,7 +156,7 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]:
     all_memories = memory_manager.graph_store.get_all_memories()
 
     nodes_dict = {}
-    edges_list = []
+    edges_dict = {}
     memory_info = []
 
     for memory in all_memories:
@@ -173,8 +178,8 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]:
                 "title": f"{node.node_type.value}: {node.content}",
             }
         for edge in memory.edges:
-            edges_list.append(  # noqa: PERF401
-                {
+            if edge.id not in edges_dict:
+                edges_dict[edge.id] = {
                     "id": edge.id,
                     "from": edge.source_id,
                     "to": edge.target_id,
@@ -182,7 +187,8 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]:
                     "arrows": "to",
                     "memory_id": memory.id,
                 }
-            )
+
+    edges_list = list(edges_dict.values())
 
     stats = memory_manager.get_statistics()
     return {

From 0c41cd2a13d2a2884001600f57e806cfb322e7e0 Mon Sep 17 00:00:00 2001
From: minecraft1024a
Date: Sat, 8 Nov 2025 10:15:50 +0800
Subject: [PATCH 3/3] feat(visualizer): introduce on-demand core-graph loading
 and node expansion
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Loading the full memory graph at once made the front end slow to load and
laggy to render. This update introduces on-demand loading of a core graph
plus a node-expansion mechanism.

Main changes:

- **Backend (API):**
  - Added a `/api/graph/core` endpoint. Instead of returning the full graph,
    it picks the top-N highest-degree nodes as the initial view, drastically
    reducing the initial payload.
  - Added a `/api/nodes/{node_id}/expand` endpoint so that when the user
    double-clicks a node, the front end can request all of that node's
    neighbor nodes and incident edges, enabling incremental on-demand loading.
  - Optimized the loading logic: a node dictionary and an adjacency list are
    built and cached in memory, so node-expansion queries are answered very
    efficiently.

- **Frontend (UI):**
  - Initial loading now requests the new `/api/graph/core` endpoint instead
    of `/api/graph/full`.
  - Double-clicking a node triggers `expandNode`, which fetches the new nodes
    and edges from the backend and merges them into the existing graph rather
    than re-rendering the whole thing.
  - Graph data is managed with `vis.DataSet`, which supports efficient dynamic
    insertion and updating of nodes and edges.
  - Node size is now tied to degree (connection count), making key nodes
    visually prominent.
---
 src/api/memory_visualizer_router.py | 135 ++++++++++++++++++++------
 src/api/templates/visualizer.html   | 144 +++++++++++++++++++---------
 2 files changed, 207 insertions(+), 72 deletions(-)
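
For reference while reviewing: the core-selection strategy above boils down to
one adjacency-building pass followed by a top-N cut on degree. Below is a
minimal, self-contained sketch, assuming nodes and edges are plain dicts shaped
like the ones the router emits (`id`, `from`, `to`); the names here are
illustrative, not the patch's actual code:

```python
from collections import defaultdict
from typing import Any, Dict, List, Tuple

Node = Dict[str, Any]
Edge = Dict[str, Any]


def build_core_view(
    nodes: Dict[str, Node], edges: List[Edge], limit: int = 100
) -> Tuple[List[Node], List[Edge]]:
    """Pick the top-`limit` nodes by degree and keep only edges between them."""
    adjacency: Dict[str, List[Edge]] = defaultdict(list)
    for edge in edges:
        src, dst = edge["from"], edge["to"]
        if src in nodes and dst in nodes:  # ignore dangling edges
            adjacency[src].append(edge)
            adjacency[dst].append(edge)

    # Degree = number of incident edges; zero-degree nodes sort last.
    ranked = sorted(nodes.values(), key=lambda n: len(adjacency[n["id"]]), reverse=True)
    core_nodes = ranked[:limit]
    core_ids = {n["id"] for n in core_nodes}

    # Dropping edges that leave the core keeps the initial view self-contained.
    core_edges = [e for e in edges if e["from"] in core_ids and e["to"] in core_ids]
    return core_nodes, core_edges


nodes = {"a": {"id": "a"}, "b": {"id": "b"}, "c": {"id": "c"}}
edges = [
    {"id": "e1", "from": "a", "to": "b"},
    {"id": "e2", "from": "a", "to": "c"},
]
core_nodes, core_edges = build_core_view(nodes, edges, limit=2)
print([n["id"] for n in core_nodes])  # ['a', 'b'] -- 'a' wins on degree, 'b' on insertion order
```

The router version folds the degree counting into the loading pass and caches
the adjacency list, which is what makes the expand endpoint below cheap.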
diff --git a/src/api/memory_visualizer_router.py b/src/api/memory_visualizer_router.py
index e80e8ec0e..a60601c00 100644
--- a/src/api/memory_visualizer_router.py
+++ b/src/api/memory_visualizer_router.py
@@ -62,7 +62,10 @@ def find_available_data_files() -> List[Path]:
 
 def load_graph_data_from_file(file_path: Optional[Path] = None) -> Dict[str, Any]:
-    """从磁盘加载图数据"""
+    """
+    Load graph data from disk and build indexes to speed up queries.
+    It is more code up front, but doing it right once means no rework later.
+    """
     global graph_data_cache, current_data_file
 
@@ -77,12 +80,12 @@ def load_graph_data_from_file(file_path: Optional[Path] = None) -> Dict[str, Any
     if not graph_file:
         available_files = find_available_data_files()
         if not available_files:
-            return {"error": "未找到数据文件", "nodes": [], "edges": [], "stats": {}}
+            return {"error": "未找到数据文件", "nodes": [], "edges": [], "stats": {}, "nodes_dict": {}, "adjacency_list": {}}
         graph_file = available_files[0]
         current_data_file = graph_file
 
     if not graph_file.exists():
-        return {"error": f"文件不存在: {graph_file}", "nodes": [], "edges": [], "stats": {}}
+        return {"error": f"文件不存在: {graph_file}", "nodes": [], "edges": [], "stats": {}, "nodes_dict": {}, "adjacency_list": {}}
 
     with open(graph_file, "r", encoding="utf-8") as f:
         data = orjson.loads(f.read())
@@ -97,6 +100,7 @@ def load_graph_data_from_file(file_path: Optional[Path] = None) -> Dict[str, Any
                 "label": node.get("content", ""),
                 "group": node.get("node_type", ""),
                 "title": f"{node.get('node_type', '')}: {node.get('content', '')}",
+                "degree": 0,  # initialize degree to 0
             }
             for node in nodes
             if node.get("id")
@@ -104,26 +108,39 @@ def load_graph_data_from_file(file_path: Optional[Path] = None) -> Dict[str, Any
 
     edges_list = []
     seen_edge_ids = set()
+    adjacency_list = {node_id: [] for node_id in nodes_dict}
+
     for edge in edges:
         edge_id = edge.get("id")
-        if edge_id and edge_id not in seen_edge_ids:
-            edges_list.append(
-                {
-                    **edge,
-                    "from": edge.get("source", edge.get("source_id")),
-                    "to": edge.get("target", edge.get("target_id")),
-                    "label": edge.get("relation", ""),
-                    "arrows": "to",
-                }
-            )
+        source_id = edge.get("source", edge.get("source_id"))
+        target_id = edge.get("target", edge.get("target_id"))
+
+        if edge_id and edge_id not in seen_edge_ids and source_id in nodes_dict and target_id in nodes_dict:
+            formatted_edge = {
+                **edge,
+                "from": source_id,
+                "to": target_id,
+                "label": edge.get("relation", ""),
+                "arrows": "to",
+            }
+            edges_list.append(formatted_edge)
             seen_edge_ids.add(edge_id)
 
+            # Build the adjacency list and update degrees
+            adjacency_list[source_id].append(formatted_edge)
+            adjacency_list[target_id].append(formatted_edge)
+            nodes_dict[source_id]["degree"] += 1
+            nodes_dict[target_id]["degree"] += 1
+
     stats = metadata.get("statistics", {})
     total_memories = stats.get("total_memories", 0)
 
+    # Cache everything that was computed, including the indexes
    graph_data_cache = {
        "nodes": list(nodes_dict.values()),
        "edges": edges_list,
+        "nodes_dict": nodes_dict,  # node lookup table for fast access
+        "adjacency_list": adjacency_list,  # adjacency index for instant neighbor lookups
        "memories": [],
        "stats": {
            "total_nodes": len(nodes_dict),
@@ -138,11 +155,9 @@ def load_graph_data_from_file(file_path: Optional[Path] = None) -> Dict[str, Any
 
     except Exception as e:
         import traceback
-
         traceback.print_exc()
         raise HTTPException(status_code=500, detail=f"加载图数据失败: {e}")
 
-
 @router.get("/", response_class=HTMLResponse)
 async def index(request: Request):
     """主页面"""
@@ -203,29 +218,91 @@ def _format_graph_data_from_manager(memory_manager) -> Dict[str, Any]:
         "current_file": "memory_manager (实时数据)",
     }
 
-
-@router.get("/api/graph/full")
-async def get_full_graph():
-    """获取完整记忆图数据"""
+@router.get("/api/graph/core")
+async def get_core_graph(limit: int = 100):
+    """
+    Return the core graph view.
+    Much smarter than dumping the entire graph on the front end at once.
+    """
     try:
-        from src.memory_graph.manager_singleton import get_memory_manager
+        full_data = load_graph_data_from_file()
+        if "error" in full_data:
+            return JSONResponse(content={"success": False, "error": full_data["error"]}, status_code=404)
 
-        memory_manager = get_memory_manager()
+        # Pick core nodes intelligently: prefer the highest-degree nodes.
+        # A simple strategy, but far better than choosing at random.
+        all_nodes = full_data.get("nodes", [])
+
+        # Sort by degree, descending; break ties by creation time (when available), descending
+        sorted_nodes = sorted(
+            all_nodes,
+            key=lambda n: (n.get("degree", 0), n.get("created_at", 0)),
+            reverse=True
+        )
+
+        core_nodes = sorted_nodes[:limit]
+        core_node_ids = {node["id"] for node in core_nodes}
 
-        data = {}
-        if memory_manager and memory_manager._initialized:
-            data = _format_graph_data_from_manager(memory_manager)
-        else:
-            # 如果内存管理器不可用,则从文件加载
-            data = load_graph_data_from_file()
+        # Only include edges between core nodes, keeping the initial view clean
+        core_edges = [
+            edge for edge in full_data.get("edges", [])
+            if edge.get("from") in core_node_ids and edge.get("to") in core_node_ids
+        ]
 
+        # Make sure the payload matches the structure the front end expects
+        data_to_send = {
+            "nodes": core_nodes,
+            "edges": core_edges,
+            "memories": [],  # the initial load does not need the full memory list
+            "stats": full_data.get("stats", {}),  # stats still describe the whole graph
+            "current_file": full_data.get("current_file", "")
+        }
 
-        return JSONResponse(content={"success": True, "data": data})
+        return JSONResponse(content={"success": True, "data": data_to_send})
     except Exception as e:
         import traceback
-
         traceback.print_exc()
         return JSONResponse(content={"success": False, "error": str(e)}, status_code=500)
 
+@router.get("/api/nodes/{node_id}/expand")
+async def expand_node(node_id: str):
+    """
+    Return all neighbor nodes of the given node, together with the incident edges.
+    This is the on-demand half of the loading strategy.
+    """
+    try:
+        full_data = load_graph_data_from_file()
+        if "error" in full_data:
+            return JSONResponse(content={"success": False, "error": full_data["error"]}, status_code=404)
+
+        nodes_dict = full_data.get("nodes_dict", {})
+        adjacency_list = full_data.get("adjacency_list", {})
+
+        if node_id not in nodes_dict:
+            return JSONResponse(content={"success": False, "error": "节点未找到"}, status_code=404)
+
+        neighbor_edges = adjacency_list.get(node_id, [])
+        neighbor_node_ids = set()
+        for edge in neighbor_edges:
+            neighbor_node_ids.add(edge["from"])
+            neighbor_node_ids.add(edge["to"])
+
+        # Pull the full records for the neighbors out of nodes_dict
+        neighbor_nodes = [nodes_dict[nid] for nid in neighbor_node_ids if nid in nodes_dict]
+
+        return JSONResponse(content={
+            "success": True,
+            "data": {
+                "nodes": neighbor_nodes,
+                "edges": neighbor_edges
+            }
+        })
+    except Exception as e:
+        import traceback
+        traceback.print_exc()
+        return JSONResponse(content={"success": False, "error": str(e)}, status_code=500)
+
+
+
 @router.get("/api/files")
 async def list_files_api():
diff --git a/src/api/templates/visualizer.html b/src/api/templates/visualizer.html
index 47c105863..6a18d3a77 100644
--- a/src/api/templates/visualizer.html
+++ b/src/api/templates/visualizer.html
@@ -532,12 +532,17 @@
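
To exercise the new endpoints end to end, a short client script is enough.
This is a sketch, not part of the patch: it assumes the router is mounted
under `/visualizer` (as the template's fetch calls suggest) on a local
development server, and the base URL, port, and `requests` dependency are
all assumptions:

```python
import requests  # any HTTP client works; requests keeps the sketch short

BASE = "http://127.0.0.1:8000/visualizer"  # hypothetical host, port, and mount point

# Fetch the initial core view (top nodes by degree).
core = requests.get(f"{BASE}/api/graph/core", params={"limit": 100}, timeout=10).json()
assert core["success"], core.get("error")
nodes = {n["id"]: n for n in core["data"]["nodes"]}
edges = {e["id"]: e for e in core["data"]["edges"]}

# Expand one node, merging its neighborhood by ID the same way the front end does.
node_id = next(iter(nodes))
expansion = requests.get(f"{BASE}/api/nodes/{node_id}/expand", timeout=10).json()
if expansion["success"]:
    nodes.update({n["id"]: n for n in expansion["data"]["nodes"]})
    edges.update({e["id"]: e for e in expansion["data"]["edges"]})

print(f"{len(nodes)} nodes and {len(edges)} edges after one expansion")
```

The merge-by-ID dictionaries mirror what `vis.DataSet` does on the front end
when `expandNode` adds a neighborhood to the existing graph.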