refactor(database): 将同步数据库操作迁移为异步操作
将整个项目的数据库操作从同步模式迁移为异步模式,主要涉及以下修改:

- 将 `with get_db_session()` 改为 `async with get_db_session()`
- 将同步的 SQLAlchemy 查询方法改为异步执行
- 更新相关的方法签名,添加 async/await 关键字
- 修复由于异步化导致的并发问题和性能问题

这些修改提高了数据库操作的并发性能,避免了阻塞主线程,提升了系统的整体响应能力。涉及修改的模块包括表情包管理、反提示注入统计、用户封禁管理、记忆系统、消息存储等多个核心组件。

BREAKING CHANGE: 所有涉及数据库操作的方法现在都需要使用异步调用,同步调用将不再工作
This commit is contained in:
@@ -970,7 +970,8 @@ class EntorhinalCortex:
|
||||
|
||||
# 获取数据库中所有节点和内存中所有节点
|
||||
async with get_db_session() as session:
|
||||
db_nodes = {node.concept: node for node in (await session.execute(select(GraphNodes))).scalars()}
|
||||
result = await session.execute(select(GraphNodes))
|
||||
db_nodes = {node.concept: node for node in result.scalars()}
|
||||
memory_nodes = list(self.memory_graph.G.nodes(data=True))
|
||||
|
||||
# 批量准备节点数据
|
||||
@@ -1067,7 +1068,8 @@ class EntorhinalCortex:
|
||||
await session.execute(delete(GraphNodes).where(GraphNodes.concept.in_(nodes_to_delete)))
|
||||
|
||||
# 处理边的信息
|
||||
db_edges = list((await session.execute(select(GraphEdges))).scalars())
|
||||
result = await session.execute(select(GraphEdges))
|
||||
db_edges = list(result.scalars())
|
||||
memory_edges = list(self.memory_graph.G.edges(data=True))
|
||||
|
||||
# 创建边的哈希值字典
|
||||
@@ -1251,7 +1253,8 @@ class EntorhinalCortex:
|
||||
|
||||
# 从数据库加载所有节点
|
||||
async with get_db_session() as session:
|
||||
nodes = list((await session.execute(select(GraphNodes))).scalars())
|
||||
result = await session.execute(select(GraphNodes))
|
||||
nodes = list(result.scalars())
|
||||
for node in nodes:
|
||||
concept = node.concept
|
||||
try:
|
||||
@@ -1286,7 +1289,8 @@ class EntorhinalCortex:
|
||||
continue
|
||||
|
||||
# 从数据库加载所有边
|
||||
edges = list((await session.execute(select(GraphEdges))).scalars())
|
||||
result = await session.execute(select(GraphEdges))
|
||||
edges = list(result.scalars())
|
||||
for edge in edges:
|
||||
source = edge.source
|
||||
target = edge.target
|
||||
|
||||
@@ -184,6 +184,11 @@ class AsyncMemoryQueue:
|
||||
from src.chat.memory_system.Hippocampus import hippocampus_manager
|
||||
|
||||
if hippocampus_manager._initialized:
|
||||
# 确保海马体对象已正确初始化
|
||||
if not hippocampus_manager._hippocampus.parahippocampal_gyrus:
|
||||
logger.warning("海马体对象未完全初始化,进行同步初始化")
|
||||
hippocampus_manager._hippocampus.initialize()
|
||||
|
||||
await hippocampus_manager.build_memory()
|
||||
return True
|
||||
return False
|
||||
|
||||
@@ -108,7 +108,7 @@ class InstantMemory:
|
||||
|
||||
@staticmethod
|
||||
async def store_memory(memory_item: MemoryItem):
|
||||
with get_db_session() as session:
|
||||
async with get_db_session() as session:
|
||||
memory = Memory(
|
||||
memory_id=memory_item.memory_id,
|
||||
chat_id=memory_item.chat_id,
|
||||
@@ -161,20 +161,21 @@ class InstantMemory:
|
||||
logger.info(f"start_time: {start_time}, end_time: {end_time}")
|
||||
# 检索包含关键词的记忆
|
||||
memories_set = set()
|
||||
with get_db_session() as session:
|
||||
async with get_db_session() as session:
|
||||
if start_time and end_time:
|
||||
start_ts = start_time.timestamp()
|
||||
end_ts = end_time.timestamp()
|
||||
|
||||
query = session.execute(
|
||||
query = (await session.execute(
|
||||
select(Memory).where(
|
||||
(Memory.chat_id == self.chat_id)
|
||||
& (Memory.create_time >= start_ts)
|
||||
& (Memory.create_time < end_ts)
|
||||
)
|
||||
).scalars()
|
||||
)).scalars()
|
||||
else:
|
||||
query = session.execute(select(Memory).where(Memory.chat_id == self.chat_id)).scalars()
|
||||
query = result = await session.execute(select(Memory).where(Memory.chat_id == self.chat_id))
|
||||
result.scalars()
|
||||
for mem in query:
|
||||
# 对每条记忆
|
||||
mem_keywords_str = mem.keywords or "[]"
|
||||
|
||||
Reference in New Issue
Block a user