fix: resolve multiple commits during batch processing; clear cache after bulk_create

Author: Windpicker-owo
Date:   2025-11-01 17:31:31 +08:00
parent  1857754bbe
commit  f872a1ff82

2 changed files with 55 additions and 25 deletions


@@ -266,7 +266,14 @@ class CRUDBase:
             await session.refresh(instance)
             # Note: the commit runs automatically when the get_db_session context manager exits,
             # so no explicit commit is needed here
-            return instance
+            # Note: create does not clear the cache, because:
+            # 1. new records do not affect existing single-record query caches (get/get_by)
+            # 2. get_multi caches expire on their own via the TTL mechanism
+            # 3. clearing the whole cache is too expensive and hurts performance
+            # If strong consistency is required, query with use_cache=False instead
+            return instance

     async def update(
         self,
@@ -459,8 +466,15 @@ class CRUDBase:
             for instance in instances:
                 await session.refresh(instance)
-            return instances
+            # Cache strategy for bulk creation:
+            # bulk_create is typically used for bulk imports, where clearing the cache is reasonable,
+            # because many records may have been created and cached list queries would be clearly stale
+            cache = await get_cache()
+            await cache.clear()
+            logger.info(f"Cache cleared after bulk-creating {len(instances)} {self.model_name} records")
+            return instances

     async def bulk_update(
         self,
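A minimal, runnable sketch of the cache policy this hunk encodes: create() leaves the cache alone, while bulk_create() clears it after the inserts. The InMemoryCache class and the module-level get_cache() helper below are illustrative stand-ins I introduce for the example; only the get_cache()/clear() names mirror the diff above, and the project's real cache and CRUDBase implementation are assumed, not shown.

import asyncio
from typing import Any


class InMemoryCache:
    """Illustrative stand-in for the project's cache (assumed to expose clear())."""

    def __init__(self) -> None:
        self._store: dict[str, Any] = {}

    async def clear(self) -> None:
        self._store.clear()


_cache = InMemoryCache()


async def get_cache() -> InMemoryCache:
    # Assumption: the real helper returns a shared cache instance.
    return _cache


async def bulk_create(records: list[dict[str, Any]]) -> list[dict[str, Any]]:
    # The real method persists the records through the SQLAlchemy session first;
    # that part is elided here.
    cache = await get_cache()
    # A bulk import can leave many cached list queries stale at once, so clearing
    # the whole cache is the simpler, safer choice than targeted invalidation.
    await cache.clear()
    return records


if __name__ == "__main__":
    created = asyncio.run(bulk_create([{"name": "a"}, {"name": "b"}]))
    print(f"bulk-created {len(created)} records, cache cleared")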


@@ -393,8 +393,10 @@ class AdaptiveBatchScheduler:
     ) -> None:
         """Execute update operations as a batch."""
         async with get_db_session() as session:
-            for op in operations:
-                try:
+            results = []
+            try:
+                # 🔧 Fix: collect all operations and commit once, instead of committing repeatedly inside the loop
+                for op in operations:
                     # Build the update statement
                     stmt = update(op.model_class)
                     for key, value in op.conditions.items():
@@ -404,23 +406,29 @@
                     if op.data:
                         stmt = stmt.values(**op.data)
-                    # Execute the update
+                    # Execute the update, but do not commit yet
                     result = await session.execute(stmt)
-                    await session.commit()
-                    # Set the result
+                    results.append((op, result.rowcount))
+                # Commit once after all operations have succeeded
+                await session.commit()
+                # Set the result for every operation
+                for op, rowcount in results:
                     if op.future and not op.future.done():
-                        op.future.set_result(result.rowcount)
+                        op.future.set_result(rowcount)
                     if op.callback:
                         try:
-                            op.callback(result.rowcount)
+                            op.callback(rowcount)
                         except Exception as e:
                             logger.warning(f"Callback execution failed: {e}")
-                except Exception as e:
-                    logger.error(f"Update failed: {e}", exc_info=True)
-                    await session.rollback()
+            except Exception as e:
+                logger.error(f"Batch update failed: {e}", exc_info=True)
+                await session.rollback()
+                # All operations fail together
+                for op in operations:
                     if op.future and not op.future.done():
                         op.future.set_exception(e)
@@ -430,31 +438,39 @@
     ) -> None:
         """Execute delete operations as a batch."""
         async with get_db_session() as session:
-            for op in operations:
-                try:
+            results = []
+            try:
+                # 🔧 Fix: collect all operations and commit once, instead of committing repeatedly inside the loop
+                for op in operations:
                     # Build the delete statement
                     stmt = delete(op.model_class)
                     for key, value in op.conditions.items():
                         attr = getattr(op.model_class, key)
                         stmt = stmt.where(attr == value)
-                    # Execute the delete
+                    # Execute the delete, but do not commit yet
                     result = await session.execute(stmt)
-                    await session.commit()
-                    # Set the result
+                    results.append((op, result.rowcount))
+                # Commit once after all operations have succeeded
+                await session.commit()
+                # Set the result for every operation
+                for op, rowcount in results:
                     if op.future and not op.future.done():
-                        op.future.set_result(result.rowcount)
+                        op.future.set_result(rowcount)
                     if op.callback:
                         try:
-                            op.callback(result.rowcount)
+                            op.callback(rowcount)
                         except Exception as e:
                             logger.warning(f"Callback execution failed: {e}")
-                except Exception as e:
-                    logger.error(f"Delete failed: {e}", exc_info=True)
-                    await session.rollback()
+            except Exception as e:
+                logger.error(f"Batch delete failed: {e}", exc_info=True)
+                await session.rollback()
+                # All operations fail together
+                for op in operations:
                     if op.future and not op.future.done():
                         op.future.set_exception(e)
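A minimal, runnable sketch of the "execute everything, then commit once" pattern this commit introduces for the batch scheduler. BatchOp, FakeSession, and run_batch are illustrative stand-ins; the real code builds SQLAlchemy update()/delete() statements inside get_db_session(), which is not reproduced here. The point shown is the all-or-nothing behaviour: every future resolves only after the single commit, and one failure rolls back and rejects the whole batch.

import asyncio
from dataclasses import dataclass, field
from typing import Callable, Optional


@dataclass
class BatchOp:
    """Stand-in for a queued batch operation (rowcount is what execute() would report)."""
    rowcount: int
    fail: bool = False
    future: asyncio.Future = field(default_factory=asyncio.Future)
    callback: Optional[Callable[[int], None]] = None


class FakeSession:
    """Stand-in for the async database session used by the scheduler."""

    async def execute(self, op: BatchOp) -> int:
        if op.fail:
            raise RuntimeError("simulated statement failure")
        return op.rowcount

    async def commit(self) -> None:
        print("commit (once, for the whole batch)")

    async def rollback(self) -> None:
        print("rollback (whole batch)")


async def run_batch(session: FakeSession, operations: list[BatchOp]) -> None:
    results = []
    try:
        # Execute every statement first; nothing is committed yet.
        for op in operations:
            rowcount = await session.execute(op)
            results.append((op, rowcount))
        # One commit covers the whole batch, so it is all-or-nothing.
        await session.commit()
        for op, rowcount in results:
            if not op.future.done():
                op.future.set_result(rowcount)
            if op.callback:
                op.callback(rowcount)
    except Exception as exc:
        # A single failure rolls back and rejects every operation in the batch.
        await session.rollback()
        for op in operations:
            if not op.future.done():
                op.future.set_exception(exc)


async def main() -> None:
    ok = [BatchOp(rowcount=1), BatchOp(rowcount=2)]
    await run_batch(FakeSession(), ok)
    print([op.future.result() for op in ok])  # [1, 2]

    mixed = [BatchOp(rowcount=1), BatchOp(rowcount=0, fail=True)]
    await run_batch(FakeSession(), mixed)
    print([type(op.future.exception()).__name__ for op in mixed])  # both rejected


if __name__ == "__main__":
    asyncio.run(main())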