Windpicker-owo
2025-12-17 11:45:15 +08:00
24 changed files with 624 additions and 180 deletions

View File

@@ -0,0 +1,303 @@
import asyncio
import sys
from pathlib import Path
# Add the project root to the Python path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
from src.common.logger import get_logger
from src.memory_graph.manager_singleton import get_unified_memory_manager
logger = get_logger("memory_transfer_check")
def print_section(title: str):
"""打印分节标题"""
print(f"\n{'=' * 60}")
print(f" {title}")
print(f"{'=' * 60}\n")
async def check_short_term_status():
"""检查短期记忆状态"""
print_section("1. 短期记忆状态检查")
manager = get_unified_memory_manager()
short_term = manager.short_term_manager
# 获取统计信息
stats = short_term.get_statistics()
print(f"📊 当前记忆数量: {stats['total_memories']}/{stats['max_memories']}")
# 计算占用率
if stats["max_memories"] > 0:
occupancy = stats["total_memories"] / stats["max_memories"]
print(f"📈 容量占用率: {occupancy:.1%}")
# 根据占用率给出建议
if occupancy >= 1.0:
print("⚠️ 警告:已达到容量上限!应该触发紧急转移")
elif occupancy >= 0.5:
print("✅ 占用率超过50%,符合自动转移条件")
else:
print(f" 占用率未达到50%阈值,当前 {occupancy:.1%}")
print(f"🎯 可转移记忆数: {stats['transferable_count']}")
print(f"📏 转移重要性阈值: {stats['transfer_threshold']}")
return stats
async def check_transfer_candidates():
"""检查当前可转移的候选记忆"""
print_section("2. 转移候选记忆分析")
manager = get_unified_memory_manager()
short_term = manager.short_term_manager
# 获取转移候选
candidates = short_term.get_memories_for_transfer()
print(f"🎫 当前转移候选: {len(candidates)}\n")
if not candidates:
print("❌ 没有记忆符合转移条件!")
print("\n可能原因:")
print(" 1. 所有记忆的重要性都低于阈值")
print(" 2. 短期记忆数量未超过容量限制")
print(" 3. 短期记忆列表为空")
return []
# 显示前5条候选的详细信息
print("前 5 条候选记忆:\n")
for i, mem in enumerate(candidates[:5], 1):
print(f"{i}. 记忆ID: {mem.id[:8]}...")
print(f" 重要性: {mem.importance:.3f}")
print(f" 内容: {mem.content[:50]}...")
print(f" 创建时间: {mem.created_at}")
print()
if len(candidates) > 5:
print(f"... 还有 {len(candidates) - 5} 条候选记忆\n")
# 分析重要性分布
importance_levels = {
"高 (>=0.8)": sum(1 for m in candidates if m.importance >= 0.8),
"中 (0.6-0.8)": sum(1 for m in candidates if 0.6 <= m.importance < 0.8),
"低 (<0.6)": sum(1 for m in candidates if m.importance < 0.6),
}
print("📊 重要性分布:")
for level, count in importance_levels.items():
print(f" {level}: {count}")
return candidates
async def check_auto_transfer_task():
"""检查自动转移任务状态"""
print_section("3. 自动转移任务状态")
manager = get_unified_memory_manager()
# 检查任务是否存在
if not hasattr(manager, "_auto_transfer_task") or manager._auto_transfer_task is None:
print("❌ 自动转移任务未创建!")
print("\n建议:调用 manager.initialize() 初始化系统")
return False
task = manager._auto_transfer_task
# 检查任务状态
if task.done():
print("❌ 自动转移任务已结束!")
try:
exception = task.exception()
if exception:
print(f"\n任务异常: {exception}")
except:
pass
print("\n建议:重启系统或手动重启任务")
return False
print("✅ 自动转移任务正在运行")
# 检查转移缓存
if hasattr(manager, "_transfer_cache"):
cache_size = len(manager._transfer_cache) if manager._transfer_cache else 0
print(f"📦 转移缓存: {cache_size} 条记忆")
# 检查上次转移时间
if hasattr(manager, "_last_transfer_time"):
from datetime import datetime
last_time = manager._last_transfer_time
if last_time:
time_diff = (datetime.now() - last_time).total_seconds()
print(f"⏱️ 距上次转移: {time_diff:.1f} 秒前")
return True
async def check_long_term_status():
"""检查长期记忆状态"""
print_section("4. 长期记忆图谱状态")
manager = get_unified_memory_manager()
long_term = manager.long_term_manager
# 获取图谱统计
stats = long_term.get_statistics()
print(f"👥 人物节点数: {stats.get('person_count', 0)}")
print(f"📅 事件节点数: {stats.get('event_count', 0)}")
print(f"🔗 关系边数: {stats.get('edge_count', 0)}")
print(f"💾 向量存储数: {stats.get('vector_count', 0)}")
return stats
async def manual_transfer_test():
"""手动触发转移测试"""
print_section("5. 手动转移测试")
manager = get_unified_memory_manager()
# 询问用户是否执行
print("⚠️ 即将手动触发一次记忆转移")
print("这将把当前符合条件的短期记忆转移到长期记忆")
response = input("\n是否继续? (y/n): ").strip().lower()
if response != "y":
print("❌ 已取消手动转移")
return None
print("\n🚀 开始手动转移...")
try:
# 执行手动转移
result = await manager.manual_transfer()
print("\n✅ 转移完成!")
print("\n转移结果:")
print(f" 已处理: {result.get('processed_count', 0)}")
print(f" 成功转移: {len(result.get('transferred_memory_ids', []))}")
print(f" 失败: {result.get('failed_count', 0)}")
print(f" 跳过: {result.get('skipped_count', 0)}")
if result.get("errors"):
print("\n错误信息:")
for error in result["errors"][:3]: # 只显示前3个错误
print(f" - {error}")
return result
except Exception as e:
print(f"\n❌ 转移失败: {e}")
logger.exception("手动转移失败")
return None
async def check_configuration():
"""检查相关配置"""
print_section("6. 配置参数检查")
from src.config.config import global_config
config = global_config.memory
print("📋 当前配置:")
print(f" 短期记忆容量: {config.short_term_max_memories}")
print(f" 转移重要性阈值: {config.short_term_transfer_threshold}")
print(f" 批量转移大小: {config.long_term_batch_size}")
print(f" 自动转移间隔: {config.long_term_auto_transfer_interval}")
print(f" 启用泄压清理: {config.short_term_enable_force_cleanup}")
# 给出配置建议
print("\n💡 配置建议:")
if config.short_term_transfer_threshold > 0.6:
print(" ⚠️ 转移阈值较高(>0.6),可能导致记忆难以转移")
print(" 建议:降低到 0.4-0.5")
if config.long_term_batch_size > 10:
print(" ⚠️ 批量大小较大(>10),可能延迟转移触发")
print(" 建议:设置为 5-10")
if config.long_term_auto_transfer_interval > 300:
print(" ⚠️ 转移间隔较长(>5分钟),可能导致转移不及时")
print(" 建议:设置为 60-180 秒")
async def main():
"""主函数"""
print("\n" + "=" * 60)
print(" MoFox-Bot 记忆转移诊断工具")
print("=" * 60)
try:
# 初始化管理器
print("\n⚙️ 正在初始化记忆管理器...")
manager = get_unified_memory_manager()
await manager.initialize()
print("✅ 初始化完成\n")
# 执行各项检查
await check_short_term_status()
candidates = await check_transfer_candidates()
task_running = await check_auto_transfer_task()
await check_long_term_status()
await check_configuration()
# 综合诊断
print_section("7. 综合诊断结果")
issues = []
if not candidates:
issues.append("❌ 没有符合条件的转移候选")
if not task_running:
issues.append("❌ 自动转移任务未运行")
if issues:
print("🚨 发现以下问题:\n")
for issue in issues:
print(f" {issue}")
print("\n建议操作:")
print(" 1. 检查短期记忆的重要性评分是否合理")
print(" 2. 降低配置中的转移阈值")
print(" 3. 查看日志文件排查错误")
print(" 4. 尝试手动触发转移测试")
else:
print("✅ 系统运行正常,转移机制已就绪")
if candidates:
print(f"\n当前有 {len(candidates)} 条记忆等待转移")
print("转移将在满足以下任一条件时自动触发:")
print(" • 转移缓存达到批量大小")
print(" • 短期记忆占用率超过 50%")
print(" • 距上次转移超过最大延迟")
print(" • 短期记忆达到容量上限")
# 询问是否手动触发转移
if candidates:
print()
await manual_transfer_test()
print_section("检查完成")
print("详细诊断报告: docs/memory_transfer_diagnostic_report.md")
except Exception as e:
print(f"\n❌ 检查过程出错: {e}")
logger.exception("检查脚本执行失败")
return 1
return 0
if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
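
For reference, the trigger conditions listed by the final section of this script map onto a single predicate. The sketch below is illustrative only: the `TransferState` fields are assumed names for the values the script prints, not attributes of the actual manager.

```python
from dataclasses import dataclass


@dataclass
class TransferState:
    """Hypothetical snapshot of the values the diagnostic prints."""
    cache_size: int            # memories waiting in the transfer cache
    batch_size: int            # configured long_term_batch_size
    memory_count: int          # current short-term memory count
    max_memories: int          # short_term_max_memories
    seconds_since_last: float  # time since the last transfer
    max_delay: float           # maximum allowed delay between transfers


def should_auto_transfer(s: TransferState) -> bool:
    """A transfer fires when any of the listed conditions holds."""
    occupancy = s.memory_count / s.max_memories if s.max_memories else 0.0
    return (
        s.cache_size >= s.batch_size           # cache reached the batch size
        or occupancy > 0.5                     # occupancy above 50%
        or s.seconds_since_last > s.max_delay  # waited past the maximum delay
        or s.memory_count >= s.max_memories    # capacity limit reached
    )
```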

View File

@@ -0,0 +1,74 @@
"""工具:清空短期记忆存储。
用法:
python scripts/clear_short_term_memory.py [--remove-file]
- 按配置的数据目录加载短期记忆管理器
- 清空内存缓存并写入空的 short_term_memory.json
- 可选:直接删除存储文件而不是写入空文件
"""
import argparse
import asyncio
import sys
from pathlib import Path
# Allow imports to resolve when running from the repository root
PROJECT_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(PROJECT_ROOT))
from src.config.config import global_config
from src.memory_graph.short_term_manager import ShortTermMemoryManager
def resolve_data_dir() -> Path:
"""从配置解析记忆数据目录,带安全默认值。"""
memory_cfg = getattr(global_config, "memory", None)
base_dir = getattr(memory_cfg, "data_dir", "data/memory_graph") if memory_cfg else "data/memory_graph"
return PROJECT_ROOT / base_dir
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Clear short-term memory (example: python scripts/clear_short_term_memory.py --remove-file)"
    )
    parser.add_argument(
        "--remove-file",
        action="store_true",
        help="Delete the short_term_memory.json file (by default an empty file is written)",
    )
    return parser.parse_args()
async def clear_short_term_memories(remove_file: bool = False) -> None:
    data_dir = resolve_data_dir()
    storage_file = data_dir / "short_term_memory.json"
    manager = ShortTermMemoryManager(data_dir=data_dir)
    await manager.initialize()
    removed_count = len(manager.memories)
    # Clear the in-memory state
    manager.memories.clear()
    manager._memory_id_index.clear()  # internal ID index
    manager._similarity_cache.clear()  # similarity cache
    if remove_file and storage_file.exists():
        storage_file.unlink()
        print(f"Removed storage file: {storage_file}")
    else:
        # Write an empty file, preserving the structure
        await manager._save_to_disk()
        print(f"Wrote empty short-term memory file: {storage_file}")
    print(f"Cleared {removed_count} short-term memories")
async def main() -> None:
    args = parse_args()
    await clear_short_term_memories(remove_file=args.remove_file)
if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -12,17 +12,16 @@ from typing import Any, Optional, cast
import json_repair
from PIL import Image
-from rich.traceback import install
from sqlalchemy import select
from src.chat.emoji_system.emoji_constants import EMOJI_DIR, EMOJI_REGISTERED_DIR, MAX_EMOJI_FOR_PROMPT
from src.chat.emoji_system.emoji_entities import MaiEmoji
from src.chat.emoji_system.emoji_utils import (
    _emoji_objects_to_readable_list,
-    _to_emoji_objects,
    _ensure_emoji_dir,
-    clear_temp_emoji,
    _to_emoji_objects,
    clean_unused_emojis,
    clear_temp_emoji,
    list_image_files,
)
from src.chat.utils.utils_image import get_image_manager, image_path_to_base64

View File

@@ -578,8 +578,7 @@ class ExpressionLearner:
logger.info(f"相同情景覆盖:'{same_situation_expr.situation}' 的表达从 '{same_situation_expr.style}' 更新为 '{style_val}'") logger.info(f"相同情景覆盖:'{same_situation_expr.situation}' 的表达从 '{same_situation_expr.style}' 更新为 '{style_val}'")
# 更新映射 # 更新映射
old_key = (same_situation_expr.situation, same_situation_expr.style) old_key = (same_situation_expr.situation, same_situation_expr.style)
if old_key in exact_match_map: exact_match_map.pop(old_key, None)
del exact_match_map[old_key]
same_situation_expr.style = style_val same_situation_expr.style = style_val
same_situation_expr.count = same_situation_expr.count + 1 same_situation_expr.count = same_situation_expr.count + 1
same_situation_expr.last_active_time = current_time same_situation_expr.last_active_time = current_time
@@ -591,8 +590,7 @@ class ExpressionLearner:
logger.info(f"相同表达覆盖:'{same_style_expr.style}' 的情景从 '{same_style_expr.situation}' 更新为 '{situation}'") logger.info(f"相同表达覆盖:'{same_style_expr.style}' 的情景从 '{same_style_expr.situation}' 更新为 '{situation}'")
# 更新映射 # 更新映射
old_key = (same_style_expr.situation, same_style_expr.style) old_key = (same_style_expr.situation, same_style_expr.style)
if old_key in exact_match_map: exact_match_map.pop(old_key, None)
del exact_match_map[old_key]
same_style_expr.situation = situation same_style_expr.situation = situation
same_style_expr.count = same_style_expr.count + 1 same_style_expr.count = same_style_expr.count + 1
same_style_expr.last_active_time = current_time same_style_expr.last_active_time = current_time
@@ -627,8 +625,7 @@ class ExpressionLearner:
await session.delete(expr)
# Remove it from the mapping
key = (expr.situation, expr.style)
-if key in exact_match_map:
-    del exact_match_map[key]
exact_match_map.pop(key, None)
logger.debug(f"Deleted {len(all_current_exprs) - MAX_EXPRESSION_COUNT} low-frequency expressions")
# Commit the database changes

View File

@@ -114,6 +114,39 @@ to_transfer = short_term_manager.get_memories_for_transfer()
- 🛡️ **Force cleanup**: deletes low-priority memories once capacity hits 100%
- ⚙️ **Configuration**: `short_term_max_memories = 30`
**Overflow strategies (new)**

When short-term memory reaches its capacity limit, two handling strategies are supported; choose one via configuration:

| Strategy | Behaviour | Suited for | Config value |
|----------|-----------|------------|--------------|
| **Transfer all** | When capacity is full, transfer **all memories** to long-term storage, then delete the low-importance ones (importance < 0.6) | Keeping more history; memory-intensive use cases | `transfer_all` (default) |
| **Selective cleanup** | Transfer only high-importance memories and delete low-importance ones directly | Freeing space quickly; performance-first use cases | `selective_cleanup` |

**Configuration:**

```toml
[memory]
# Short-term memory overflow strategy
short_term_overflow_strategy = "transfer_all" # or "selective_cleanup"
```

**Behavioural difference example:**

```python
# Assume short-term memory is full (30 entries), of which:
# - 20 are high importance (>= 0.6)
# - 10 are low importance (< 0.6)

# Strategy 1: transfer_all (default)
# 1. Transfer all 30 entries to long-term memory
# 2. Delete the 10 low-importance entries
# Result: 20 remain in short-term memory, 30 added to long-term memory

# Strategy 2: selective_cleanup
# 1. Transfer only the 20 high-importance entries to long-term memory
# 2. Delete the 10 low-importance entries directly
# Result: 20 remain in short-term memory, 20 added to long-term memory
```
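
For orientation, the two documented strategies boil down to the dispatch sketched below. This is a minimal illustration of the behaviour described above, not the actual manager API; `handle_overflow`, its parameters, and the `transfer` callback are hypothetical names.

```python
def handle_overflow(memories, strategy, transfer, threshold=0.6):
    """Illustrative only: mirror the documented overflow behaviour.

    `memories` are objects with an `importance` attribute; `transfer(batch)`
    stands in for moving a batch into long-term storage.
    Returns the memories that remain in the short-term layer.
    """
    high = [m for m in memories if m.importance >= threshold]
    low = [m for m in memories if m.importance < threshold]

    if strategy == "transfer_all":
        transfer(high + low)   # everything is written to long-term storage
        return high            # low-importance entries are dropped from short-term
    if strategy == "selective_cleanup":
        transfer(high)         # only high-importance entries are transferred
        return high            # low-importance entries are deleted outright
    raise ValueError(f"unknown overflow strategy: {strategy!r}")
```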
### Layer 3: Long-term Memory

**Characteristics**
@@ -176,6 +209,7 @@ perceptual_activation_threshold = 3 # transfer activation threshold
# Short-term memory
short_term_max_memories = 30 # capacity limit
short_term_transfer_threshold = 0.6 # importance threshold for transfer
short_term_overflow_strategy = "transfer_all" # overflow strategy: transfer_all / selective_cleanup
short_term_enable_force_cleanup = true # enable force cleanup (pressure relief)
short_term_cleanup_keep_ratio = 0.9 # fraction of capacity kept after force cleanup

View File

@@ -1037,12 +1037,15 @@ class LongTermMemoryManager:
async def _queue_embedding_generation(self, node_id: str, content: str) -> None:
    """Queue a node for embedding generation."""
    # Append inside the lock, but trigger the batch flush outside it, to avoid self-deadlock
    should_flush = False
    async with self._embedding_lock:
        self._pending_embeddings.append((node_id, content))
        # If the queue has reached the batch size, process it
        if len(self._pending_embeddings) >= self._embedding_batch_size:
-            await self._flush_pending_embeddings()
            should_flush = True
    if should_flush:
        await self._flush_pending_embeddings()

async def _flush_pending_embeddings(self) -> None:
    """Process the pending embeddings as a batch."""

View File

@@ -1,4 +1,3 @@
-# ruff: noqa: G004, BLE001
# pylint: disable=logging-fstring-interpolation,broad-except,unused-argument
# pyright: reportOptionalMemberAccess=false
"""

View File

@@ -166,6 +166,7 @@ async def initialize_unified_memory_manager():
# Short-term memory configuration
short_term_max_memories=getattr(config, "short_term_max_memories", 30),
short_term_transfer_threshold=getattr(config, "short_term_transfer_threshold", 0.6),
short_term_overflow_strategy=getattr(config, "short_term_overflow_strategy", "transfer_all"),
short_term_enable_force_cleanup=getattr(config, "short_term_enable_force_cleanup", True),
short_term_cleanup_keep_ratio=getattr(config, "short_term_cleanup_keep_ratio", 0.9),
# Long-term memory configuration

View File

@@ -45,6 +45,7 @@ class ShortTermMemoryManager:
llm_temperature: float = 0.2,
enable_force_cleanup: bool = False,
cleanup_keep_ratio: float = 0.9,
overflow_strategy: str = "transfer_all",
):
"""
Initialise the short-term memory layer manager
@@ -56,6 +57,9 @@ class ShortTermMemoryManager:
llm_temperature: temperature used for LLM decisions
enable_force_cleanup: whether to enable the force-cleanup (pressure relief) feature
cleanup_keep_ratio: fraction of capacity kept after force cleanup (default 0.9, i.e. keep 90%)
overflow_strategy: short-term memory overflow strategy
    - "transfer_all": transfer all memories to long-term memory at once and delete unimportant short-term memories (default)
    - "selective_cleanup": selective cleanup; transfer only important memories and delete low-importance ones directly
"""
self.data_dir = data_dir or Path("data/memory_graph")
self.data_dir.mkdir(parents=True, exist_ok=True)
@@ -66,6 +70,7 @@ class ShortTermMemoryManager:
self.llm_temperature = llm_temperature
self.enable_force_cleanup = enable_force_cleanup
self.cleanup_keep_ratio = cleanup_keep_ratio
self.overflow_strategy = overflow_strategy  # new: overflow strategy
# Core data
self.memories: list[ShortTermMemory] = []
@@ -82,6 +87,7 @@ class ShortTermMemoryManager:
logger.info(
    f"Short-term memory manager created (max_memories={max_memories}, "
    f"transfer_threshold={transfer_importance_threshold:.2f}, "
    f"overflow_strategy={overflow_strategy}, "
    f"force_cleanup={'on' if enable_force_cleanup else 'off'})"
)
@@ -703,6 +709,8 @@ class ShortTermMemoryManager:
""" """
清除已转移到长期记忆的记忆 清除已转移到长期记忆的记忆
"transfer_all" 策略下,还会删除不重要的短期记忆以释放空间
Args: Args:
memory_ids: 已转移的记忆ID列表 memory_ids: 已转移的记忆ID列表
""" """
@@ -717,6 +725,32 @@ class ShortTermMemoryManager:
logger.info(f"清除 {len(memory_ids)} 条已转移的短期记忆") logger.info(f"清除 {len(memory_ids)} 条已转移的短期记忆")
# 在 "transfer_all" 策略下,进一步删除不重要的短期记忆
if self.overflow_strategy == "transfer_all":
# 计算需要删除的低重要性记忆数量
low_importance_memories = [
mem for mem in self.memories
if mem.importance < self.transfer_importance_threshold
]
if low_importance_memories:
# 按重要性和创建时间排序,删除最不重要的
low_importance_memories.sort(key=lambda m: (m.importance, m.created_at))
# 删除所有低重要性记忆
to_delete = {mem.id for mem in low_importance_memories}
self.memories = [mem for mem in self.memories if mem.id not in to_delete]
# 更新索引
for mem_id in to_delete:
self._memory_id_index.pop(mem_id, None)
self._similarity_cache.pop(mem_id, None)
logger.info(
f"transfer_all 策略: 额外删除了 {len(to_delete)} 条低重要性记忆 "
f"(重要性 < {self.transfer_importance_threshold:.2f})"
)
# 异步保存 # 异步保存
asyncio.create_task(self._save_to_disk()) asyncio.create_task(self._save_to_disk())

View File

@@ -44,6 +44,7 @@ class UnifiedMemoryManager:
# Short-term memory configuration
short_term_max_memories: int = 30,
short_term_transfer_threshold: float = 0.6,
short_term_overflow_strategy: str = "transfer_all",
short_term_enable_force_cleanup: bool = False,
short_term_cleanup_keep_ratio: float = 0.9,
# Long-term memory configuration
@@ -98,6 +99,7 @@ class UnifiedMemoryManager:
"short_term": { "short_term": {
"max_memories": short_term_max_memories, "max_memories": short_term_max_memories,
"transfer_importance_threshold": short_term_transfer_threshold, "transfer_importance_threshold": short_term_transfer_threshold,
"overflow_strategy": short_term_overflow_strategy,
"enable_force_cleanup": short_term_enable_force_cleanup, "enable_force_cleanup": short_term_enable_force_cleanup,
"cleanup_keep_ratio": short_term_cleanup_keep_ratio, "cleanup_keep_ratio": short_term_cleanup_keep_ratio,
}, },

View File

@@ -3,7 +3,6 @@ MaiZone (麦麦空间) - refactored version
""" """
import asyncio import asyncio
from pathlib import Path
from src.common.logger import get_logger from src.common.logger import get_logger
from src.plugin_system import BasePlugin, ComponentInfo, register_plugin from src.plugin_system import BasePlugin, ComponentInfo, register_plugin

View File

@@ -258,7 +258,7 @@ class ContentService:
- 运动风:"masterpiece, best quality, 1girl, sportswear, running in park, energetic, morning light, trees background, dynamic pose, healthy lifestyle"
- 咖啡馆:"masterpiece, best quality, 1girl, sitting in cozy cafe, holding coffee cup, warm lighting, wooden table, books beside, peaceful atmosphere"
"""
-output_format = '''{"text": "说说正文内容", "image": {"prompt": "详细的英文提示词(包含画质+主体+场景+氛围+光线+色彩)", "negative_prompt": "负面词", "include_character": true/false, "aspect_ratio": "方图/横图/竖图"}}'''
output_format = """{"text": "说说正文内容", "image": {"prompt": "详细的英文提示词(包含画质+主体+场景+氛围+光线+色彩)", "negative_prompt": "负面词", "include_character": true/false, "aspect_ratio": "方图/横图/竖图"}}"""
elif ai_image_enabled and provider == "siliconflow":
    novelai_guide = """
**配图说明:**
@@ -277,7 +277,7 @@ class ContentService:
- "sunset over the calm ocean, golden hour, orange and purple sky, gentle waves, peaceful and serene mood, wide angle view" - "sunset over the calm ocean, golden hour, orange and purple sky, gentle waves, peaceful and serene mood, wide angle view"
- "cherry blossoms in spring, soft pink petals falling, blue sky, sunlight filtering through branches, peaceful park scene, gentle breeze" - "cherry blossoms in spring, soft pink petals falling, blue sky, sunlight filtering through branches, peaceful park scene, gentle breeze"
""" """
output_format = '''{"text": "说说正文内容", "image": {"prompt": "详细的英文描述(主体+场景+氛围+光线+细节)"}}''' output_format = """{"text": "说说正文内容", "image": {"prompt": "详细的英文描述(主体+场景+氛围+光线+细节)"}}"""
prompt = f""" prompt = f"""
{personality_desc} {personality_desc}

View File

@@ -2,14 +2,11 @@
NovelAI image generation service - dedicated to the Zone (空间) plugin
Standalone implementation with no dependency on other plugins
"""
-import asyncio
import io
-import base64
import random
import uuid
import zipfile
-import io
from pathlib import Path
-from typing import Optional
import aiohttp
from PIL import Image
@@ -60,11 +57,11 @@ class MaiZoneNovelAIService:
async def generate_image_from_prompt_data(
    self,
    prompt: str,
-    negative_prompt: Optional[str] = None,
    negative_prompt: str | None = None,
    include_character: bool = False,
    width: int = 1024,
    height: int = 1024
-) -> tuple[bool, Optional[Path], str]:
) -> tuple[bool, Path | None, str]:
    """Generate an image from prompt data
    Args:
@@ -85,7 +82,7 @@ class MaiZoneNovelAIService:
final_prompt = prompt
if include_character and self.character_prompt:
    final_prompt = f"{self.character_prompt}, {prompt}"
-    logger.info(f"Including the character likeness, appending the character prompt")
    logger.info("Including the character likeness, appending the character prompt")
# Merge the negative prompts
final_negative = self.base_negative_prompt
@@ -95,7 +92,7 @@ class MaiZoneNovelAIService:
else:
    final_negative = negative_prompt
-logger.info(f"🎨 Starting image generation...")
logger.info("🎨 Starting image generation...")
logger.info(f" Size: {width}x{height}")
logger.info(f" Positive prompt: {final_prompt[:100]}...")
logger.info(f" Negative prompt: {final_negative[:100]}...")
@@ -118,7 +115,7 @@ class MaiZoneNovelAIService:
except Exception as e:
    logger.error(f"Error while generating the image: {e}", exc_info=True)
-    return False, None, f"Generation failed: {str(e)}"
    return False, None, f"Generation failed: {e!s}"

def _build_payload(self, prompt: str, negative_prompt: str, width: int, height: int) -> dict:
    """Build the NovelAI API request payload."""
@@ -197,7 +194,7 @@ class MaiZoneNovelAIService:
return payload

-async def _call_novelai_api(self, payload: dict) -> Optional[bytes]:
async def _call_novelai_api(self, payload: dict) -> bytes | None:
    """Call the NovelAI API."""
    headers = {
        "Authorization": f"Bearer {self.api_key}",
@@ -228,10 +225,10 @@ class MaiZoneNovelAIService:
logger.info(f"收到响应数据: {len(img_data)} bytes") logger.info(f"收到响应数据: {len(img_data)} bytes")
# 检查是否是ZIP文件 # 检查是否是ZIP文件
if img_data[:4] == b'PK\x03\x04': if img_data[:4] == b"PK\x03\x04":
logger.info("检测到ZIP格式解压中...") logger.info("检测到ZIP格式解压中...")
return self._extract_from_zip(img_data) return self._extract_from_zip(img_data)
elif img_data[:4] == b'\x89PNG': elif img_data[:4] == b"\x89PNG":
logger.info("检测到PNG格式") logger.info("检测到PNG格式")
return img_data return img_data
else: else:
@@ -242,12 +239,12 @@ class MaiZoneNovelAIService:
logger.error(f"API调用失败: {e}", exc_info=True) logger.error(f"API调用失败: {e}", exc_info=True)
return None return None
def _extract_from_zip(self, zip_data: bytes) -> Optional[bytes]: def _extract_from_zip(self, zip_data: bytes) -> bytes | None:
"""从ZIP中提取PNG""" """从ZIP中提取PNG"""
try: try:
with zipfile.ZipFile(io.BytesIO(zip_data)) as zf: with zipfile.ZipFile(io.BytesIO(zip_data)) as zf:
for filename in zf.namelist(): for filename in zf.namelist():
if filename.lower().endswith('.png'): if filename.lower().endswith(".png"):
img_data = zf.read(filename) img_data = zf.read(filename)
logger.info(f"从ZIP提取: {filename} ({len(img_data)} bytes)") logger.info(f"从ZIP提取: {filename} ({len(img_data)} bytes)")
return img_data return img_data
@@ -257,7 +254,7 @@ class MaiZoneNovelAIService:
logger.error(f"解压ZIP失败: {e}") logger.error(f"解压ZIP失败: {e}")
return None return None
async def _save_image(self, image_data: bytes) -> Optional[Path]: async def _save_image(self, image_data: bytes) -> Path | None:
"""保存图片到本地""" """保存图片到本地"""
try: try:
filename = f"novelai_{uuid.uuid4().hex[:12]}.png" filename = f"novelai_{uuid.uuid4().hex[:12]}.png"

View File

@@ -5,7 +5,6 @@ Qzone (QQ空间) service module
import asyncio
import base64
-import os
import random
import time
from collections.abc import Callable
@@ -114,7 +113,7 @@ class QZoneService:
}
width, height = size_map.get(aspect_ratio, (1024, 1024))
-logger.info(f"🎨 Starting NovelAI illustration generation...")
logger.info("🎨 Starting NovelAI illustration generation...")
success, img_path, msg = await novelai_service.generate_image_from_prompt_data(
    prompt=image_info.get("prompt", ""),
    negative_prompt=image_info.get("negative_prompt"),
@@ -125,7 +124,7 @@ class QZoneService:
if success and img_path:
    image_path = img_path
-    logger.info(f"✅ NovelAI illustration generated successfully")
    logger.info("✅ NovelAI illustration generated successfully")
else:
    logger.warning(f"⚠️ NovelAI illustration generation failed: {msg}")
else:
@@ -143,9 +142,9 @@ class QZoneService:
)
if success and img_path:
    image_path = img_path
-    logger.info(f"✅ SiliconFlow illustration generated successfully")
    logger.info("✅ SiliconFlow illustration generated successfully")
else:
-    logger.warning(f"⚠️ SiliconFlow illustration generation failed")
    logger.warning("⚠️ SiliconFlow illustration generation failed")
except Exception as e:
    logger.error(f"SiliconFlow illustration generation error: {e}", exc_info=True)
else:
@@ -167,7 +166,7 @@ class QZoneService:
try:
    with open(image_path, "rb") as f:
        images_bytes.append(f.read())
-    logger.info(f"Attached the AI illustration to the post")
    logger.info("Attached the AI illustration to the post")
except Exception as e:
    logger.error(f"Failed to read the AI illustration: {e}")

View File

@@ -312,6 +312,9 @@ short_term_transfer_threshold = 0.6 # importance threshold for transfer to long-term memory
short_term_enable_force_cleanup = true # enable force cleanup (recommended for high-frequency scenarios)
short_term_search_top_k = 5 # maximum number of results returned by a search
short_term_decay_factor = 0.98 # decay factor
short_term_overflow_strategy = "transfer_all" # short-term memory overflow strategy
# "transfer_all": transfer all memories to long-term memory at once, then delete low-importance memories (recommended default)
# "selective_cleanup": selective cleanup; transfer only high-importance memories and delete low-importance ones directly
# Long-term memory layer configuration
use_judge = true # use a judge model to decide whether to query long-term memory