feat(memory): add configuration system support, remove legacy memory config

Major improvements:
- Add a [memory_graph] section to bot_config_template.toml
- Remove the legacy [memory] section (~85 lines)
- Update MemoryGraphConfig to load from bot_config
- Update MemoryManager to use the configuration parameters
- Update manager_singleton to support config-driven initialization

Configuration system:
- enable: enable/disable the memory graph system
- data_dir: data storage directory
- Vector storage: collection_name, db_path
- Retrieval: top_k, min_importance, similarity_threshold
- Smart query optimization: enable_query_optimization
- Consolidation: enabled, interval_hours, similarity_threshold, time_window
- Forgetting: enabled, activation_threshold, min_importance
- Activation: decay_rate, propagation_strength, propagation_depth
- Performance: max_nodes_per_memory, max_related_memories

Configuration loading flow (see the sketch below):
1. MemoryGraphConfig.from_bot_config(global_config)
2. Read configuration values safely via getattr
3. Fall back to default values when a key is missing
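
A minimal caller-side sketch of this flow (import paths as used in the diffs below; the printed fields are only illustrative):

# Hypothetical usage sketch; mirrors the loading flow described above.
from src.config.config import global_config
from src.memory_graph.config import MemoryGraphConfig

# from_bot_config reads the [memory_graph] section via getattr and falls back
# to the dataclass defaults when the section or an individual key is missing.
cfg = MemoryGraphConfig.from_bot_config(global_config)
print(cfg.enable, cfg.search_top_k) # e.g. True 10 when nothing is overridden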

Using the configuration:
- MemoryManager reads all of its parameters from config
- maintenance uses the consolidation_* parameters
- auto_forget_memories uses the forgetting_* parameters
- activate_memory uses the activation_* parameters

Backward compatibility:
- [memory_legacy] keeps the old settings for reference
- [vector_db] is kept for other systems
- The new system uses its own chromadb instance

Completes Step 4: add configuration support
Windpicker-owo
2025-11-05 20:22:08 +08:00
parent 75951e8732
commit 9deb778e8f
4 changed files with 194 additions and 94 deletions


@@ -66,6 +66,41 @@ class StorageConfig:
class MemoryGraphConfig:
"""Overall configuration for the memory graph system"""
# Basic settings
enable: bool = True # whether the memory graph system is enabled
data_dir: Path = field(default_factory=lambda: Path("data/memory_graph"))
# Vector store settings
vector_collection_name: str = "memory_nodes"
vector_db_path: Path = field(default_factory=lambda: Path("data/memory_graph/chroma_db"))
# Retrieval settings
search_top_k: int = 10
search_min_importance: float = 0.3
search_similarity_threshold: float = 0.5
enable_query_optimization: bool = True
# Consolidation settings
consolidation_enabled: bool = True
consolidation_interval_hours: float = 1.0
consolidation_similarity_threshold: float = 0.85
consolidation_time_window_hours: int = 24
# Forgetting settings
forgetting_enabled: bool = True
forgetting_activation_threshold: float = 0.1
forgetting_min_importance: float = 0.8
# Activation settings
activation_decay_rate: float = 0.9
activation_propagation_strength: float = 0.5
activation_propagation_depth: int = 1
# Performance settings
max_memory_nodes_per_memory: int = 10
max_related_memories: int = 5
# Legacy sub-configs (backward compatibility)
consolidation: ConsolidationConfig = field(default_factory=ConsolidationConfig)
retrieval: RetrievalConfig = field(default_factory=RetrievalConfig)
node_merger: NodeMergerConfig = field(default_factory=NodeMergerConfig)
@@ -89,10 +124,74 @@ class MemoryGraphConfig:
enable_debug_logging: bool = False
enable_visualization: bool = False # whether memory visualization is enabled
@classmethod
def from_bot_config(cls, bot_config) -> MemoryGraphConfig:
"""Load the configuration from bot_config."""
try:
# Try to read the new config section
if hasattr(bot_config, 'memory_graph'):
mg_config = bot_config.memory_graph
config = cls(
enable=getattr(mg_config, 'enable', True),
data_dir=Path(getattr(mg_config, 'data_dir', 'data/memory_graph')),
vector_collection_name=getattr(mg_config, 'vector_collection_name', 'memory_nodes'),
vector_db_path=Path(getattr(mg_config, 'vector_db_path', 'data/memory_graph/chroma_db')),
search_top_k=getattr(mg_config, 'search_top_k', 10),
search_min_importance=getattr(mg_config, 'search_min_importance', 0.3),
search_similarity_threshold=getattr(mg_config, 'search_similarity_threshold', 0.5),
enable_query_optimization=getattr(mg_config, 'enable_query_optimization', True),
consolidation_enabled=getattr(mg_config, 'consolidation_enabled', True),
consolidation_interval_hours=getattr(mg_config, 'consolidation_interval_hours', 1.0),
consolidation_similarity_threshold=getattr(mg_config, 'consolidation_similarity_threshold', 0.85),
consolidation_time_window_hours=getattr(mg_config, 'consolidation_time_window_hours', 24),
forgetting_enabled=getattr(mg_config, 'forgetting_enabled', True),
forgetting_activation_threshold=getattr(mg_config, 'forgetting_activation_threshold', 0.1),
forgetting_min_importance=getattr(mg_config, 'forgetting_min_importance', 0.8),
activation_decay_rate=getattr(mg_config, 'activation_decay_rate', 0.9),
activation_propagation_strength=getattr(mg_config, 'activation_propagation_strength', 0.5),
activation_propagation_depth=getattr(mg_config, 'activation_propagation_depth', 1),
max_memory_nodes_per_memory=getattr(mg_config, 'max_memory_nodes_per_memory', 10),
max_related_memories=getattr(mg_config, 'max_related_memories', 5),
)
return config
else:
# No memory_graph section found, use defaults
return cls()
except Exception as e:
import logging
logger = logging.getLogger(__name__)
logger.warning(f"Failed to load memory_graph config from bot_config, using default config: {e}")
return cls()
@classmethod
def from_dict(cls, config_dict: Dict) -> MemoryGraphConfig:
"""Create a configuration from a dict."""
return cls(
# New config fields
enable=config_dict.get("enable", True),
data_dir=Path(config_dict.get("data_dir", "data/memory_graph")),
vector_collection_name=config_dict.get("vector_collection_name", "memory_nodes"),
vector_db_path=Path(config_dict.get("vector_db_path", "data/memory_graph/chroma_db")),
search_top_k=config_dict.get("search_top_k", 10),
search_min_importance=config_dict.get("search_min_importance", 0.3),
search_similarity_threshold=config_dict.get("search_similarity_threshold", 0.5),
enable_query_optimization=config_dict.get("enable_query_optimization", True),
consolidation_enabled=config_dict.get("consolidation_enabled", True),
consolidation_interval_hours=config_dict.get("consolidation_interval_hours", 1.0),
consolidation_similarity_threshold=config_dict.get("consolidation_similarity_threshold", 0.85),
consolidation_time_window_hours=config_dict.get("consolidation_time_window_hours", 24),
forgetting_enabled=config_dict.get("forgetting_enabled", True),
forgetting_activation_threshold=config_dict.get("forgetting_activation_threshold", 0.1),
forgetting_min_importance=config_dict.get("forgetting_min_importance", 0.8),
activation_decay_rate=config_dict.get("activation_decay_rate", 0.9),
activation_propagation_strength=config_dict.get("activation_propagation_strength", 0.5),
activation_propagation_depth=config_dict.get("activation_propagation_depth", 1),
max_memory_nodes_per_memory=config_dict.get("max_memory_nodes_per_memory", 10),
max_related_memories=config_dict.get("max_related_memories", 5),
# Legacy config fields (backward compatibility)
consolidation=ConsolidationConfig(**config_dict.get("consolidation", {})),
retrieval=RetrievalConfig(**config_dict.get("retrieval", {})),
node_merger=NodeMergerConfig(**config_dict.get("node_merger", {})),
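
As a rough illustration of the dict path (keys chosen arbitrarily; anything not supplied keeps the dataclass defaults shown above):

# Illustrative only: partial override through from_dict.
cfg = MemoryGraphConfig.from_dict({
    "search_top_k": 20,
    "forgetting_enabled": False,
})
assert cfg.search_top_k == 20
assert cfg.consolidation_interval_hours == 1.0 # untouched default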


@@ -68,10 +68,10 @@ class MemoryManager:
self._initialized = False
self._last_maintenance = datetime.now()
self._maintenance_task: Optional[asyncio.Task] = None
self._maintenance_interval_hours = 1 # run maintenance once per hour by default
self._maintenance_interval_hours = self.config.consolidation_interval_hours # read from config
self._maintenance_schedule_id: Optional[str] = None # scheduled task ID
logger.info(f"Memory manager created (data_dir={data_dir})")
logger.info(f"Memory manager created (data_dir={data_dir}, enable={self.config.enable})")
async def initialize(self) -> None:
"""
@@ -556,7 +556,7 @@ class MemoryManager:
# Compute time-based decay
last_access_dt = datetime.fromisoformat(last_access)
hours_passed = (now - last_access_dt).total_seconds() / 3600
decay_factor = 0.9 ** (hours_passed / 24) # decays 10% per day
decay_factor = self.config.activation_decay_rate ** (hours_passed / 24)
current_activation = activation_info.get("level", 0.0) * decay_factor
else:
current_activation = 0.0
@@ -573,12 +573,15 @@ class MemoryManager:
memory.metadata["activation"] = activation_info
memory.last_accessed = now
# Activation propagation: activate related memories (strength halved)
# Activation propagation: activate related memories
if strength > 0.1: # only propagate sufficiently strong activations
related_memories = self._get_related_memories(memory_id)
propagation_strength = strength * 0.5
related_memories = self._get_related_memories(
memory_id,
max_depth=self.config.activation_propagation_depth
)
propagation_strength = strength * self.config.activation_propagation_strength
for related_id in related_memories[:5]: # propagate to at most 5 related memories
for related_id in related_memories[:self.config.max_related_memories]:
await self.activate_memory(related_id, propagation_strength)
# Save the updates
@@ -677,7 +680,7 @@ class MemoryManager:
continue
# Skip high-importance memories
if memory.importance >= 0.8:
if memory.importance >= self.config.forgetting_min_importance:
continue
# Compute the current activation level
@@ -875,15 +878,19 @@ class MemoryManager:
}
# 1. Memory consolidation (merge similar memories)
consolidate_result = await self.consolidate_memories(
similarity_threshold=0.85,
time_window_hours=24
)
result["consolidated"] = consolidate_result.get("merged_count", 0)
if self.config.consolidation_enabled:
consolidate_result = await self.consolidate_memories(
similarity_threshold=self.config.consolidation_similarity_threshold,
time_window_hours=self.config.consolidation_time_window_hours
)
result["consolidated"] = consolidate_result.get("merged_count", 0)
# 2. Automatic forgetting
forgotten_count = await self.auto_forget_memories(threshold=0.1)
result["forgotten"] = forgotten_count
if self.config.forgetting_enabled:
forgotten_count = await self.auto_forget_memories(
threshold=self.config.forgetting_activation_threshold
)
result["forgotten"] = forgotten_count
# 3. Clean up very old forgotten memories (optional)
# TODO: implement the cleanup logic
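
For intuition, a standalone sketch of the decay and propagation arithmetic above, using the default configuration values (decay rate 0.9, propagation strength 0.5); the numbers are illustrative:

# Arithmetic sketch only; mirrors the formulas used in activate_memory.
decay_rate = 0.9
hours_passed = 48 # two days since last access
decay_factor = decay_rate ** (hours_passed / 24) # 0.9 ** 2 = 0.81
current_activation = 1.0 * decay_factor # a fully activated memory decays to 0.81

propagation_strength = 0.5
incoming = 0.6 # activation strength passed to activate_memory
propagated = incoming * propagation_strength # 0.3 forwarded to related memories
print(decay_factor, propagated) # 0.81 0.3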


@@ -19,12 +19,16 @@ _memory_manager: Optional[MemoryManager] = None
_initialized: bool = False
async def initialize_memory_manager(data_dir: Optional[Path | str] = None) -> MemoryManager:
async def initialize_memory_manager(
data_dir: Optional[Path | str] = None,
config = None,
) -> Optional[MemoryManager]:
"""
Initialize the global MemoryManager
Args:
data_dir: data directory, defaults to data/memory_graph
config: a MemoryGraphConfig or bot_config instance
Returns:
MemoryManager instance
@@ -36,14 +40,40 @@ async def initialize_memory_manager(data_dir: Optional[Path | str] = None) -> Me
return _memory_manager
try:
from src.memory_graph.config import MemoryGraphConfig
# Resolve the configuration
if config is None:
# Try to load from the global config
try:
from src.config.config import global_config
memory_config = MemoryGraphConfig.from_bot_config(global_config)
logger.info("Loaded memory_graph config from bot_config")
except Exception as e:
logger.warning(f"Could not load config from bot_config, using default config: {e}")
memory_config = MemoryGraphConfig()
elif isinstance(config, MemoryGraphConfig):
memory_config = config
else:
# Assume this is a bot_config
memory_config = MemoryGraphConfig.from_bot_config(config)
# Check whether the system is enabled
if not memory_config.enable:
logger.info("Memory graph system is disabled in the configuration")
_initialized = False
_memory_manager = None
return None
# Resolve the data directory
if data_dir is None:
data_dir = Path("data/memory_graph")
data_dir = memory_config.data_dir
elif isinstance(data_dir, str):
data_dir = Path(data_dir)
logger.info(f"Initializing the global MemoryManager (data_dir={data_dir})...")
_memory_manager = MemoryManager(data_dir=data_dir)
_memory_manager = MemoryManager(config=memory_config, data_dir=data_dir)
await _memory_manager.initialize()
_initialized = True
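
A hedged end-to-end sketch of the new entry point; the manager_singleton import path is an assumption (the diff does not show the module path), and the behaviour follows the code above, including the None return when the system is disabled:

import asyncio

# Assumed module path for manager_singleton; adjust to the actual package layout.
from src.memory_graph.manager_singleton import initialize_memory_manager

async def main():
    # With no arguments, the config comes from bot_config ([memory_graph] section)
    # and data_dir falls back to memory_config.data_dir.
    manager = await initialize_memory_manager()
    if manager is None:
        print("memory graph system disabled in config")
        return
    # ... use the manager here ...

asyncio.run(main())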