更新KGManager和知识库配置,统一使用global_config的lpmm_knowledge属性,移除未使用的MemoryActiveManager导入

This commit is contained in:
墨梓柒
2025-08-03 11:27:34 +08:00
parent 5725481097
commit 42e00dd0aa
3 changed files with 15 additions and 23 deletions

View File

@@ -20,7 +20,7 @@ from quick_algo import di_graph, pagerank
from .utils.hash import get_sha256
from .embedding_store import EmbeddingManager, EmbeddingStoreItem
from .lpmmconfig import global_config
from src.config.config import global_config
from .global_logger import logger
@@ -179,14 +179,14 @@ class KGManager:
assert isinstance(ent, EmbeddingStoreItem)
# 查询相似实体
similar_ents = embedding_manager.entities_embedding_store.search_top_k(
ent.embedding, global_config["rag"]["params"]["synonym_search_top_k"]
ent.embedding, global_config.lpmm_knowledge.rag_synonym_search_top_k
)
res_ent = [] # Debug
for res_ent_hash, similarity in similar_ents:
if res_ent_hash == ent_hash:
# 避免自连接
continue
if similarity < global_config["rag"]["params"]["synonym_threshold"]:
if similarity < global_config.lpmm_knowledge.rag_synonym_threshold:
# 相似度阈值
continue
node_to_node[(res_ent_hash, ent_hash)] = similarity
@@ -369,7 +369,7 @@ class KGManager:
for ent_hash in ent_weights.keys():
ent_weights[ent_hash] = 1.0
else:
down_edge = global_config["qa"]["params"]["paragraph_node_weight"]
down_edge = global_config.lpmm_knowledge.qa_paragraph_node_weight
# 缩放取值区间至[down_edge, 1]
for ent_hash, score in ent_weights.items():
# 缩放相似度
@@ -378,7 +378,7 @@ class KGManager:
) + down_edge
# 取平均相似度的top_k实体
top_k = global_config["qa"]["params"]["ent_filter_top_k"]
top_k = global_config.lpmm_knowledge.qa_ent_filter_top_k
if len(ent_mean_scores) > top_k:
# 从大到小排序取后len - k个
ent_mean_scores = {k: v for k, v in sorted(ent_mean_scores.items(), key=lambda item: item[1], reverse=True)}
@@ -407,7 +407,7 @@ class KGManager:
for pg_hash, score in pg_sim_scores.items():
pg_weights[pg_hash] = (
score * global_config["qa"]["params"]["paragraph_node_weight"]
score * global_config.lpmm_knowledge.qa_paragraph_node_weight
) # 文段权重 = 归一化相似度 * 文段节点权重参数
del pg_sim_scores
@@ -420,7 +420,7 @@ class KGManager:
self.graph,
personalization=ppr_node_weights,
max_iter=100,
alpha=global_config["qa"]["params"]["ppr_damping"],
alpha=global_config.lpmm_knowledge.qa_ppr_damping,
)
# 获取最终结果

View File

@@ -1,11 +1,8 @@
from src.chat.knowledge.lpmmconfig import global_config
from src.chat.knowledge.embedding_store import EmbeddingManager
from src.chat.knowledge.llm_client import LLMClient
from src.chat.knowledge.mem_active_manager import MemoryActiveManager
from src.chat.knowledge.qa_manager import QAManager
from src.chat.knowledge.kg_manager import KGManager
from src.chat.knowledge.global_logger import logger
from src.config.config import global_config as bot_global_config
from src.config.config import global_config
import os
INVALID_ENTITY = [
@@ -34,15 +31,9 @@ qa_manager = None
inspire_manager = None
# 检查LPMM知识库是否启用
if bot_global_config.lpmm_knowledge.enable:
if global_config.lpmm_knowledge.enable:
logger.info("正在初始化Mai-LPMM")
logger.info("创建LLM客户端")
llm_client_list = {}
for key in global_config["llm_providers"]:
llm_client_list[key] = LLMClient(
global_config["llm_providers"][key]["base_url"], # type: ignore
global_config["llm_providers"][key]["api_key"], # type: ignore
)
# 初始化Embedding库
embed_manager = EmbeddingManager()
@@ -78,11 +69,11 @@ if bot_global_config.lpmm_knowledge.enable:
kg_manager,
)
# 记忆激活(用于记忆库)
inspire_manager = MemoryActiveManager(
embed_manager,
llm_client_list[global_config["embedding"]["provider"]],
)
# # 记忆激活(用于记忆库)
# inspire_manager = MemoryActiveManager(
# embed_manager,
# llm_client_list[global_config["embedding"]["provider"]],
# )
else:
logger.info("LPMM知识库已禁用跳过初始化")
# 创建空的占位符对象,避免导入错误

View File

@@ -1,3 +1,4 @@
raise DeprecationWarning("MemoryActiveManager is not used yet, please do not import it")
from .lpmmconfig import global_config
from .embedding_store import EmbeddingManager
from .llm_client import LLMClient