Update KGManager and the knowledge-base configuration to consistently use global_config's lpmm_knowledge attributes, and remove the unused MemoryActiveManager import
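The change replaces nested dict lookups on the LPMM-local config (for example global_config["rag"]["params"]["synonym_search_top_k"]) with attribute access on the bot-wide config's lpmm_knowledge section. Below is a minimal sketch of what that section might look like; the class name LPMMKnowledgeSettings and the default values are placeholders for illustration, while the field names are exactly the ones referenced in the diff, and the real definition presumably lives in src/config/config.py.

from dataclasses import dataclass

@dataclass
class LPMMKnowledgeSettings:  # hypothetical name; the actual config class may differ
    enable: bool = True                     # placeholder default
    rag_synonym_search_top_k: int = 10      # placeholder default
    rag_synonym_threshold: float = 0.8      # placeholder default
    qa_ent_filter_top_k: int = 10           # placeholder default
    qa_paragraph_node_weight: float = 0.05  # placeholder default
    qa_ppr_damping: float = 0.8             # placeholder default

# Old access (lpmmconfig dict):  global_config["rag"]["params"]["synonym_search_top_k"]
# New access (unified config):   global_config.lpmm_knowledge.rag_synonym_search_top_k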
@@ -20,7 +20,7 @@ from quick_algo import di_graph, pagerank

 from .utils.hash import get_sha256
 from .embedding_store import EmbeddingManager, EmbeddingStoreItem
-from .lpmmconfig import global_config
+from src.config.config import global_config

 from .global_logger import logger

@@ -179,14 +179,14 @@ class KGManager:
         assert isinstance(ent, EmbeddingStoreItem)
         # 查询相似实体
         similar_ents = embedding_manager.entities_embedding_store.search_top_k(
-            ent.embedding, global_config["rag"]["params"]["synonym_search_top_k"]
+            ent.embedding, global_config.lpmm_knowledge.rag_synonym_search_top_k
         )
         res_ent = [] # Debug
         for res_ent_hash, similarity in similar_ents:
             if res_ent_hash == ent_hash:
                 # 避免自连接
                 continue
-            if similarity < global_config["rag"]["params"]["synonym_threshold"]:
+            if similarity < global_config.lpmm_knowledge.rag_synonym_threshold:
                 # 相似度阈值
                 continue
             node_to_node[(res_ent_hash, ent_hash)] = similarity
@@ -369,7 +369,7 @@ class KGManager:
             for ent_hash in ent_weights.keys():
                 ent_weights[ent_hash] = 1.0
         else:
-            down_edge = global_config["qa"]["params"]["paragraph_node_weight"]
+            down_edge = global_config.lpmm_knowledge.qa_paragraph_node_weight
             # 缩放取值区间至[down_edge, 1]
             for ent_hash, score in ent_weights.items():
                 # 缩放相似度
@@ -378,7 +378,7 @@ class KGManager:
                 ) + down_edge

         # 取平均相似度的top_k实体
-        top_k = global_config["qa"]["params"]["ent_filter_top_k"]
+        top_k = global_config.lpmm_knowledge.qa_ent_filter_top_k
         if len(ent_mean_scores) > top_k:
             # 从大到小排序,取后len - k个
             ent_mean_scores = {k: v for k, v in sorted(ent_mean_scores.items(), key=lambda item: item[1], reverse=True)}
@@ -407,7 +407,7 @@ class KGManager:

         for pg_hash, score in pg_sim_scores.items():
             pg_weights[pg_hash] = (
-                score * global_config["qa"]["params"]["paragraph_node_weight"]
+                score * global_config.lpmm_knowledge.qa_paragraph_node_weight
             ) # 文段权重 = 归一化相似度 * 文段节点权重参数
         del pg_sim_scores

@@ -420,7 +420,7 @@ class KGManager:
             self.graph,
             personalization=ppr_node_weights,
             max_iter=100,
-            alpha=global_config["qa"]["params"]["ppr_damping"],
+            alpha=global_config.lpmm_knowledge.qa_ppr_damping,
         )

         # 获取最终结果
@@ -1,11 +1,8 @@
-from src.chat.knowledge.lpmmconfig import global_config
 from src.chat.knowledge.embedding_store import EmbeddingManager
-from src.chat.knowledge.llm_client import LLMClient
-from src.chat.knowledge.mem_active_manager import MemoryActiveManager
 from src.chat.knowledge.qa_manager import QAManager
 from src.chat.knowledge.kg_manager import KGManager
 from src.chat.knowledge.global_logger import logger
-from src.config.config import global_config as bot_global_config
+from src.config.config import global_config
 import os

 INVALID_ENTITY = [
@@ -34,15 +31,9 @@ qa_manager = None
 inspire_manager = None

 # 检查LPMM知识库是否启用
-if bot_global_config.lpmm_knowledge.enable:
+if global_config.lpmm_knowledge.enable:
     logger.info("正在初始化Mai-LPMM")
     logger.info("创建LLM客户端")
-    llm_client_list = {}
-    for key in global_config["llm_providers"]:
-        llm_client_list[key] = LLMClient(
-            global_config["llm_providers"][key]["base_url"], # type: ignore
-            global_config["llm_providers"][key]["api_key"], # type: ignore
-        )

     # 初始化Embedding库
     embed_manager = EmbeddingManager()
@@ -78,11 +69,11 @@ if bot_global_config.lpmm_knowledge.enable:
         kg_manager,
     )

-    # 记忆激活(用于记忆库)
-    inspire_manager = MemoryActiveManager(
-        embed_manager,
-        llm_client_list[global_config["embedding"]["provider"]],
-    )
+    # # 记忆激活(用于记忆库)
+    # inspire_manager = MemoryActiveManager(
+    # embed_manager,
+    # llm_client_list[global_config["embedding"]["provider"]],
+    # )
 else:
     logger.info("LPMM知识库已禁用,跳过初始化")
     # 创建空的占位符对象,避免导入错误
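The inspire_manager setup is commented out rather than deleted, and the else branch keeps the module importable when the knowledge base is disabled. The placeholder comment in the last context line refers to assignments outside this hunk; a minimal sketch of that pattern, using only the module-level names visible in the hunks above (the actual placeholder code may differ):

# Hypothetical sketch of the "empty placeholder objects" pattern referenced above.
embed_manager = None
kg_manager = None
qa_manager = None
inspire_manager = None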
@@ -1,3 +1,4 @@
+raise DeprecationWarning("MemoryActiveManager is not used yet, please do not import it")
 from .lpmmconfig import global_config
 from .embedding_store import EmbeddingManager
 from .llm_client import LLMClient
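Note that the added raise DeprecationWarning(...) runs at import time, so any import of this module now fails immediately: the warning class is instantiated and raised as an ordinary exception rather than being emitted through the warnings machinery. A small sketch of the resulting behavior, assuming the module path implied by the import removed in the earlier hunk:

# Sketch only; the module path is inferred from the removed import above.
try:
    from src.chat.knowledge.mem_active_manager import MemoryActiveManager
except DeprecationWarning as exc:
    # The module-level raise aborts the import before any names are defined.
    print(f"import blocked: {exc}")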