Merge pull request #1094 from CNMrSunshine/dev

Fix three issues: disabling LPMM in the config file having no effect, a working-memory issue, and 麦麦 sending duplicate replies.
Authored by SengokuCola on 2025-07-06 11:42:24 +08:00; committed by GitHub.
6 changed files with 91 additions and 51 deletions


@@ -71,6 +71,7 @@ class WorkingMemoryProcessor(BaseProcessor):
         """
         working_memory = None
         chat_info = ""
+        chat_obs = None
         try:
             for observation in observations:
                 if isinstance(observation, WorkingMemoryObservation):
@@ -79,10 +80,15 @@ class WorkingMemoryProcessor(BaseProcessor):
                     chat_info = observation.get_observe_info()
                     chat_obs = observation
             # Check whether there is content pending compression
-            if chat_obs.compressor_prompt:
+            if chat_obs and chat_obs.compressor_prompt:
                 logger.debug(f"{self.log_prefix} Compressing chat memory")
                 await self.compress_chat_memory(working_memory, chat_obs)
+            # Check whether working_memory is None
+            if working_memory is None:
+                logger.debug(f"{self.log_prefix} No working-memory observation found, skipping processing")
+                return []
             all_memory = working_memory.get_all_memories()
             if not all_memory:
                 logger.debug(f"{self.log_prefix} No working memory yet, skipping extraction")
@@ -183,6 +189,11 @@ class WorkingMemoryProcessor(BaseProcessor):
             working_memory: the working-memory object
             obs: the chat observation object
         """
+        # Check whether working_memory is None
+        if working_memory is None:
+            logger.warning(f"{self.log_prefix} Working-memory object is None, cannot compress chat memory")
+            return
         try:
             summary_result, _ = await self.llm_model.generate_response_async(obs.compressor_prompt)
             if not summary_result:
@@ -235,6 +246,11 @@ class WorkingMemoryProcessor(BaseProcessor):
             memory_id1: the first memory ID
             memory_id2: the second memory ID
         """
+        # Check whether working_memory is None
+        if working_memory is None:
+            logger.warning(f"{self.log_prefix} Working-memory object is None, cannot merge memories")
+            return
        try:
             merged_memory = await working_memory.merge_memory(memory_id1, memory_id2)
             logger.debug(f"{self.log_prefix} Merged memory brief: {merged_memory.brief}")


@@ -29,6 +29,7 @@ def init_prompt():
 {chat_context_description}, here is the specific chat content:
 {chat_content_block}
 {moderation_prompt}
+Now, based on the chat content, choose an appropriate action:
 {action_options_text}
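
This one-line hunk appends an explicit instruction to the action-selection prompt template. For context, a toy sketch of how such a placeholder template expands; the placeholder values below are invented for illustration, not taken from the repo:

# Simplified template assembly; the real code builds this in init_prompt().
template = (
    "{chat_content_block}\n"
    "{moderation_prompt}\n"
    "Now, based on the chat content, choose an appropriate action:\n"
    "{action_options_text}"
)
print(
    template.format(
        chat_content_block="User: good morning",
        moderation_prompt="(no moderation notes)",
        action_options_text="reply / no_action",
    )
)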


@@ -5,60 +5,68 @@ from src.chat.knowledge.mem_active_manager import MemoryActiveManager
 from src.chat.knowledge.qa_manager import QAManager
 from src.chat.knowledge.kg_manager import KGManager
 from src.chat.knowledge.global_logger import logger
+from src.config.config import global_config as bot_global_config

 # try:
 #     import quick_algo
 # except ImportError:
 #     print("quick_algo not found, please install it first")

-logger.info("Initializing Mai-LPMM\n")
-logger.info("Creating LLM clients")
-llm_client_list = dict()
-for key in global_config["llm_providers"]:
-    llm_client_list[key] = LLMClient(
-        global_config["llm_providers"][key]["base_url"],
-        global_config["llm_providers"][key]["api_key"],
-    )
-# Initialize the embedding store
-embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]])
-logger.info("Loading embedding store from file")
-try:
-    embed_manager.load_from_file()
-except Exception as e:
-    logger.warning("This message does not affect normal use. While loading the embedding store from file: {}".format(e))
-    # logger.warning("If this is your first knowledge import, or you have not imported any knowledge yet, ignore this error")
-logger.info("Embedding store loaded")
-# Initialize the KG
-kg_manager = KGManager()
-logger.info("Loading KG from file")
-try:
-    kg_manager.load_from_file()
-except Exception as e:
-    logger.warning("This message does not affect normal use. While loading the KG from file: {}".format(e))
-    # logger.warning("If this is your first knowledge import, or you have not imported any knowledge yet, ignore this error")
-logger.info("KG loaded")
-logger.info(f"Number of KG nodes: {len(kg_manager.graph.get_node_list())}")
-logger.info(f"Number of KG edges: {len(kg_manager.graph.get_edge_list())}")
-# Data check: compare the paragraph hash sets of the embedding store and the KG
-for pg_hash in kg_manager.stored_paragraph_hashes:
-    key = PG_NAMESPACE + "-" + pg_hash
-    if key not in embed_manager.stored_pg_hashes:
-        logger.warning(f"Paragraph exists in the KG but not in the embedding store: {key}")
-# QA system (for the knowledge base)
-qa_manager = QAManager(
-    embed_manager,
-    kg_manager,
-    llm_client_list[global_config["embedding"]["provider"]],
-    llm_client_list[global_config["qa"]["llm"]["provider"]],
-    llm_client_list[global_config["qa"]["llm"]["provider"]],
-)
-# Memory activation (for the memory store)
-inspire_manager = MemoryActiveManager(
-    embed_manager,
-    llm_client_list[global_config["embedding"]["provider"]],
-)
+# Check whether the LPMM knowledge base is enabled
+if bot_global_config.lpmm_knowledge.enable:
+    logger.info("Initializing Mai-LPMM\n")
+    logger.info("Creating LLM clients")
+    llm_client_list = dict()
+    for key in global_config["llm_providers"]:
+        llm_client_list[key] = LLMClient(
+            global_config["llm_providers"][key]["base_url"],
+            global_config["llm_providers"][key]["api_key"],
+        )
+    # Initialize the embedding store
+    embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]])
+    logger.info("Loading embedding store from file")
+    try:
+        embed_manager.load_from_file()
+    except Exception as e:
+        logger.warning("This message does not affect normal use. While loading the embedding store from file: {}".format(e))
+        # logger.warning("If this is your first knowledge import, or you have not imported any knowledge yet, ignore this error")
+    logger.info("Embedding store loaded")
+    # Initialize the KG
+    kg_manager = KGManager()
+    logger.info("Loading KG from file")
+    try:
+        kg_manager.load_from_file()
+    except Exception as e:
+        logger.warning("This message does not affect normal use. While loading the KG from file: {}".format(e))
+        # logger.warning("If this is your first knowledge import, or you have not imported any knowledge yet, ignore this error")
+    logger.info("KG loaded")
+    logger.info(f"Number of KG nodes: {len(kg_manager.graph.get_node_list())}")
+    logger.info(f"Number of KG edges: {len(kg_manager.graph.get_edge_list())}")
+    # Data check: compare the paragraph hash sets of the embedding store and the KG
+    for pg_hash in kg_manager.stored_paragraph_hashes:
+        key = PG_NAMESPACE + "-" + pg_hash
+        if key not in embed_manager.stored_pg_hashes:
+            logger.warning(f"Paragraph exists in the KG but not in the embedding store: {key}")
+    # QA system (for the knowledge base)
+    qa_manager = QAManager(
+        embed_manager,
+        kg_manager,
+        llm_client_list[global_config["embedding"]["provider"]],
+        llm_client_list[global_config["qa"]["llm"]["provider"]],
+        llm_client_list[global_config["qa"]["llm"]["provider"]],
+    )
+    # Memory activation (for the memory store)
+    inspire_manager = MemoryActiveManager(
+        embed_manager,
+        llm_client_list[global_config["embedding"]["provider"]],
+    )
+else:
+    logger.info("LPMM knowledge base is disabled, skipping initialization")
+    # Create empty placeholder objects to avoid import errors
+    qa_manager = None
+    inspire_manager = None
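
The shape of this fix is worth spelling out: the module builds its managers at import time, so the enable flag has to be checked at module level, and the disabled branch must still bind `qa_manager` and `inspire_manager` (to `None`) so that `from ... import qa_manager` elsewhere keeps working. A self-contained sketch of the pattern under those assumptions, with hypothetical names (`load_config`, `FeatureClient`) standing in for the real ones:

from typing import Optional

def load_config() -> dict:
    # Hypothetical stand-in for the project's config loader.
    return {"feature": {"enable": False}}

class FeatureClient:
    def query(self, text: str) -> str:
        return f"result for {text!r}"

_cfg = load_config()

# Export either a live client or a None placeholder. Binding the name
# in both branches is what prevents ImportError in consumers.
feature_client: Optional[FeatureClient] = (
    FeatureClient() if _cfg["feature"]["enable"] else None
)

The cost of the placeholder approach is that every importer must now treat `None` as "disabled", which is exactly what the three consumer diffs below add around `qa_manager`.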


@@ -956,6 +956,11 @@ async def get_prompt_info(message: str, threshold: float):
     logger.debug(f"Fetching knowledge-base content, source message: {message[:30]}..., message length: {len(message)}")
     # Fetch knowledge from the LPMM knowledge base
     try:
+        # Check whether the LPMM knowledge base is enabled
+        if qa_manager is None:
+            logger.debug("LPMM knowledge base is disabled, skipping knowledge retrieval")
+            return ""
         found_knowledge_from_lpmm = qa_manager.get_knowledge(message)
         end_time = time.time()


@@ -35,6 +35,11 @@ class KnowledgeFetcher:
         logger.debug(f"[Private chat][{self.private_name}] Fetching knowledge from the LPMM knowledge base")
         try:
+            # Check whether the LPMM knowledge base is enabled
+            if qa_manager is None:
+                logger.debug(f"[Private chat][{self.private_name}] LPMM knowledge base is disabled, skipping knowledge retrieval")
+                return "No matching knowledge found"
             knowledge_info = qa_manager.get_knowledge(query)
             logger.debug(f"[Private chat][{self.private_name}] LPMM knowledge-base query result: {knowledge_info[:150]}")
             return knowledge_info


@@ -36,6 +36,11 @@ class SearchKnowledgeFromLPMMTool(BaseTool):
         query = function_args.get("query")
         # threshold = function_args.get("threshold", 0.4)
+        # Check whether the LPMM knowledge base is enabled
+        if qa_manager is None:
+            logger.debug("LPMM knowledge base is disabled, skipping knowledge retrieval")
+            return {"type": "info", "id": query, "content": "LPMM knowledge base is disabled"}
         # Call the knowledge-base search
         knowledge_info = qa_manager.get_knowledge(query)
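
Taken together, the three consumer diffs implement one call-site contract: every importer of `qa_manager` checks for the `None` placeholder before use and returns its own notion of an empty result (an empty string for prompt building, a fallback sentence for private chat, an info dict for the tool). A hedged sketch of that contract, with hypothetical names:

from typing import Optional

class QAManager:
    # Hypothetical stand-in for the real QAManager.
    def get_knowledge(self, query: str) -> str:
        return f"knowledge about {query!r}"

qa_manager: Optional[QAManager] = None  # disabled in config

def get_prompt_info(message: str) -> str:
    # Degrade to "no knowledge" instead of raising AttributeError on None.
    if qa_manager is None:
        return ""
    return qa_manager.get_knowledge(message)

print(repr(get_prompt_info("hello")))  # -> '' while the feature is off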