Merge branch 'dev' of https://github.com/MaiM-with-u/MaiBot into dev
@@ -71,6 +71,7 @@ class WorkingMemoryProcessor(BaseProcessor):
         """
+        working_memory = None
         chat_info = ""
         chat_obs = None
         try:
             for observation in observations:
                 if isinstance(observation, WorkingMemoryObservation):
@@ -79,10 +80,15 @@ class WorkingMemoryProcessor(BaseProcessor):
                     chat_info = observation.get_observe_info()
                     chat_obs = observation
             # Check whether there is content pending compression
-            if chat_obs.compressor_prompt:
+            if chat_obs and chat_obs.compressor_prompt:
                 logger.debug(f"{self.log_prefix} Compressing chat memory")
                 await self.compress_chat_memory(working_memory, chat_obs)

+            # Check whether working_memory is None
+            if working_memory is None:
+                logger.debug(f"{self.log_prefix} No working-memory observation found, skipping processing")
+                return []
+
             all_memory = working_memory.get_all_memories()
             if not all_memory:
                 logger.debug(f"{self.log_prefix} No working memory at the moment, skipping extraction")
@@ -183,6 +189,11 @@ class WorkingMemoryProcessor(BaseProcessor):
             working_memory: the working-memory object
             obs: the chat observation object
         """
+        # Check whether working_memory is None
+        if working_memory is None:
+            logger.warning(f"{self.log_prefix} Working-memory object is None, cannot compress chat memory")
+            return
+
         try:
             summary_result, _ = await self.llm_model.generate_response_async(obs.compressor_prompt)
             if not summary_result:
@@ -235,6 +246,11 @@ class WorkingMemoryProcessor(BaseProcessor):
             memory_id1: ID of the first memory
             memory_id2: ID of the second memory
         """
+        # Check whether working_memory is None
+        if working_memory is None:
+            logger.warning(f"{self.log_prefix} Working-memory object is None, cannot merge memories")
+            return
+
         try:
             merged_memory = await working_memory.merge_memory(memory_id1, memory_id2)
             logger.debug(f"{self.log_prefix} Merged memory summary: {merged_memory.brief}")
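The four hunks above apply one pattern: initialize optional collaborators to None, then guard every use, so a run with no matching observation can no longer raise AttributeError or NameError. A minimal, self-contained sketch of the same shape (all names are illustrative stand-ins, not the repo's classes):

```python
from typing import Optional


class Observation:
    """Stand-in for WorkingMemoryObservation; compressor_prompt may be empty."""

    def __init__(self, compressor_prompt: str = "") -> None:
        self.compressor_prompt = compressor_prompt


def process(observations: list) -> list:
    chat_obs: Optional[Observation] = None  # None until an observation matches
    for obs in observations:
        if isinstance(obs, Observation):
            chat_obs = obs

    # `chat_obs and ...` short-circuits, so nothing is dereferenced when no match was found
    if chat_obs and chat_obs.compressor_prompt:
        print("would compress:", chat_obs.compressor_prompt)

    if chat_obs is None:
        return []  # nothing to extract; bail out early
    return [chat_obs]


print(process([]))                          # [] -- both guards trip, no crash
print(process([Observation("summarize")]))  # compresses, then returns the observation
```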
@@ -29,6 +29,7 @@ def init_prompt():
 {chat_context_description}, here is the specific chat content:
 {chat_content_block}
+{moderation_prompt}

 Now, based on the chat content, choose an appropriate action:

 {action_options_text}
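The block above is a prompt template whose placeholders are filled in elsewhere. A minimal sketch of how such a block could be rendered with str.format (the field names come from the template; the filler values are invented):

```python
template = """{chat_context_description}, here is the specific chat content:
{chat_content_block}
{moderation_prompt}

Now, based on the chat content, choose an appropriate action:

{action_options_text}
"""

# Each keyword argument fills the placeholder of the same name.
print(template.format(
    chat_context_description="You are chatting in a group",
    chat_content_block="[10:01] alice: hi",
    moderation_prompt="Keep replies safe.",
    action_options_text="reply / ignore / emoji",
))
```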
@@ -5,60 +5,67 @@ from src.chat.knowledge.mem_active_manager import MemoryActiveManager
 from src.chat.knowledge.qa_manager import QAManager
 from src.chat.knowledge.kg_manager import KGManager
 from src.chat.knowledge.global_logger import logger
+from src.config.config import global_config as bot_global_config

 # try:
 #     import quick_algo
 # except ImportError:
 #     print("quick_algo not found, please install it first")

-logger.info("Initializing Mai-LPMM\n")
-logger.info("Creating LLM clients")
-llm_client_list = dict()
-for key in global_config["llm_providers"]:
-    llm_client_list[key] = LLMClient(
-        global_config["llm_providers"][key]["base_url"],
-        global_config["llm_providers"][key]["api_key"],
-    )
-
-# Initialize the Embedding library
-embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]])
-logger.info("Loading the Embedding library from file")
-try:
-    embed_manager.load_from_file()
-except Exception as e:
-    logger.warning("This message does not affect normal use: while loading the Embedding library from file, {}".format(e))
-    # logger.warning("If this is your first time importing knowledge, or you have not imported any knowledge yet, ignore this error")
-logger.info("Embedding library loaded")
-# Initialize the KG
-kg_manager = KGManager()
-logger.info("Loading the KG from file")
-try:
-    kg_manager.load_from_file()
-except Exception as e:
-    logger.warning("This message does not affect normal use: while loading the KG from file, {}".format(e))
-    # logger.warning("If this is your first time importing knowledge, or you have not imported any knowledge yet, ignore this error")
-logger.info("KG loaded")
-
-logger.info(f"KG node count: {len(kg_manager.graph.get_node_list())}")
-logger.info(f"KG edge count: {len(kg_manager.graph.get_edge_list())}")
-
-
-# Data check: paragraph hash sets of the Embedding library vs. the KG
-for pg_hash in kg_manager.stored_paragraph_hashes:
-    key = PG_NAMESPACE + "-" + pg_hash
-    if key not in embed_manager.stored_pg_hashes:
-        logger.warning(f"KG contains a paragraph not present in the Embedding library: {key}")
-
-# QA system (for the knowledge base)
-qa_manager = QAManager(
-    embed_manager,
-    kg_manager,
-    llm_client_list[global_config["embedding"]["provider"]],
-    llm_client_list[global_config["qa"]["llm"]["provider"]],
-)
-
-# Memory activation (for the memory store)
-inspire_manager = MemoryActiveManager(
-    embed_manager,
-    llm_client_list[global_config["embedding"]["provider"]],
-)
+# Check whether the LPMM knowledge base is enabled
+if bot_global_config.lpmm_knowledge.enable:
+    logger.info("Initializing Mai-LPMM\n")
+    logger.info("Creating LLM clients")
+    llm_client_list = dict()
+    for key in global_config["llm_providers"]:
+        llm_client_list[key] = LLMClient(
+            global_config["llm_providers"][key]["base_url"],
+            global_config["llm_providers"][key]["api_key"],
+        )
+
+    # Initialize the Embedding library
+    embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]])
+    logger.info("Loading the Embedding library from file")
+    try:
+        embed_manager.load_from_file()
+    except Exception as e:
+        logger.warning("This message does not affect normal use: while loading the Embedding library from file, {}".format(e))
+        # logger.warning("If this is your first time importing knowledge, or you have not imported any knowledge yet, ignore this error")
+    logger.info("Embedding library loaded")
+    # Initialize the KG
+    kg_manager = KGManager()
+    logger.info("Loading the KG from file")
+    try:
+        kg_manager.load_from_file()
+    except Exception as e:
+        logger.warning("This message does not affect normal use: while loading the KG from file, {}".format(e))
+        # logger.warning("If this is your first time importing knowledge, or you have not imported any knowledge yet, ignore this error")
+    logger.info("KG loaded")
+
+    logger.info(f"KG node count: {len(kg_manager.graph.get_node_list())}")
+    logger.info(f"KG edge count: {len(kg_manager.graph.get_edge_list())}")
+
+    # Data check: paragraph hash sets of the Embedding library vs. the KG
+    for pg_hash in kg_manager.stored_paragraph_hashes:
+        key = PG_NAMESPACE + "-" + pg_hash
+        if key not in embed_manager.stored_pg_hashes:
+            logger.warning(f"KG contains a paragraph not present in the Embedding library: {key}")
+
+    # QA system (for the knowledge base)
+    qa_manager = QAManager(
+        embed_manager,
+        kg_manager,
+        llm_client_list[global_config["embedding"]["provider"]],
+        llm_client_list[global_config["qa"]["llm"]["provider"]],
+    )
+
+    # Memory activation (for the memory store)
+    inspire_manager = MemoryActiveManager(
+        embed_manager,
+        llm_client_list[global_config["embedding"]["provider"]],
+    )
+else:
+    logger.info("LPMM knowledge base disabled, skipping initialization")
+    # Create empty placeholder objects to avoid import errors
+    qa_manager = None
+    inspire_manager = None
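The hunk above gates module-level setup behind a config flag and binds None placeholders so `import` statements in consumer modules keep working. A minimal sketch of that idea, assuming every consumer checks for None before use (the Config and QAManager stand-ins are illustrative, not the repo's classes):

```python
class Config:
    lpmm_knowledge_enable = False  # flip to True to build the real manager


class QAManager:
    def get_knowledge(self, query: str) -> str:
        return f"knowledge for {query!r}"


# Module-level gating: the name is always bound, so `from mod import qa_manager`
# never raises, but disabled deployments get a None placeholder instead.
qa_manager = QAManager() if Config.lpmm_knowledge_enable else None


def get_prompt_info(message: str) -> str:
    if qa_manager is None:  # every consumer repeats this guard
        return ""
    return qa_manager.get_knowledge(message)


print(repr(get_prompt_info("hello")))  # '' while the feature is disabled
```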
@@ -108,7 +108,7 @@ class MessageRecv(Message):
         self.detailed_plain_text = message_dict.get("detailed_plain_text", "")
         self.is_emoji = False
         self.is_picid = False
-        self.is_mentioned = 0.0
+        self.is_mentioned = None
         self.priority_mode = "interest"
         self.priority_info = None
@@ -152,14 +152,10 @@ class MessageRecv(Message):
             elif segment.type == "mention_bot":
+                self.is_mentioned = float(segment.data)
                 return ""
             elif segment.type == "set_priority_mode":
                 # Handle a message segment that sets the priority mode
                 if isinstance(segment.data, str):
                     self.priority_mode = segment.data
                 return ""
             elif segment.type == "priority_info":
                 if isinstance(segment.data, dict):
                     # Handle priority info
                     self.priority_mode = "priority"
                     self.priority_info = segment.data
-                """
-                {
@@ -492,7 +492,11 @@ class NormalChat:
                 # Check whether any user meets the relationship-building criteria
                 asyncio.create_task(self._check_relation_building_conditions(message))

-                await self.reply_one_message(message)
+                do_reply = await self.reply_one_message(message)
+                response_set = do_reply if do_reply else []
+                factor = 0.5
+                cnt = sum([len(r) for r in response_set])
+                await asyncio.sleep(max(1, factor * cnt - 3))  # wait for TTS

                 # Wait a while before checking the queue again
                 await asyncio.sleep(1)
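The added wait is `max(1, factor * cnt - 3)` with `factor = 0.5` and `cnt` the combined length of the replies: roughly half a second per character, with a three-second allowance subtracted and a one-second floor. A quick check of the numbers:

```python
factor = 0.5

for cnt in (0, 8, 20, 60):  # total characters across the reply set
    delay = max(1, factor * cnt - 3)
    print(cnt, delay)
# 0  -> 1     (negative result, floor kicks in)
# 8  -> 1     (0.5 * 8 - 3 = 1, exactly at the floor)
# 20 -> 7.0
# 60 -> 27.0
```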
@@ -956,6 +956,11 @@ async def get_prompt_info(message: str, threshold: float):
     logger.debug(f"Fetching knowledge-base content, source message: {message[:30]}..., message length: {len(message)}")
     # Fetch knowledge from the LPMM knowledge base
     try:
+        # Check whether the LPMM knowledge base is enabled
+        if qa_manager is None:
+            logger.debug("LPMM knowledge base disabled, skipping knowledge retrieval")
+            return ""
+
         found_knowledge_from_lpmm = qa_manager.get_knowledge(message)

         end_time = time.time()
@@ -47,7 +47,8 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
     reply_probability = 0.0
     is_at = False
     is_mentioned = False

+    if message.is_mentioned is not None:
+        return bool(message.is_mentioned), message.is_mentioned
     if (
         message.message_info.additional_config is not None
         and message.message_info.additional_config.get("is_mentioned") is not None
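With `is_mentioned` now either None or a float, the early return collapses the tuple into "was the bot mentioned at all, and how strongly": `bool()` on the float gives the flag, the float itself the reply probability. A sketch of those semantics (the function below is an illustrative stand-in, not the repo's code):

```python
def mentioned(is_mentioned):
    """Mirror of the early return: (mention flag, reply probability)."""
    if is_mentioned is not None:
        return bool(is_mentioned), is_mentioned
    return False, 0.0  # fall back to the legacy keyword/@ checks


print(mentioned(None))  # (False, 0.0) -- adapter sent no signal
print(mentioned(0.0))   # (False, 0.0) -- explicit "not mentioned"
print(mentioned(0.7))   # (True, 0.7)  -- mentioned, 70% reply probability
```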
@@ -35,6 +35,11 @@ class KnowledgeFetcher:

         logger.debug(f"[Private][{self.private_name}] Fetching knowledge from the LPMM knowledge base")
         try:
+            # Check whether the LPMM knowledge base is enabled
+            if qa_manager is None:
+                logger.debug(f"[Private][{self.private_name}] LPMM knowledge base disabled, skipping knowledge retrieval")
+                return "No matching knowledge found"
+
             knowledge_info = qa_manager.get_knowledge(query)
             logger.debug(f"[Private][{self.private_name}] LPMM knowledge base query result: {knowledge_info:150}")
             return knowledge_info
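One aside on the unchanged context line above: `{knowledge_info:150}` in an f-string is a width spec, which pads short strings out to 150 characters but never truncates long ones; a 150-character preview would need a precision spec or a slice. To illustrate:

```python
text = "x" * 200

print(len(f"{text:150}"))    # 200 -- a bare width only pads, never truncates
print(len(f"{text:.150}"))   # 150 -- precision truncates strings
print(len(f"{text[:150]}"))  # 150 -- explicit slice, the usual idiom
```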
@@ -36,6 +36,11 @@ class SearchKnowledgeFromLPMMTool(BaseTool):
         query = function_args.get("query")
         # threshold = function_args.get("threshold", 0.4)

+        # Check whether the LPMM knowledge base is enabled
+        if qa_manager is None:
+            logger.debug("LPMM knowledge base disabled, skipping knowledge retrieval")
+            return {"type": "info", "id": query, "content": "LPMM knowledge base is disabled"}
+
         # Call the knowledge-base search

         knowledge_info = qa_manager.get_knowledge(query)