This commit is contained in:
SengokuCola
2025-07-06 11:47:09 +08:00
9 changed files with 99 additions and 59 deletions

View File

@@ -71,6 +71,7 @@ class WorkingMemoryProcessor(BaseProcessor):
"""
working_memory = None
chat_info = ""
chat_obs = None
try:
for observation in observations:
if isinstance(observation, WorkingMemoryObservation):
@@ -79,10 +80,15 @@ class WorkingMemoryProcessor(BaseProcessor):
chat_info = observation.get_observe_info()
chat_obs = observation
# 检查是否有待压缩内容
-if chat_obs.compressor_prompt:
+if chat_obs and chat_obs.compressor_prompt:
logger.debug(f"{self.log_prefix} 压缩聊天记忆")
await self.compress_chat_memory(working_memory, chat_obs)
# 检查working_memory是否为None
if working_memory is None:
logger.debug(f"{self.log_prefix} 没有找到工作记忆观察,跳过处理")
return []
all_memory = working_memory.get_all_memories()
if not all_memory:
logger.debug(f"{self.log_prefix} 目前没有工作记忆,跳过提取")
@@ -183,6 +189,11 @@ class WorkingMemoryProcessor(BaseProcessor):
working_memory: 工作记忆对象
obs: 聊天观察对象
"""
# 检查working_memory是否为None
if working_memory is None:
logger.warning(f"{self.log_prefix} 工作记忆对象为None无法压缩聊天记忆")
return
try:
summary_result, _ = await self.llm_model.generate_response_async(obs.compressor_prompt)
if not summary_result:
@@ -235,6 +246,11 @@ class WorkingMemoryProcessor(BaseProcessor):
memory_id1: 第一个记忆ID
memory_id2: 第二个记忆ID
"""
# 检查working_memory是否为None
if working_memory is None:
logger.warning(f"{self.log_prefix} 工作记忆对象为None无法合并记忆")
return
try:
merged_memory = await working_memory.merge_memory(memory_id1, memory_id2)
logger.debug(f"{self.log_prefix} 合并后的记忆梗概: {merged_memory.brief}")

View File

@@ -29,6 +29,7 @@ def init_prompt():
{chat_context_description},以下是具体的聊天内容:
{chat_content_block}
{moderation_prompt}
现在请你根据聊天内容选择合适的action:
{action_options_text}

View File

@@ -5,11 +5,14 @@ from src.chat.knowledge.mem_active_manager import MemoryActiveManager
from src.chat.knowledge.qa_manager import QAManager
from src.chat.knowledge.kg_manager import KGManager
from src.chat.knowledge.global_logger import logger
from src.config.config import global_config as bot_global_config
# try:
# import quick_algo
# except ImportError:
# print("quick_algo not found, please install it first")
# 检查LPMM知识库是否启用
if bot_global_config.lpmm_knowledge.enable:
logger.info("正在初始化Mai-LPMM\n")
logger.info("创建LLM客户端")
llm_client_list = dict()
@@ -41,7 +44,6 @@ logger.info("KG加载完成")
logger.info(f"KG节点数量{len(kg_manager.graph.get_node_list())}")
logger.info(f"KG边数量{len(kg_manager.graph.get_edge_list())}")
# 数据比对Embedding库与KG的段落hash集合
for pg_hash in kg_manager.stored_paragraph_hashes:
key = PG_NAMESPACE + "-" + pg_hash
@@ -62,3 +64,8 @@ inspire_manager = MemoryActiveManager(
embed_manager,
llm_client_list[global_config["embedding"]["provider"]],
)
else:
logger.info("LPMM知识库已禁用跳过初始化")
# 创建空的占位符对象,避免导入错误
qa_manager = None
inspire_manager = None

View File

@@ -108,7 +108,7 @@ class MessageRecv(Message):
self.detailed_plain_text = message_dict.get("detailed_plain_text", "")
self.is_emoji = False
self.is_picid = False
-self.is_mentioned = 0.0
+self.is_mentioned = None
self.priority_mode = "interest"
self.priority_info = None
@@ -152,14 +152,10 @@ class MessageRecv(Message):
elif segment.type == "mention_bot":
self.is_mentioned = float(segment.data)
return ""
elif segment.type == "set_priority_mode":
# 处理设置优先级模式的消息段
if isinstance(segment.data, str):
self.priority_mode = segment.data
return ""
elif segment.type == "priority_info":
if isinstance(segment.data, dict):
# 处理优先级信息
self.priority_mode = "priority"
self.priority_info = segment.data
"""
{

View File

@@ -492,7 +492,11 @@ class NormalChat:
# 检查是否有用户满足关系构建条件
asyncio.create_task(self._check_relation_building_conditions(message))
-await self.reply_one_message(message)
+do_reply = await self.reply_one_message(message)
response_set = do_reply if do_reply else []
factor = 0.5
cnt = sum([len(r) for r in response_set])
await asyncio.sleep(max(1, factor * cnt - 3))  # 等待tts
# 等待一段时间再检查队列
await asyncio.sleep(1)

View File

@@ -956,6 +956,11 @@ async def get_prompt_info(message: str, threshold: float):
logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")
# 从LPMM知识库获取知识
try:
# 检查LPMM知识库是否启用
if qa_manager is None:
logger.debug("LPMM知识库已禁用跳过知识获取")
return ""
found_knowledge_from_lpmm = qa_manager.get_knowledge(message)
end_time = time.time()

View File

@@ -47,7 +47,8 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
reply_probability = 0.0
is_at = False
is_mentioned = False
if message.is_mentioned is not None:
return bool(message.is_mentioned), message.is_mentioned
if (
message.message_info.additional_config is not None
and message.message_info.additional_config.get("is_mentioned") is not None

View File

@@ -35,6 +35,11 @@ class KnowledgeFetcher:
logger.debug(f"[私聊][{self.private_name}]正在从LPMM知识库中获取知识")
try:
# 检查LPMM知识库是否启用
if qa_manager is None:
logger.debug(f"[私聊][{self.private_name}]LPMM知识库已禁用跳过知识获取")
return "未找到匹配的知识"
knowledge_info = qa_manager.get_knowledge(query)
logger.debug(f"[私聊][{self.private_name}]LPMM知识库查询结果: {knowledge_info:150}")
return knowledge_info

View File

@@ -36,6 +36,11 @@ class SearchKnowledgeFromLPMMTool(BaseTool):
query = function_args.get("query")
# threshold = function_args.get("threshold", 0.4)
# 检查LPMM知识库是否启用
if qa_manager is None:
logger.debug("LPMM知识库已禁用跳过知识获取")
return {"type": "info", "id": query, "content": "LPMM知识库已禁用"}
# 调用知识库搜索
knowledge_info = qa_manager.get_knowledge(query)