Merge branch 'dev' of https://github.com/MaiM-with-u/MaiBot into dev
This commit is contained in:
18
bot.py
18
bot.py
@@ -7,12 +7,16 @@ from pathlib import Path
|
||||
import time
|
||||
import platform
|
||||
from dotenv import load_dotenv
|
||||
from src.common.logger import get_module_logger
|
||||
from src.common.logger import get_module_logger, LogConfig, CONFIRM_STYLE_CONFIG
|
||||
from src.common.crash_logger import install_crash_handler
|
||||
from src.main import MainSystem
|
||||
|
||||
logger = get_module_logger("main_bot")
|
||||
|
||||
confirm_logger_config = LogConfig(
|
||||
console_format=CONFIRM_STYLE_CONFIG["console_format"],
|
||||
file_format=CONFIRM_STYLE_CONFIG["file_format"],
|
||||
)
|
||||
confirm_logger = get_module_logger("main_bot", config=confirm_logger_config)
|
||||
# 获取没有加载env时的环境变量
|
||||
env_mask = {key: os.getenv(key) for key in os.environ}
|
||||
|
||||
@@ -166,8 +170,8 @@ def check_eula():
|
||||
|
||||
# 如果EULA或隐私条款有更新,提示用户重新确认
|
||||
if eula_updated or privacy_updated:
|
||||
print("EULA或隐私条款内容已更新,请在阅读后重新确认,继续运行视为同意更新后的以上两款协议")
|
||||
print(
|
||||
confirm_logger.critical("EULA或隐私条款内容已更新,请在阅读后重新确认,继续运行视为同意更新后的以上两款协议")
|
||||
confirm_logger.critical(
|
||||
f'输入"同意"或"confirmed"或设置环境变量"EULA_AGREE={eula_new_hash}"和"PRIVACY_AGREE={privacy_new_hash}"继续运行'
|
||||
)
|
||||
while True:
|
||||
@@ -176,14 +180,14 @@ def check_eula():
|
||||
# print("确认成功,继续运行")
|
||||
# print(f"确认成功,继续运行{eula_updated} {privacy_updated}")
|
||||
if eula_updated:
|
||||
print(f"更新EULA确认文件{eula_new_hash}")
|
||||
logger.info(f"更新EULA确认文件{eula_new_hash}")
|
||||
eula_confirm_file.write_text(eula_new_hash, encoding="utf-8")
|
||||
if privacy_updated:
|
||||
print(f"更新隐私条款确认文件{privacy_new_hash}")
|
||||
logger.info(f"更新隐私条款确认文件{privacy_new_hash}")
|
||||
privacy_confirm_file.write_text(privacy_new_hash, encoding="utf-8")
|
||||
break
|
||||
else:
|
||||
print('请输入"同意"或"confirmed"以继续运行')
|
||||
confirm_logger.critical('请输入"同意"或"confirmed"以继续运行')
|
||||
return
|
||||
elif eula_confirmed and privacy_confirmed:
|
||||
return
|
||||
|
||||
@@ -290,6 +290,12 @@ WILLING_STYLE_CONFIG = {
|
||||
},
|
||||
}
|
||||
|
||||
CONFIRM_STYLE_CONFIG = {
|
||||
"console_format": (
|
||||
"<RED>{message}</RED>"
|
||||
), # noqa: E501
|
||||
"file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | EULA与PRIVACY确认 | {message}"),
|
||||
}
|
||||
|
||||
# 根据SIMPLE_OUTPUT选择配置
|
||||
MEMORY_STYLE_CONFIG = MEMORY_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MEMORY_STYLE_CONFIG["advanced"]
|
||||
|
||||
@@ -9,6 +9,7 @@ from src.common.logger import get_module_logger, LogConfig, HEARTFLOW_STYLE_CONF
|
||||
from src.individuality.individuality import Individuality
|
||||
import time
|
||||
import random
|
||||
from typing import Dict, Any
|
||||
|
||||
heartflow_config = LogConfig(
|
||||
# 使用海马体专用样式
|
||||
@@ -39,7 +40,7 @@ class Heartflow:
|
||||
model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
|
||||
)
|
||||
|
||||
self._subheartflows = {}
|
||||
self._subheartflows: Dict[Any, SubHeartflow] = {}
|
||||
self.active_subheartflows_nums = 0
|
||||
|
||||
async def _cleanup_inactive_subheartflows(self):
|
||||
|
||||
@@ -142,7 +142,11 @@ class AutoSpeakManager:
|
||||
message_manager.add_message(thinking_message)
|
||||
|
||||
# 生成自主发言内容
|
||||
try:
|
||||
response, raw_content = await self.gpt.generate_response(message)
|
||||
except Exception as e:
|
||||
logger.error(f"生成自主发言内容时发生错误: {e}")
|
||||
return False
|
||||
|
||||
if response:
|
||||
message_set = MessageSet(None, think_id) # 不需要chat_stream
|
||||
|
||||
@@ -59,11 +59,7 @@ class ReasoningChat:
|
||||
|
||||
return thinking_id
|
||||
|
||||
async def _send_response_messages(self,
|
||||
message,
|
||||
chat,
|
||||
response_set:List[str],
|
||||
thinking_id) -> MessageSending:
|
||||
async def _send_response_messages(self, message, chat, response_set: List[str], thinking_id) -> MessageSending:
|
||||
"""发送回复消息"""
|
||||
container = message_manager.get_container(chat.stream_id)
|
||||
thinking_message = None
|
||||
@@ -248,11 +244,15 @@ class ReasoningChat:
|
||||
|
||||
# 生成回复
|
||||
timer1 = time.time()
|
||||
try:
|
||||
response_set = await self.gpt.generate_response(message, thinking_id)
|
||||
timer2 = time.time()
|
||||
timing_results["生成回复"] = timer2 - timer1
|
||||
|
||||
info_catcher.catch_after_generate_response(timing_results["生成回复"])
|
||||
except Exception as e:
|
||||
logger.error(f"回复生成出现错误:str{e}")
|
||||
response_set = None
|
||||
|
||||
if not response_set:
|
||||
logger.info("为什么生成回复失败?")
|
||||
@@ -266,7 +266,6 @@ class ReasoningChat:
|
||||
|
||||
info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg)
|
||||
|
||||
|
||||
info_catcher.done_catch()
|
||||
|
||||
# 处理表情包
|
||||
|
||||
@@ -934,9 +934,437 @@ class EntorhinalCortex:
|
||||
logger.success(f"[数据库] 同步了 {len(memory_nodes)} 个节点和 {len(memory_edges)} 条边")
|
||||
|
||||
|
||||
# 海马体
|
||||
class Hippocampus:
|
||||
def __init__(self):
|
||||
self.memory_graph = Memory_graph()
|
||||
self.llm_topic_judge = None
|
||||
self.llm_summary_by_topic = None
|
||||
self.entorhinal_cortex = None
|
||||
self.parahippocampal_gyrus = None
|
||||
self.config = None
|
||||
|
||||
def initialize(self, global_config):
|
||||
self.config = MemoryConfig.from_global_config(global_config)
|
||||
# 初始化子组件
|
||||
self.entorhinal_cortex = EntorhinalCortex(self)
|
||||
self.parahippocampal_gyrus = ParahippocampalGyrus(self)
|
||||
# 从数据库加载记忆图
|
||||
self.entorhinal_cortex.sync_memory_from_db()
|
||||
self.llm_topic_judge = LLM_request(self.config.llm_topic_judge, request_type="memory")
|
||||
self.llm_summary_by_topic = LLM_request(self.config.llm_summary_by_topic, request_type="memory")
|
||||
|
||||
def get_all_node_names(self) -> list:
|
||||
"""获取记忆图中所有节点的名字列表"""
|
||||
return list(self.memory_graph.G.nodes())
|
||||
|
||||
def calculate_node_hash(self, concept, memory_items) -> int:
|
||||
"""计算节点的特征值"""
|
||||
if not isinstance(memory_items, list):
|
||||
memory_items = [memory_items] if memory_items else []
|
||||
sorted_items = sorted(memory_items)
|
||||
content = f"{concept}:{'|'.join(sorted_items)}"
|
||||
return hash(content)
|
||||
|
||||
def calculate_edge_hash(self, source, target) -> int:
|
||||
"""计算边的特征值"""
|
||||
nodes = sorted([source, target])
|
||||
return hash(f"{nodes[0]}:{nodes[1]}")
|
||||
|
||||
def find_topic_llm(self, text, topic_num):
|
||||
prompt = (
|
||||
f"这是一段文字:{text}。请你从这段话中总结出最多{topic_num}个关键的概念,可以是名词,动词,或者特定人物,帮我列出来,"
|
||||
f"将主题用逗号隔开,并加上<>,例如<主题1>,<主题2>......尽可能精简。只需要列举最多{topic_num}个话题就好,不要有序号,不要告诉我其他内容。"
|
||||
f"如果确定找不出主题或者没有明显主题,返回<none>。"
|
||||
)
|
||||
return prompt
|
||||
|
||||
def topic_what(self, text, topic, time_info):
|
||||
prompt = (
|
||||
f'这是一段文字,{time_info}:{text}。我想让你基于这段文字来概括"{topic}"这个概念,帮我总结成一句自然的话,'
|
||||
f"可以包含时间和人物,以及具体的观点。只输出这句话就好"
|
||||
)
|
||||
return prompt
|
||||
|
||||
def calculate_topic_num(self, text, compress_rate):
|
||||
"""计算文本的话题数量"""
|
||||
information_content = calculate_information_content(text)
|
||||
topic_by_length = text.count("\n") * compress_rate
|
||||
topic_by_information_content = max(1, min(5, int((information_content - 3) * 2)))
|
||||
topic_num = int((topic_by_length + topic_by_information_content) / 2)
|
||||
logger.debug(
|
||||
f"topic_by_length: {topic_by_length}, topic_by_information_content: {topic_by_information_content}, "
|
||||
f"topic_num: {topic_num}"
|
||||
)
|
||||
return topic_num
|
||||
|
||||
def get_memory_from_keyword(self, keyword: str, max_depth: int = 2) -> list:
|
||||
"""从关键词获取相关记忆。
|
||||
|
||||
Args:
|
||||
keyword (str): 关键词
|
||||
max_depth (int, optional): 记忆检索深度,默认为2。1表示只获取直接相关的记忆,2表示获取间接相关的记忆。
|
||||
|
||||
Returns:
|
||||
list: 记忆列表,每个元素是一个元组 (topic, memory_items, similarity)
|
||||
- topic: str, 记忆主题
|
||||
- memory_items: list, 该主题下的记忆项列表
|
||||
- similarity: float, 与关键词的相似度
|
||||
"""
|
||||
if not keyword:
|
||||
return []
|
||||
|
||||
# 获取所有节点
|
||||
all_nodes = list(self.memory_graph.G.nodes())
|
||||
memories = []
|
||||
|
||||
# 计算关键词的词集合
|
||||
keyword_words = set(jieba.cut(keyword))
|
||||
|
||||
# 遍历所有节点,计算相似度
|
||||
for node in all_nodes:
|
||||
node_words = set(jieba.cut(node))
|
||||
all_words = keyword_words | node_words
|
||||
v1 = [1 if word in keyword_words else 0 for word in all_words]
|
||||
v2 = [1 if word in node_words else 0 for word in all_words]
|
||||
similarity = cosine_similarity(v1, v2)
|
||||
|
||||
# 如果相似度超过阈值,获取该节点的记忆
|
||||
if similarity >= 0.3: # 可以调整这个阈值
|
||||
node_data = self.memory_graph.G.nodes[node]
|
||||
memory_items = node_data.get("memory_items", [])
|
||||
if not isinstance(memory_items, list):
|
||||
memory_items = [memory_items] if memory_items else []
|
||||
|
||||
memories.append((node, memory_items, similarity))
|
||||
|
||||
# 按相似度降序排序
|
||||
memories.sort(key=lambda x: x[2], reverse=True)
|
||||
return memories
|
||||
|
||||
async def get_memory_from_text(
|
||||
self,
|
||||
text: str,
|
||||
max_memory_num: int = 3,
|
||||
max_memory_length: int = 2,
|
||||
max_depth: int = 3,
|
||||
fast_retrieval: bool = False,
|
||||
) -> list:
|
||||
"""从文本中提取关键词并获取相关记忆。
|
||||
|
||||
Args:
|
||||
text (str): 输入文本
|
||||
num (int, optional): 需要返回的记忆数量。默认为5。
|
||||
max_depth (int, optional): 记忆检索深度。默认为2。
|
||||
fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
|
||||
如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
|
||||
如果为False,使用LLM提取关键词,速度较慢但更准确。
|
||||
|
||||
Returns:
|
||||
list: 记忆列表,每个元素是一个元组 (topic, memory_items, similarity)
|
||||
- topic: str, 记忆主题
|
||||
- memory_items: list, 该主题下的记忆项列表
|
||||
- similarity: float, 与文本的相似度
|
||||
"""
|
||||
if not text:
|
||||
return []
|
||||
|
||||
if fast_retrieval:
|
||||
# 使用jieba分词提取关键词
|
||||
words = jieba.cut(text)
|
||||
# 过滤掉停用词和单字词
|
||||
keywords = [word for word in words if len(word) > 1]
|
||||
# 去重
|
||||
keywords = list(set(keywords))
|
||||
# 限制关键词数量
|
||||
keywords = keywords[:5]
|
||||
else:
|
||||
# 使用LLM提取关键词
|
||||
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
|
||||
# logger.info(f"提取关键词数量: {topic_num}")
|
||||
topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
|
||||
|
||||
# 提取关键词
|
||||
keywords = re.findall(r"<([^>]+)>", topics_response[0])
|
||||
if not keywords:
|
||||
keywords = []
|
||||
else:
|
||||
keywords = [
|
||||
keyword.strip()
|
||||
for keyword in ",".join(keywords).replace(",", ",").replace("、", ",").replace(" ", ",").split(",")
|
||||
if keyword.strip()
|
||||
]
|
||||
|
||||
# logger.info(f"提取的关键词: {', '.join(keywords)}")
|
||||
|
||||
# 过滤掉不存在于记忆图中的关键词
|
||||
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
|
||||
if not valid_keywords:
|
||||
logger.info("没有找到有效的关键词节点")
|
||||
return []
|
||||
|
||||
logger.info(f"有效的关键词: {', '.join(valid_keywords)}")
|
||||
|
||||
# 从每个关键词获取记忆
|
||||
all_memories = []
|
||||
activate_map = {} # 存储每个词的累计激活值
|
||||
|
||||
# 对每个关键词进行扩散式检索
|
||||
for keyword in valid_keywords:
|
||||
logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
|
||||
# 初始化激活值
|
||||
activation_values = {keyword: 1.0}
|
||||
# 记录已访问的节点
|
||||
visited_nodes = {keyword}
|
||||
# 待处理的节点队列,每个元素是(节点, 激活值, 当前深度)
|
||||
nodes_to_process = [(keyword, 1.0, 0)]
|
||||
|
||||
while nodes_to_process:
|
||||
current_node, current_activation, current_depth = nodes_to_process.pop(0)
|
||||
|
||||
# 如果激活值小于0或超过最大深度,停止扩散
|
||||
if current_activation <= 0 or current_depth >= max_depth:
|
||||
continue
|
||||
|
||||
# 获取当前节点的所有邻居
|
||||
neighbors = list(self.memory_graph.G.neighbors(current_node))
|
||||
|
||||
for neighbor in neighbors:
|
||||
if neighbor in visited_nodes:
|
||||
continue
|
||||
|
||||
# 获取连接强度
|
||||
edge_data = self.memory_graph.G[current_node][neighbor]
|
||||
strength = edge_data.get("strength", 1)
|
||||
|
||||
# 计算新的激活值
|
||||
new_activation = current_activation - (1 / strength)
|
||||
|
||||
if new_activation > 0:
|
||||
activation_values[neighbor] = new_activation
|
||||
visited_nodes.add(neighbor)
|
||||
nodes_to_process.append((neighbor, new_activation, current_depth + 1))
|
||||
logger.debug(
|
||||
f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
|
||||
) # noqa: E501
|
||||
|
||||
# 更新激活映射
|
||||
for node, activation_value in activation_values.items():
|
||||
if activation_value > 0:
|
||||
if node in activate_map:
|
||||
activate_map[node] += activation_value
|
||||
else:
|
||||
activate_map[node] = activation_value
|
||||
|
||||
# 输出激活映射
|
||||
# logger.info("激活映射统计:")
|
||||
# for node, total_activation in sorted(activate_map.items(), key=lambda x: x[1], reverse=True):
|
||||
# logger.info(f"节点 '{node}': 累计激活值 = {total_activation:.2f}")
|
||||
|
||||
# 基于激活值平方的独立概率选择
|
||||
remember_map = {}
|
||||
# logger.info("基于激活值平方的归一化选择:")
|
||||
|
||||
# 计算所有激活值的平方和
|
||||
total_squared_activation = sum(activation**2 for activation in activate_map.values())
|
||||
if total_squared_activation > 0:
|
||||
# 计算归一化的激活值
|
||||
normalized_activations = {
|
||||
node: (activation**2) / total_squared_activation for node, activation in activate_map.items()
|
||||
}
|
||||
|
||||
# 按归一化激活值排序并选择前max_memory_num个
|
||||
sorted_nodes = sorted(normalized_activations.items(), key=lambda x: x[1], reverse=True)[:max_memory_num]
|
||||
|
||||
# 将选中的节点添加到remember_map
|
||||
for node, normalized_activation in sorted_nodes:
|
||||
remember_map[node] = activate_map[node] # 使用原始激活值
|
||||
logger.debug(
|
||||
f"节点 '{node}' (归一化激活值: {normalized_activation:.2f}, 激活值: {activate_map[node]:.2f})"
|
||||
)
|
||||
else:
|
||||
logger.info("没有有效的激活值")
|
||||
|
||||
# 从选中的节点中提取记忆
|
||||
all_memories = []
|
||||
# logger.info("开始从选中的节点中提取记忆:")
|
||||
for node, activation in remember_map.items():
|
||||
logger.debug(f"处理节点 '{node}' (激活值: {activation:.2f}):")
|
||||
node_data = self.memory_graph.G.nodes[node]
|
||||
memory_items = node_data.get("memory_items", [])
|
||||
if not isinstance(memory_items, list):
|
||||
memory_items = [memory_items] if memory_items else []
|
||||
|
||||
if memory_items:
|
||||
logger.debug(f"节点包含 {len(memory_items)} 条记忆")
|
||||
# 计算每条记忆与输入文本的相似度
|
||||
memory_similarities = []
|
||||
for memory in memory_items:
|
||||
# 计算与输入文本的相似度
|
||||
memory_words = set(jieba.cut(memory))
|
||||
text_words = set(jieba.cut(text))
|
||||
all_words = memory_words | text_words
|
||||
v1 = [1 if word in memory_words else 0 for word in all_words]
|
||||
v2 = [1 if word in text_words else 0 for word in all_words]
|
||||
similarity = cosine_similarity(v1, v2)
|
||||
memory_similarities.append((memory, similarity))
|
||||
|
||||
# 按相似度排序
|
||||
memory_similarities.sort(key=lambda x: x[1], reverse=True)
|
||||
# 获取最匹配的记忆
|
||||
top_memories = memory_similarities[:max_memory_length]
|
||||
|
||||
# 添加到结果中
|
||||
for memory, similarity in top_memories:
|
||||
all_memories.append((node, [memory], similarity))
|
||||
# logger.info(f"选中记忆: {memory} (相似度: {similarity:.2f})")
|
||||
else:
|
||||
logger.info("节点没有记忆")
|
||||
|
||||
# 去重(基于记忆内容)
|
||||
logger.debug("开始记忆去重:")
|
||||
seen_memories = set()
|
||||
unique_memories = []
|
||||
for topic, memory_items, activation_value in all_memories:
|
||||
memory = memory_items[0] # 因为每个topic只有一条记忆
|
||||
if memory not in seen_memories:
|
||||
seen_memories.add(memory)
|
||||
unique_memories.append((topic, memory_items, activation_value))
|
||||
logger.debug(f"保留记忆: {memory} (来自节点: {topic}, 激活值: {activation_value:.2f})")
|
||||
else:
|
||||
logger.debug(f"跳过重复记忆: {memory} (来自节点: {topic})")
|
||||
|
||||
# 转换为(关键词, 记忆)格式
|
||||
result = []
|
||||
for topic, memory_items, _ in unique_memories:
|
||||
memory = memory_items[0] # 因为每个topic只有一条记忆
|
||||
result.append((topic, memory))
|
||||
logger.info(f"选中记忆: {memory} (来自节点: {topic})")
|
||||
|
||||
return result
|
||||
|
||||
async def get_activate_from_text(self, text: str, max_depth: int = 3, fast_retrieval: bool = False) -> float:
|
||||
"""从文本中提取关键词并获取相关记忆。
|
||||
|
||||
Args:
|
||||
text (str): 输入文本
|
||||
num (int, optional): 需要返回的记忆数量。默认为5。
|
||||
max_depth (int, optional): 记忆检索深度。默认为2。
|
||||
fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
|
||||
如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
|
||||
如果为False,使用LLM提取关键词,速度较慢但更准确。
|
||||
|
||||
Returns:
|
||||
float: 激活节点数与总节点数的比值
|
||||
"""
|
||||
if not text:
|
||||
return 0
|
||||
|
||||
if fast_retrieval:
|
||||
# 使用jieba分词提取关键词
|
||||
words = jieba.cut(text)
|
||||
# 过滤掉停用词和单字词
|
||||
keywords = [word for word in words if len(word) > 1]
|
||||
# 去重
|
||||
keywords = list(set(keywords))
|
||||
# 限制关键词数量
|
||||
keywords = keywords[:5]
|
||||
else:
|
||||
# 使用LLM提取关键词
|
||||
topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量
|
||||
# logger.info(f"提取关键词数量: {topic_num}")
|
||||
topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
|
||||
|
||||
# 提取关键词
|
||||
keywords = re.findall(r"<([^>]+)>", topics_response[0])
|
||||
if not keywords:
|
||||
keywords = []
|
||||
else:
|
||||
keywords = [
|
||||
keyword.strip()
|
||||
for keyword in ",".join(keywords).replace(",", ",").replace("、", ",").replace(" ", ",").split(",")
|
||||
if keyword.strip()
|
||||
]
|
||||
|
||||
# logger.info(f"提取的关键词: {', '.join(keywords)}")
|
||||
|
||||
# 过滤掉不存在于记忆图中的关键词
|
||||
valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
|
||||
if not valid_keywords:
|
||||
logger.info("没有找到有效的关键词节点")
|
||||
return 0
|
||||
|
||||
logger.info(f"有效的关键词: {', '.join(valid_keywords)}")
|
||||
|
||||
# 从每个关键词获取记忆
|
||||
activate_map = {} # 存储每个词的累计激活值
|
||||
|
||||
# 对每个关键词进行扩散式检索
|
||||
for keyword in valid_keywords:
|
||||
logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
|
||||
# 初始化激活值
|
||||
activation_values = {keyword: 1.0}
|
||||
# 记录已访问的节点
|
||||
visited_nodes = {keyword}
|
||||
# 待处理的节点队列,每个元素是(节点, 激活值, 当前深度)
|
||||
nodes_to_process = [(keyword, 1.0, 0)]
|
||||
|
||||
while nodes_to_process:
|
||||
current_node, current_activation, current_depth = nodes_to_process.pop(0)
|
||||
|
||||
# 如果激活值小于0或超过最大深度,停止扩散
|
||||
if current_activation <= 0 or current_depth >= max_depth:
|
||||
continue
|
||||
|
||||
# 获取当前节点的所有邻居
|
||||
neighbors = list(self.memory_graph.G.neighbors(current_node))
|
||||
|
||||
for neighbor in neighbors:
|
||||
if neighbor in visited_nodes:
|
||||
continue
|
||||
|
||||
# 获取连接强度
|
||||
edge_data = self.memory_graph.G[current_node][neighbor]
|
||||
strength = edge_data.get("strength", 1)
|
||||
|
||||
# 计算新的激活值
|
||||
new_activation = current_activation - (1 / strength)
|
||||
|
||||
if new_activation > 0:
|
||||
activation_values[neighbor] = new_activation
|
||||
visited_nodes.add(neighbor)
|
||||
nodes_to_process.append((neighbor, new_activation, current_depth + 1))
|
||||
# logger.debug(
|
||||
# f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})") # noqa: E501
|
||||
|
||||
# 更新激活映射
|
||||
for node, activation_value in activation_values.items():
|
||||
if activation_value > 0:
|
||||
if node in activate_map:
|
||||
activate_map[node] += activation_value
|
||||
else:
|
||||
activate_map[node] = activation_value
|
||||
|
||||
# 输出激活映射
|
||||
# logger.info("激活映射统计:")
|
||||
# for node, total_activation in sorted(activate_map.items(), key=lambda x: x[1], reverse=True):
|
||||
# logger.info(f"节点 '{node}': 累计激活值 = {total_activation:.2f}")
|
||||
|
||||
# 计算激活节点数与总节点数的比值
|
||||
total_activation = sum(activate_map.values())
|
||||
logger.info(f"总激活值: {total_activation:.2f}")
|
||||
total_nodes = len(self.memory_graph.G.nodes())
|
||||
# activated_nodes = len(activate_map)
|
||||
activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
|
||||
activation_ratio = activation_ratio * 60
|
||||
logger.info(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
|
||||
|
||||
return activation_ratio
|
||||
|
||||
|
||||
# 负责整合,遗忘,合并记忆
|
||||
class ParahippocampalGyrus:
|
||||
def __init__(self, hippocampus):
|
||||
def __init__(self, hippocampus: Hippocampus):
|
||||
self.hippocampus = hippocampus
|
||||
self.memory_graph = hippocampus.memory_graph
|
||||
self.config = hippocampus.config
|
||||
@@ -1025,8 +1453,12 @@ class ParahippocampalGyrus:
|
||||
tasks = []
|
||||
for topic in filtered_topics:
|
||||
topic_what_prompt = self.hippocampus.topic_what(input_text, topic, time_info)
|
||||
try:
|
||||
task = self.hippocampus.llm_summary_by_topic.generate_response_async(topic_what_prompt)
|
||||
tasks.append((topic.strip(), task))
|
||||
except Exception as e:
|
||||
logger.error(f"生成话题 '{topic}' 的摘要时发生错误: {e}")
|
||||
continue
|
||||
|
||||
# 等待所有任务完成
|
||||
compressed_memory = set()
|
||||
@@ -1075,7 +1507,11 @@ class ParahippocampalGyrus:
|
||||
logger.debug(f"进度: [{bar}] {progress:.1f}% ({i}/{len(memory_samples)})")
|
||||
|
||||
compress_rate = self.config.memory_compress_rate
|
||||
try:
|
||||
compressed_memory, similar_topics_dict = await self.memory_compress(messages, compress_rate)
|
||||
except Exception as e:
|
||||
logger.error(f"压缩记忆时发生错误: {e}")
|
||||
continue
|
||||
logger.debug(f"压缩后记忆数量: {compressed_memory},似曾相识的话题: {similar_topics_dict}")
|
||||
|
||||
current_time = datetime.datetime.now().timestamp()
|
||||
@@ -1247,7 +1683,6 @@ class ParahippocampalGyrus:
|
||||
logger.info(f"[遗忘] 总耗时: {end_time - start_time:.2f}秒")
|
||||
|
||||
|
||||
|
||||
class HippocampusManager:
|
||||
_instance = None
|
||||
_hippocampus = None
|
||||
@@ -1318,13 +1753,14 @@ class HippocampusManager:
|
||||
if not self._initialized:
|
||||
raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
|
||||
try:
|
||||
response = await self._hippocampus.get_memory_from_text(text, max_memory_num, max_memory_length, max_depth, fast_retrieval)
|
||||
response = await self._hippocampus.get_memory_from_text(
|
||||
text, max_memory_num, max_memory_length, max_depth, fast_retrieval
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"文本激活记忆失败: {e}")
|
||||
response = []
|
||||
return response
|
||||
|
||||
|
||||
async def get_activate_from_text(self, text: str, max_depth: int = 3, fast_retrieval: bool = False) -> float:
|
||||
"""从文本中获取激活值的公共接口"""
|
||||
if not self._initialized:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import re
|
||||
from typing import Union
|
||||
|
||||
from ...common.database import db
|
||||
@@ -7,19 +8,34 @@ from src.common.logger import get_module_logger
|
||||
|
||||
logger = get_module_logger("message_storage")
|
||||
|
||||
|
||||
class MessageStorage:
|
||||
async def store_message(self, message: Union[MessageSending, MessageRecv], chat_stream: ChatStream) -> None:
|
||||
"""存储消息到数据库"""
|
||||
try:
|
||||
# 莫越权 救世啊
|
||||
pattern = r"<MainRule>.*?</MainRule>|<schedule>.*?</schedule>|<UserMessage>.*?</UserMessage>"
|
||||
|
||||
processed_plain_text = message.processed_plain_text
|
||||
if processed_plain_text:
|
||||
filtered_processed_plain_text = re.sub(pattern, "", processed_plain_text, flags=re.DOTALL)
|
||||
else:
|
||||
filtered_processed_plain_text = ""
|
||||
|
||||
detailed_plain_text = message.detailed_plain_text
|
||||
if detailed_plain_text:
|
||||
filtered_detailed_plain_text = re.sub(pattern, "", detailed_plain_text, flags=re.DOTALL)
|
||||
else:
|
||||
filtered_detailed_plain_text = ""
|
||||
|
||||
message_data = {
|
||||
"message_id": message.message_info.message_id,
|
||||
"time": message.message_info.time,
|
||||
"chat_id": chat_stream.stream_id,
|
||||
"chat_info": chat_stream.to_dict(),
|
||||
"user_info": message.message_info.user_info.to_dict(),
|
||||
"processed_plain_text": message.processed_plain_text,
|
||||
"detailed_plain_text": message.detailed_plain_text,
|
||||
# 使用过滤后的文本
|
||||
"processed_plain_text": filtered_processed_plain_text,
|
||||
"detailed_plain_text": filtered_detailed_plain_text,
|
||||
"memorized_times": message.memorized_times,
|
||||
}
|
||||
db.messages.insert_one(message_data)
|
||||
|
||||
Reference in New Issue
Block a user