v0.3.3 Asynchronous memory processing; GUI fixes
@@ -7,6 +7,23 @@ import threading
import queue
import sys
import os
from dotenv import load_dotenv

# 获取当前文件的目录
current_dir = os.path.dirname(os.path.abspath(__file__))
# 获取项目根目录
root_dir = os.path.abspath(os.path.join(current_dir, '..', '..'))

# 加载环境变量
if os.path.exists(os.path.join(root_dir, '.env.dev')):
    load_dotenv(os.path.join(root_dir, '.env.dev'))
    print("成功加载开发环境配置")
elif os.path.exists(os.path.join(root_dir, '.env.prod')):
    load_dotenv(os.path.join(root_dir, '.env.prod'))
    print("成功加载生产环境配置")
else:
    print("未找到环境配置文件")
    sys.exit(1)

from pymongo import MongoClient
from typing import Optional

@@ -14,14 +31,23 @@ from typing import Optional
class Database:
    _instance: Optional["Database"] = None

    def __init__(self, host: str, port: int, db_name: str):
    def __init__(self, host: str, port: int, db_name: str, username: str = None, password: str = None, auth_source: str = None):
        if username and password:
            self.client = MongoClient(
                host=host,
                port=port,
                username=username,
                password=password,
                authSource=auth_source or 'admin'
            )
        else:
            self.client = MongoClient(host, port)
        self.db = self.client[db_name]

    @classmethod
    def initialize(cls, host: str, port: int, db_name: str) -> "Database":
    def initialize(cls, host: str, port: int, db_name: str, username: str = None, password: str = None, auth_source: str = None) -> "Database":
        if cls._instance is None:
            cls._instance = cls(host, port, db_name)
            cls._instance = cls(host, port, db_name, username, password, auth_source)
        return cls._instance

    @classmethod

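A minimal usage sketch of the new authenticated initialization, assuming the credentials come from the environment files loaded above; the environment variable names, the default values, and the import path are illustrative guesses, not part of this commit:

# Hypothetical wiring; env var names and import path are assumptions, not from this commit.
import os
from src.common.database import Database  # import path is a guess

db = Database.initialize(
    host=os.getenv("MONGODB_HOST", "127.0.0.1"),
    port=int(os.getenv("MONGODB_PORT", "27017")),
    db_name=os.getenv("DATABASE_NAME", "my_db"),
    username=os.getenv("MONGODB_USERNAME"),        # None -> unauthenticated connection
    password=os.getenv("MONGODB_PASSWORD"),
    auth_source=os.getenv("MONGODB_AUTH_SOURCE"),  # None -> falls back to 'admin' in __init__
)
print(db.db.name)
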
@@ -90,7 +90,7 @@ async def monitor_relationships():
async def build_memory_task():
    """每30秒执行一次记忆构建"""
    print("\033[1;32m[记忆构建]\033[0m 开始构建记忆...")
    hippocampus.build_memory(chat_size=12)
    await hippocampus.build_memory(chat_size=30)
    print("\033[1;32m[记忆构建]\033[0m 记忆构建完成")

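The docstring says this task runs every 30 seconds, but the scheduler itself sits outside this hunk. A minimal sketch of one way to drive it, assuming a plain asyncio loop (the project may use a different scheduling mechanism):

import asyncio

async def build_memory_scheduler(interval: int = 30):
    # Repeatedly await the (now asynchronous) memory build, then sleep.
    while True:
        await build_memory_task()
        await asyncio.sleep(interval)

# From inside a running event loop: asyncio.create_task(build_memory_scheduler())
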
@@ -136,3 +136,4 @@ llm_config.DEEP_SEEK_BASE_URL = config.deep_seek_base_url
if not global_config.enable_advance_output:
    # logger.remove()
    pass

@@ -72,12 +72,15 @@ class PromptBuilder:
        # print(f"\033[1;32m[前额叶]\033[0m 合并所有需要的记忆2: {list(overlapping_second_layer)}")

        # 使用集合去重
        all_memories = list(set(all_first_layer_items) | set(overlapping_second_layer))
        # 从每个来源随机选择2条记忆(如果有的话)
        selected_first_layer = random.sample(all_first_layer_items, min(2, len(all_first_layer_items))) if all_first_layer_items else []
        selected_second_layer = random.sample(list(overlapping_second_layer), min(2, len(overlapping_second_layer))) if overlapping_second_layer else []

        # 合并并去重
        all_memories = list(set(selected_first_layer + selected_second_layer))
        if all_memories:
            print(f"\033[1;32m[前额叶]\033[0m 合并所有需要的记忆: {all_memories}")

        if all_memories:  # 只在列表非空时选择随机项
            random_item = choice(all_memories)
            random_item = " ".join(all_memories)
            memory_prompt = f"看到这些聊天,你想起来{random_item}\n"
        else:
            memory_prompt = ""  # 如果没有记忆,则返回空字符串

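A self-contained sketch of the new sampling logic with dummy data; the `min(2, len(...))` guard matters because `random.sample` raises `ValueError` when asked for more items than the population contains:

import random

all_first_layer_items = ["记忆A", "记忆B", "记忆C"]
overlapping_second_layer = {"记忆B", "记忆D"}

# At most 2 memories from each source, guarded against short lists.
selected_first = random.sample(all_first_layer_items, min(2, len(all_first_layer_items))) if all_first_layer_items else []
selected_second = random.sample(list(overlapping_second_layer), min(2, len(overlapping_second_layer))) if overlapping_second_layer else []

# New behaviour: merge, deduplicate, and join everything into one prompt fragment,
# instead of picking a single random item as before.
all_memories = list(set(selected_first + selected_second))
if all_memories:
    print("看到这些聊天,你想起来" + " ".join(all_memories))
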
@@ -3,6 +3,8 @@ import requests
from typing import Tuple, Union
import time
from nonebot import get_driver
import aiohttp
import asyncio

driver = get_driver()
config = driver.config

@@ -15,7 +17,7 @@ class LLMModel:
        self.api_key = config.siliconflow_key
        self.base_url = config.siliconflow_base_url

    def generate_response(self, prompt: str) -> Tuple[str, str]:
    async def generate_response(self, prompt: str) -> Tuple[str, str]:
        """根据输入的提示生成模型的响应"""
        headers = {
            "Authorization": f"Bearer {self.api_key}",

@@ -34,32 +36,32 @@ class LLMModel:
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"

        max_retries = 3
        base_wait_time = 15  # 基础等待时间(秒)
        base_wait_time = 15

        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data)

                if response.status_code == 429:
                async with aiohttp.ClientSession() as session:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = base_wait_time * (2 ** retry)  # 指数退避
                            print(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                            time.sleep(wait_time)
                            await asyncio.sleep(wait_time)
                            continue

                        response.raise_for_status()  # 检查其他响应状态

                        result = response.json()
                        result = await response.json()
                        if "choices" in result and len(result["choices"]) > 0:
                            content = result["choices"][0]["message"]["content"]
                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                            return content, reasoning_content
                        return "没有返回结果", ""

            except requests.exceptions.RequestException as e:
            except Exception as e:
                if retry < max_retries - 1:  # 如果还有重试机会
                    wait_time = base_wait_time * (2 ** retry)
                    print(f"请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                    time.sleep(wait_time)
                    await asyncio.sleep(wait_time)
                else:
                    return f"请求失败: {str(e)}", ""

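For reference, with `max_retries = 3` and `base_wait_time = 15`, the exponential backoff formula above yields waits of 15 s, 30 s and 60 s for retry = 0, 1, 2:

max_retries = 3
base_wait_time = 15

for retry in range(max_retries):
    # Same formula as in the hunk above: the wait doubles on every retry.
    print(f"retry {retry}: wait {base_wait_time * (2 ** retry)} s")
# retry 0: wait 15 s
# retry 1: wait 30 s
# retry 2: wait 60 s
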
@@ -4,6 +4,8 @@ from typing import Tuple, Union
import time
from ..chat.config import BotConfig
from nonebot import get_driver
import aiohttp
import asyncio

driver = get_driver()
config = driver.config

@@ -21,7 +23,7 @@ class LLMModel:

        print(f"API URL: {self.base_url}")  # 打印 base_url 用于调试

    def generate_response(self, prompt: str) -> Tuple[str, str]:
    async def generate_response(self, prompt: str) -> Tuple[str, str]:
        """根据输入的提示生成模型的响应"""
        headers = {
            "Authorization": f"Bearer {self.api_key}",

@@ -44,28 +46,28 @@ class LLMModel:

        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data)

                if response.status_code == 429:
                async with aiohttp.ClientSession() as session:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = base_wait_time * (2 ** retry)  # 指数退避
                            print(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                            time.sleep(wait_time)
                            await asyncio.sleep(wait_time)
                            continue

                        response.raise_for_status()  # 检查其他响应状态

                        result = response.json()
                        result = await response.json()
                        if "choices" in result and len(result["choices"]) > 0:
                            content = result["choices"][0]["message"]["content"]
                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                            return content, reasoning_content
                        return "没有返回结果", ""

            except requests.exceptions.RequestException as e:
            except Exception as e:
                if retry < max_retries - 1:  # 如果还有重试机会
                    wait_time = base_wait_time * (2 ** retry)
                    print(f"请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                    time.sleep(wait_time)
                    await asyncio.sleep(wait_time)
                else:
                    return f"请求失败: {str(e)}", ""

@@ -193,7 +193,25 @@ class Hippocampus:
            chat_text.append(chat_)
        return chat_text

    def build_memory(self,chat_size=12):
    async def memory_compress(self, input_text, rate=1):
        information_content = calculate_information_content(input_text)
        print(f"文本的信息量(熵): {information_content:.4f} bits")
        topic_num = max(1, min(5, int(information_content * rate / 4)))
        topic_prompt = find_topic(input_text, topic_num)
        topic_response = await self.llm_model.generate_response(topic_prompt)
        # 检查 topic_response 是否为元组
        if isinstance(topic_response, tuple):
            topics = topic_response[0].split(",")  # 假设第一个元素是我们需要的字符串
        else:
            topics = topic_response.split(",")
        compressed_memory = set()
        for topic in topics:
            topic_what_prompt = topic_what(input_text,topic)
            topic_what_response = await self.llm_model_small.generate_response(topic_what_prompt)
            compressed_memory.add((topic.strip(), topic_what_response[0]))  # 将话题和记忆作为元组存储
        return compressed_memory

    async def build_memory(self,chat_size=12):
        #最近消息获取频率
        time_frequency = {'near':1,'mid':2,'far':2}
        memory_sample = self.get_memory_sample(chat_size,time_frequency)

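`calculate_information_content` is defined outside this hunk; a minimal sketch, assuming it computes character-level Shannon entropy, showing how the entropy feeds the `topic_num` clamp used by `memory_compress`:

import math
from collections import Counter

def calculate_information_content(text: str) -> float:
    # Assumption: character-level Shannon entropy in bits; the real helper
    # lives elsewhere in the repository and may differ.
    counts = Counter(text)
    total = len(text)
    return -sum((n / total) * math.log2(n / total) for n in counts.values())

text = "今天群里在聊新版本的异步记忆构建"
rate = 2.5  # the value build_memory passes to memory_compress
entropy = calculate_information_content(text)
topic_num = max(1, min(5, int(entropy * rate / 4)))  # clamped to 1..5 topics
print(f"{entropy:.4f} bits -> {topic_num} topics")
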
@@ -208,9 +226,7 @@ class Hippocampus:
            if input_text:
                # 生成压缩后记忆
                first_memory = set()
                first_memory = self.memory_compress(input_text, 2.5)
                # 延时防止访问超频
                # time.sleep(5)
                first_memory = await self.memory_compress(input_text, 2.5)
                #将记忆加入到图谱中
                for topic, memory in first_memory:
                    topics = segment_text(topic)

@@ -225,26 +241,6 @@ class Hippocampus:
                print(f"空消息 跳过")
        self.memory_graph.save_graph_to_db()

    def memory_compress(self, input_text, rate=1):
        information_content = calculate_information_content(input_text)
        print(f"文本的信息量(熵): {information_content:.4f} bits")
        topic_num = max(1, min(5, int(information_content * rate / 4)))
        # print(topic_num)
        topic_prompt = find_topic(input_text, topic_num)
        topic_response = self.llm_model.generate_response(topic_prompt)
        # 检查 topic_response 是否为元组
        if isinstance(topic_response, tuple):
            topics = topic_response[0].split(",")  # 假设第一个元素是我们需要的字符串
        else:
            topics = topic_response.split(",")
        # print(topics)
        compressed_memory = set()
        for topic in topics:
            topic_what_prompt = topic_what(input_text,topic)
            topic_what_response = self.llm_model_small.generate_response(topic_what_prompt)
            compressed_memory.add((topic.strip(), topic_what_response[0]))  # 将话题和记忆作为元组存储
        return compressed_memory


def segment_text(text):
    seg_text = list(jieba.cut(text))

@@ -1,5 +1,6 @@
import os
import requests
import aiohttp
from typing import Tuple, Union
from nonebot import get_driver

@@ -22,7 +23,7 @@ class LLMModel:
        self.model_name = model_name
        self.params = kwargs

    def generate_response(self, prompt: str) -> Tuple[str, str]:
    async def generate_response(self, prompt: str) -> Tuple[str, str]:
        """根据输入的提示生成模型的响应"""
        headers = {
            "Authorization": f"Bearer {self.api_key}",

@@ -41,17 +42,18 @@ class LLMModel:
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"

        try:
            response = requests.post(api_url, headers=headers, json=data)
            async with aiohttp.ClientSession() as session:
                async with session.post(api_url, headers=headers, json=data) as response:
                    response.raise_for_status()  # 检查响应状态

                    result = response.json()
                    result = await response.json()
                    if "choices" in result and len(result["choices"]) > 0:
                        content = result["choices"][0]["message"]["content"]
                        reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                        return content, reasoning_content  # 返回内容和推理内容
                    return "没有返回结果", ""  # 返回两个值

        except requests.exceptions.RequestException as e:
        except Exception as e:
            return f"请求失败: {str(e)}", ""  # 返回错误信息和空字符串

# 示例用法