Merge remote-tracking branch 'upstream/debug'
@@ -1,66 +0,0 @@
import os
import requests
from typing import Tuple, Union
import time
from nonebot import get_driver

driver = get_driver()
config = driver.config

class LLMModel:
    # def __init__(self, model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", **kwargs):
    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs
        self.api_key = config.siliconflow_key
        self.base_url = config.siliconflow_base_url

    def generate_response(self, prompt: str) -> Tuple[str, str]:
        """Generate the model's response for the given prompt."""
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # Build the request body
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params
        }

        # Send the request to the full chat/completions endpoint
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"

        max_retries = 3
        base_wait_time = 15  # base wait time in seconds

        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data)

                if response.status_code == 429:
                    wait_time = base_wait_time * (2 ** retry)  # exponential backoff
                    print(f"Rate limited (429), waiting {wait_time}s before retrying...")
                    time.sleep(wait_time)
                    continue

                response.raise_for_status()  # check other response statuses

                result = response.json()
                if "choices" in result and len(result["choices"]) > 0:
                    content = result["choices"][0]["message"]["content"]
                    reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                    return content, reasoning_content
                return "No result returned", ""

            except requests.exceptions.RequestException as e:
                if retry < max_retries - 1:  # retries still available
                    wait_time = base_wait_time * (2 ** retry)
                    print(f"Request failed, waiting {wait_time}s before retrying... Error: {str(e)}")
                    time.sleep(wait_time)
                else:
                    return f"Request failed: {str(e)}", ""

        return "Maximum retries reached, request still failed", ""
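
For reference, the retry loop above waits base_wait_time * 2**retry seconds between attempts; a minimal sketch of the schedule implied by the constants shown in this hunk:

# Backoff schedule implied by base_wait_time = 15 and max_retries = 3
base_wait_time = 15
max_retries = 3
print([base_wait_time * (2 ** retry) for retry in range(max_retries)])  # [15, 30, 60]
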
@@ -2,15 +2,17 @@ import os
import requests
from typing import Tuple, Union
import time
from ..chat.config import BotConfig
from nonebot import get_driver
import aiohttp
import asyncio
from src.plugins.chat.config import BotConfig, global_config

driver = get_driver()
config = driver.config

class LLMModel:
    # def __init__(self, model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", **kwargs):
    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
    def __init__(self, model_name=global_config.SILICONFLOW_MODEL_V3, **kwargs):
        self.model_name = model_name
        self.params = kwargs
        self.api_key = config.siliconflow_key
@@ -21,7 +23,7 @@ class LLMModel:

        print(f"API URL: {self.base_url}")  # print base_url for debugging

    def generate_response(self, prompt: str) -> Tuple[str, str]:
    async def generate_response(self, prompt: str) -> Tuple[str, str]:
        """Generate the model's response for the given prompt."""
        headers = {
            "Authorization": f"Bearer {self.api_key}",
@@ -44,28 +46,28 @@ class LLMModel:
        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data)
                async with aiohttp.ClientSession() as session:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = base_wait_time * (2 ** retry)  # exponential backoff
                            print(f"Rate limited (429), waiting {wait_time}s before retrying...")
                            await asyncio.sleep(wait_time)
                            continue

                        response.raise_for_status()  # check other response statuses

                        result = await response.json()
                        if "choices" in result and len(result["choices"]) > 0:
                            content = result["choices"][0]["message"]["content"]
                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                            return content, reasoning_content
                        return "No result returned", ""

                if response.status_code == 429:
                    wait_time = base_wait_time * (2 ** retry)  # exponential backoff
                    print(f"Rate limited (429), waiting {wait_time}s before retrying...")
                    time.sleep(wait_time)
                    continue

                response.raise_for_status()  # check other response statuses

                result = response.json()
                if "choices" in result and len(result["choices"]) > 0:
                    content = result["choices"][0]["message"]["content"]
                    reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                    return content, reasoning_content
                return "No result returned", ""

            except requests.exceptions.RequestException as e:
            except Exception as e:
                if retry < max_retries - 1:  # retries still available
                    wait_time = base_wait_time * (2 ** retry)
                    print(f"Request failed, waiting {wait_time}s before retrying... Error: {str(e)}")
                    time.sleep(wait_time)
                    await asyncio.sleep(wait_time)
                else:
                    return f"Request failed: {str(e)}", ""

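Since generate_response becomes a coroutine in this hunk, callers now have to await it from an event loop; a minimal illustrative sketch, assuming the class is importable and its nonebot config is already set up (the prompt string is not from the commit):

import asyncio

# Hypothetical caller of the now-async LLMModel.generate_response
async def main():
    model = LLMModel()
    content, reasoning_content = await model.generate_response("Hello")
    print(content, reasoning_content)

asyncio.run(main())
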
@@ -1,19 +1,16 @@
# -*- coding: utf-8 -*-
import os
import jieba
from .llm_module import LLMModel
import networkx as nx
import matplotlib.pyplot as plt
import math
from collections import Counter
import datetime
import random
import time
from ..chat.config import global_config
import sys
from ...common.database import Database  # use the correct import syntax
from ..chat.utils import calculate_information_content, get_cloest_chat_from_db

from ..models.utils_model import LLM_request
class Memory_graph:
    def __init__(self):
        self.G = nx.Graph()  # use networkx's graph structure
@@ -169,8 +166,8 @@ class Memory_graph:
class Hippocampus:
    def __init__(self,memory_graph:Memory_graph):
        self.memory_graph = memory_graph
        self.llm_model = LLMModel()
        self.llm_model_small = LLMModel(model_name="deepseek-ai/DeepSeek-V2.5")
        self.llm_model = LLM_request(model = global_config.llm_normal,temperature=0.5)
        self.llm_model_small = LLM_request(model = global_config.llm_normal_minor,temperature=0.5)

    def get_memory_sample(self,chat_size=20,time_frequency:dict={'near':2,'mid':4,'far':3}):
        current_timestamp = datetime.datetime.now().timestamp()
@@ -193,6 +190,24 @@ class Hippocampus:
                chat_text.append(chat_)
        return chat_text

    async def memory_compress(self, input_text, rate=1):
        information_content = calculate_information_content(input_text)
        print(f"Information content (entropy) of the text: {information_content:.4f} bits")
        topic_num = max(1, min(5, int(information_content * rate / 4)))
        topic_prompt = find_topic(input_text, topic_num)
        topic_response = await self.llm_model.generate_response(topic_prompt)
        # Check whether topic_response is a tuple
        if isinstance(topic_response, tuple):
            topics = topic_response[0].split(",")  # assume the first element is the string we need
        else:
            topics = topic_response.split(",")
        compressed_memory = set()
        for topic in topics:
            topic_what_prompt = topic_what(input_text,topic)
            topic_what_response = await self.llm_model_small.generate_response(topic_what_prompt)
            compressed_memory.add((topic.strip(), topic_what_response[0]))  # store the topic and memory as a tuple
        return compressed_memory

    async def build_memory(self,chat_size=12):
        # Frequency for fetching recent messages
        time_frequency = {'near':1,'mid':2,'far':2}
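
The topic count above is clamped to between 1 and 5 and scales with the entropy estimate; a small worked example of the formula, with an assumed entropy value of 6.4 bits:

# Worked example of the topic_num formula (the entropy value is illustrative)
information_content = 6.4  # bits, assumed
rate = 2.5
topic_num = max(1, min(5, int(information_content * rate / 4)))
print(topic_num)  # int(6.4 * 2.5 / 4) = 4
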
@@ -208,9 +223,7 @@ class Hippocampus:
            if input_text:
                # Generate the compressed memory
                first_memory = set()
                first_memory = self.memory_compress(input_text, 2.5)
                # Delay to avoid exceeding the rate limit
                # time.sleep(5)
                first_memory = await self.memory_compress(input_text, 2.5)
                # Add the memories to the graph
                for topic, memory in first_memory:
                    topics = segment_text(topic)
@@ -224,28 +237,6 @@ class Hippocampus:
            else:
                print(f"Empty message, skipping")
        self.memory_graph.save_graph_to_db()

    def memory_compress(self, input_text, rate=1):
        information_content = calculate_information_content(input_text)
        print(f"Information content (entropy) of the text: {information_content:.4f} bits")
        topic_num = max(1, min(5, int(information_content * rate / 4)))
        # print(topic_num)
        topic_prompt = find_topic(input_text, topic_num)
        topic_response = self.llm_model.generate_response(topic_prompt)
        # Check whether topic_response is a tuple
        if isinstance(topic_response, tuple):
            topics = topic_response[0].split(",")  # assume the first element is the string we need
        else:
            topics = topic_response.split(",")
        # print(topics)
        compressed_memory = set()
        for topic in topics:
            if topic=='' or '[' in topic:
                continue
            topic_what_prompt = topic_what(input_text,topic)
            topic_what_response = self.llm_model_small.generate_response(topic_what_prompt)
            compressed_memory.add((topic.strip(), topic_what_response[0]))  # store the topic and memory as a tuple
        return compressed_memory


def segment_text(text):