新增思维流

This commit is contained in:
SengokuCola
2025-03-23 17:47:17 +08:00
parent c635c9f185
commit 9f87cab148
9 changed files with 402 additions and 9 deletions

View File

@@ -0,0 +1,109 @@
from .outer_world import outer_world
import asyncio
from .offline_llm import LLMModel
class CuttentState:
    """Momentary state of the bot (reply willingness, mood, summary text).

    NOTE(review): class name keeps the original "Cuttent" typo because
    other code in this file references it by this exact name.
    """

    def __init__(self):
        # willing: inclination to reply (nudged upward elsewhere);
        # mood: not implemented yet; current_state_info: textual summary.
        self.willing, self.mood, self.current_state_info = 0, 'TODO', ""
class InnerWorld:
    """The bot's internal world: accumulated info plus its current state."""

    def __init__(self):
        # Free-form description of the inner world; empty until populated.
        self.inner_world_info = ""
        # Momentary state snapshot (willingness/mood/etc.).
        self.current_state: CuttentState = CuttentState()
class BRain:
    """Maintains the bot's running inner monologue ("current mind").

    Periodically feeds the latest outer-world summary plus the previous
    thought back into an LLM to produce the next thought, archiving the
    old thoughts in ``past_mind``.

    NOTE(review): class name keeps the original "BRain" capitalization
    because external code may reference it by this exact name.
    """

    def __init__(self):
        self.current_mind = ""   # latest inner-monologue text
        self.past_mind = []      # history of previous monologues
        self.inner_world = InnerWorld()
        self.llm_model = LLMModel("Pro/Qwen/Qwen2.5-7B-Instruct")
        if not self.current_mind:
            # Seed persona prompt used as the very first "thought".
            self.current_mind = "你是麦麦,曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧,你现在正在浏览qq群想着qq群里发生的事情人们在谈论什么以及和你有什么关系你应该怎样在qq群中回复和发言。现在请麦麦你继续思考"

    async def brain_start_working(self):
        """Run the think loop forever: one new thought every 10 seconds."""
        while True:
            await self.do_a_thinking()
            await asyncio.sleep(10)

    def _base_prompt(self) -> str:
        """Build the shared prompt prefix (latest outer-world info).

        The inner-world / current-state sections were computed but never
        injected in the original code; kept here as TODOs.
        """
        outer_world_info = self.build_outer_world_info()
        # TODO: not injected yet (disabled in the original design):
        # prompt += f"这是你当前的脑内状态{current_state_info}\n\n"
        # prompt += f"这是你当前的脑内状态{inner_world_info}\n\n"
        return f"这是你刚刚接触的内容:{outer_world_info}\n\n"

    async def do_a_thinking(self):
        """Generate the next thought from outer-world info + previous thought."""
        print("麦麦脑袋转起来了")
        prompt = self._base_prompt()
        prompt += f"这是你之前的想法{self.current_mind}\n\n"
        prompt += f"现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,注重当前的思考:"
        response, _reasoning = await self.llm_model.generate_response_async(prompt)
        # update_current_mind already assigns self.current_mind; the original
        # redundantly re-assigned it afterwards.
        self.update_current_mind(response)
        print(f"麦麦的脑内状态:{self.current_mind}")

    async def do_after_reply(self, reply_content, chat_talking_prompt):
        """Refresh the thought right after the bot has sent a reply.

        :param reply_content: the text the bot just sent.
        :param chat_talking_prompt: the chat context the reply addressed.
        """
        print("麦麦脑袋转起来了")
        prompt = self._base_prompt()
        prompt += f"这是你之前想要回复的内容:{chat_talking_prompt}\n\n"
        prompt += f"这是你之前的想法{self.current_mind}\n\n"
        prompt += f"这是你自己刚刚回复的内容{reply_content}\n\n"
        prompt += f"现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白:"
        response, _reasoning = await self.llm_model.generate_response_async(prompt)
        self.update_current_mind(response)
        print(f"麦麦的脑内状态:{self.current_mind}")

    def update_current_state_from_current_mind(self):
        """Placeholder state update: slowly raise reply willingness."""
        self.inner_world.current_state.willing += 0.01

    def build_current_state_info(self, current_state):
        """Return the textual summary carried by *current_state*."""
        return current_state.current_state_info

    def build_inner_world_info(self, inner_world):
        """Return the textual summary carried by *inner_world*."""
        return inner_world.inner_world_info

    def build_outer_world_info(self):
        """Return the latest cross-chat summary from the outer world."""
        return outer_world.outer_world_info

    def update_current_mind(self, reponse):
        """Archive the current thought and adopt *reponse* as the new one.

        NOTE(review): parameter name keeps the original "reponse" typo to
        stay keyword-compatible with existing callers.
        """
        self.past_mind.append(self.current_mind)
        self.current_mind = reponse
# Module-level singleton brain instance.
brain = BRain()


async def main():
    """Run the thinker loop and the outer-world watcher concurrently."""
    # gather schedules both coroutines on the running loop and waits on both.
    await asyncio.gather(
        brain.brain_start_working(),
        outer_world.open_eyes(),
    )


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,123 @@
import asyncio
import os
import time
from typing import Tuple, Union
import aiohttp
import requests
from src.common.logger import get_module_logger
# Module-level logger for this offline LLM client.
logger = get_module_logger("offline_llm")
class LLMModel:
    """Thin client for a SiliconFlow-hosted chat-completions model.

    Reads credentials from the ``SILICONFLOW_KEY`` / ``SILICONFLOW_BASE_URL``
    environment variables and offers synchronous and asynchronous generation
    with exponential-backoff retries on rate limits (HTTP 429) and errors.
    """

    # Retry policy shared by the sync and async paths.
    _MAX_RETRIES = 3
    _BASE_WAIT_TIME = 15  # seconds; doubled on each retry (exponential backoff)

    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
        """Create a client for *model_name*.

        :param model_name: model identifier passed to the API.
        :param kwargs: extra request-body fields (e.g. max_tokens) merged
            into every request; may override the default temperature.
        :raises ValueError: if either environment variable is unset.
        """
        self.model_name = model_name
        self.params = kwargs
        self.api_key = os.getenv("SILICONFLOW_KEY")
        self.base_url = os.getenv("SILICONFLOW_BASE_URL")
        if not self.api_key or not self.base_url:
            raise ValueError("环境变量未正确加载SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")
        logger.info(f"API URL: {self.base_url}")

    def _request_parts(self, prompt: str):
        """Return ``(api_url, headers, json_body)`` for one completions call."""
        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params,  # caller-supplied params win over the defaults above
        }
        # Full chat/completions endpoint, tolerating a trailing slash in base_url.
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        return api_url, headers, data

    @staticmethod
    def _parse_result(result) -> Tuple[str, str]:
        """Extract ``(content, reasoning_content)`` from a completions payload."""
        if "choices" in result and len(result["choices"]) > 0:
            message = result["choices"][0]["message"]
            return message["content"], message.get("reasoning_content", "")
        return "没有返回结果", ""

    def generate_response(self, prompt: str) -> Tuple[str, str]:
        """Generate a model response for *prompt* (blocking).

        Returns ``(content, reasoning_content)``; on failure the first
        element is an error message and the second is "".
        """
        api_url, headers, data = self._request_parts(prompt)
        logger.info(f"Request URL: {api_url}")
        for retry in range(self._MAX_RETRIES):
            try:
                response = requests.post(api_url, headers=headers, json=data)
                if response.status_code == 429:
                    wait_time = self._BASE_WAIT_TIME * (2**retry)  # exponential backoff
                    logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                    continue
                response.raise_for_status()  # raise on any other HTTP error
                return self._parse_result(response.json())
            except Exception as e:
                if retry < self._MAX_RETRIES - 1:  # retries remaining
                    wait_time = self._BASE_WAIT_TIME * (2**retry)
                    logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                    time.sleep(wait_time)
                else:
                    logger.error(f"请求失败: {str(e)}")
                    return f"请求失败: {str(e)}", ""
        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""

    async def generate_response_async(self, prompt: str) -> Tuple[str, str]:
        """Generate a model response for *prompt* without blocking the loop.

        Same contract as :meth:`generate_response`, using aiohttp and
        ``asyncio.sleep`` for the backoff waits.
        """
        api_url, headers, data = self._request_parts(prompt)
        logger.info(f"Request URL: {api_url}")
        async with aiohttp.ClientSession() as session:
            for retry in range(self._MAX_RETRIES):
                try:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = self._BASE_WAIT_TIME * (2**retry)  # exponential backoff
                            logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                            await asyncio.sleep(wait_time)
                            continue
                        response.raise_for_status()  # raise on any other HTTP error
                        return self._parse_result(await response.json())
                except Exception as e:
                    if retry < self._MAX_RETRIES - 1:  # retries remaining
                        wait_time = self._BASE_WAIT_TIME * (2**retry)
                        logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                        await asyncio.sleep(wait_time)
                    else:
                        logger.error(f"请求失败: {str(e)}")
                        return f"请求失败: {str(e)}", ""
        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""

View File

@@ -0,0 +1,111 @@
#定义了来自外部世界的信息
import asyncio
from datetime import datetime
from src.common.database import db
from .offline_llm import LLMModel
#存储一段聊天的大致内容
class Talking_info:
    """Rolling view of one chat stream: recent messages plus an LLM summary.

    NOTE(review): class name keeps the original snake-ish capitalization
    because other code references it by this exact name.
    """

    def __init__(self, chat_id):
        self.chat_id = chat_id
        self.talking_message = []      # recent message documents
        self.talking_message_str = ""  # their plain-text concatenation
        self.talking_summary = ""      # LLM-generated summary of the window
        self.last_message_time = None  # timestamp of the newest message seen
        self.llm_summary = LLMModel("Pro/Qwen/Qwen2.5-7B-Instruct")

    def update_talking_message(self):
        """Reload the most recent 15 messages of this chat from the database.

        (The original comment said 30, but the query caps at 15.)
        """
        messages = db.messages.find({"chat_id": self.chat_id}).sort("time", -1).limit(15)
        self.talking_message = list(messages)
        # Join once instead of += in a loop; order matches the query order.
        self.talking_message_str = "".join(
            message["detailed_plain_text"] for message in self.talking_message
        )

    async def update_talking_summary(self, new_summary=""):
        """Summarize the current message window with the LLM.

        :param new_summary: accepted for backward compatibility; unused.
        """
        prompt = f"聊天内容:{self.talking_message_str}\n\n"
        prompt += f"以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n\n"
        prompt += f"总结:"
        # The model's reasoning_content is deliberately discarded.
        self.talking_summary, _ = await self.llm_summary.generate_response_async(prompt)
class SheduleInfo:
    """Placeholder holder for schedule text.

    NOTE(review): class name keeps the original "Shedule" typo because
    callers may reference it by this exact name.
    """

    def __init__(self):
        # Empty until a schedule source populates it.
        self.shedule_info = ""
class OuterWorld:
    """Aggregates what is happening outside the bot: chats, schedule, interests.

    ``open_eyes`` polls the database once a minute, refreshes every chat
    stream's summary, then condenses them all into ``outer_world_info``.
    """

    def __init__(self):
        self.talking_info_list = []  # one Talking_info per known chat stream
        self.shedule_info = "无日程"
        self.interest_info = "麦麦你好"
        self.outer_world_info = ""   # cross-chat summary produced by the LLM
        self.start_time = int(datetime.now().timestamp())
        self.llm_summary = LLMModel("Qwen/Qwen2.5-32B-Instruct")

    async def open_eyes(self):
        """Main observation loop: refresh all chat info every 60 seconds."""
        while True:
            await asyncio.sleep(60)
            print("更新所有聊天信息")
            await self.update_all_talking_info()
            print("更新outer_world_info")
            await self.update_outer_world_info()
            print(self.outer_world_info)
            # (A dead debug loop over talking_info_list was removed here.)

    async def update_outer_world_info(self):
        """Condense every per-chat summary into one overall summary."""
        print("总结当前outer_world_info")
        all_talking_summary = "".join(
            talking_info.talking_summary for talking_info in self.talking_info_list
        )
        prompt = f"聊天内容:{all_talking_summary}\n\n"
        prompt += f"以上是多个群里在进行的聊天,请你对所有聊天内容进行总结,总结内容要包含聊天的大致内容,以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n\n"
        prompt += f"总结:"
        # The model's reasoning_content is deliberately discarded.
        self.outer_world_info, _ = await self.llm_summary.generate_response_async(prompt)

    async def update_talking_info(self, chat_id):
        """Create or refresh the Talking_info tracking *chat_id*."""
        talking_info = next(
            (info for info in self.talking_info_list if info.chat_id == chat_id), None
        )
        if talking_info is None:
            print("新聊天流")
            talking_info = Talking_info(chat_id)
            talking_info.update_talking_message()
            await talking_info.update_talking_summary()
            self.talking_info_list.append(talking_info)
        else:
            print("旧聊天流")
            talking_info.update_talking_message()
            await talking_info.update_talking_summary()

    async def update_all_talking_info(self):
        """Refresh every chat stream found in the database, concurrently."""
        all_streams = db.chat_streams.find({})
        update_tasks = []
        for data in all_streams:
            stream_id = data.get("stream_id")
            last_active_time = data.get("last_active_time")
            # NOTE(review): the trailing `or 1` makes this condition always
            # true, so the last_active_time filter is effectively disabled —
            # looks like a debug override; confirm intent before removing.
            if last_active_time > self.start_time or 1:
                update_tasks.append(self.update_talking_info(stream_id))
        # Run all per-stream updates in parallel.
        if update_tasks:
            await asyncio.gather(*update_tasks)
# Module-level singleton observer instance shared with the brain module.
outer_world = OuterWorld()

if __name__ == "__main__":
    # Standalone mode: just run the observation loop.
    asyncio.run(outer_world.open_eyes())