Blow everything up: the thinkflow genesis
@@ -18,7 +18,7 @@ from ..memory_system.memory import hippocampus
from .message_sender import message_manager, message_sender
from .storage import MessageStorage
from src.common.logger import get_module_logger
from src.think_flow_demo.current_mind import brain
# from src.think_flow_demo.current_mind import subheartflow
from src.think_flow_demo.outer_world import outer_world

logger = get_module_logger("chat_init")
@@ -46,12 +46,11 @@ scheduler = require("nonebot_plugin_apscheduler").scheduler


async def start_think_flow():
    """启动大脑和外部世界"""
    """启动外部世界"""
    try:
        brain_task = asyncio.create_task(brain.brain_start_working())
        outer_world_task = asyncio.create_task(outer_world.open_eyes())
        logger.success("大脑和外部世界启动成功")
        return brain_task, outer_world_task
        return outer_world_task
    except Exception as e:
        logger.error(f"启动大脑和外部世界失败: {e}")
        raise
@@ -32,7 +32,7 @@ from .utils_user import get_user_nickname, get_user_cardname
from ..willing.willing_manager import willing_manager # 导入意愿管理器
from .message_base import UserInfo, GroupInfo, Seg

from src.think_flow_demo.current_mind import brain
from src.think_flow_demo.heartflow import subheartflow_manager
from src.think_flow_demo.outer_world import outer_world

from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
@@ -93,6 +93,12 @@ class ChatBot:
            group_info=groupinfo, # 我嘞个gourp_info
        )
        message.update_chat_stream(chat)

        # 创建 心流 观察
        await outer_world.check_and_add_new_observe()
        subheartflow_manager.create_subheartflow(chat.stream_id)

        await relationship_manager.update_relationship(
            chat_stream=chat,
        )
@@ -185,7 +191,7 @@ class ChatBot:
            stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
        )

        await brain.do_after_reply(response,chat_talking_prompt)
        await subheartflow_manager.get_subheartflow(stream_id).do_after_reply(response,chat_talking_prompt)
        # print(f"有response: {response}")
        container = message_manager.get_container(chat.stream_id)
        thinking_message = None
@@ -308,11 +314,11 @@ class ChatBot:

        raw_message = f"[戳了戳]{global_config.BOT_NICKNAME}" # 默认类型
        if info := event.model_extra["raw_info"]:
            poke_type = info[2].get("txt", "戳了戳") # 戳戳类型,例如“拍一拍”、“揉一揉”、“捏一捏”
            poke_type = info[2].get("txt", "戳了戳") # 戳戳类型,例如"拍一拍"、"揉一揉"、"捏一捏"
            custom_poke_message = info[4].get("txt", "") # 自定义戳戳消息,若不存在会为空字符串
            raw_message = f"[{poke_type}]{global_config.BOT_NICKNAME}{custom_poke_message}"

        raw_message += "(这是一个类似摸摸头的友善行为,而不是恶意行为,请不要作出攻击发言)"
        raw_message += ",作为一个类似摸摸头的友善行为"

        user_info = UserInfo(
            user_id=event.user_id,
@@ -12,7 +12,7 @@ from .chat_stream import chat_manager
from .relationship_manager import relationship_manager
from src.common.logger import get_module_logger

from src.think_flow_demo.current_mind import brain
from src.think_flow_demo.heartflow import subheartflow_manager
from src.think_flow_demo.outer_world import outer_world

logger = get_module_logger("prompt")
@@ -36,8 +36,8 @@ class PromptBuilder:
            limit=global_config.MAX_CONTEXT_SIZE,
        )

        outer_world_info = outer_world.outer_world_info
        current_mind_info = brain.current_mind
        # outer_world_info = outer_world.outer_world_info
        current_mind_info = subheartflow_manager.get_subheartflow(stream_id).current_mind

        relation_prompt = ""
        for person in who_chat_in_group:
@@ -183,7 +183,7 @@ class PromptBuilder:
        prompt_check_if_response = ""


        print(prompt)
        # print(prompt)

        return prompt, prompt_check_if_response
@@ -799,7 +799,7 @@ class Hippocampus:
        """
        topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, 4))
        # 使用正则表达式提取<>中的内容
        print(f"话题: {topics_response[0]}")
        # print(f"话题: {topics_response[0]}")
        topics = re.findall(r'<([^>]+)>', topics_response[0])

        # 如果没有找到<>包裹的内容,返回['none']
@@ -884,7 +884,7 @@ class Hippocampus:
        """计算输入文本对记忆的激活程度"""
        # 识别主题
        identified_topics = await self._identify_topics(text)
        print(f"识别主题: {identified_topics}")
        # print(f"识别主题: {identified_topics}")

        if identified_topics[0] == "none":
            return 0
@@ -42,8 +42,8 @@ class WillingManager:
        interested_rate = interested_rate * config.response_interested_rate_amplifier


        if interested_rate > 0.3:
            current_willing += interested_rate - 0.2
        if interested_rate > 0.4:
            current_willing += interested_rate - 0.3

        if is_mentioned_bot and current_willing < 1.0:
            current_willing += 1
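Note on the threshold change above: raising the cutoff from 0.3 to 0.4 and the offset from 0.2 to 0.3 means mildly interesting messages no longer raise willingness at all, and every qualifying message gains 0.1 less than before. A quick check in plain Python (not part of the commit; the helper names are made up for illustration):

def willing_bump_old(interested_rate: float) -> float:
    # pre-change rule: threshold 0.3, offset 0.2
    return interested_rate - 0.2 if interested_rate > 0.3 else 0.0

def willing_bump_new(interested_rate: float) -> float:
    # post-change rule: threshold 0.4, offset 0.3
    return interested_rate - 0.3 if interested_rate > 0.4 else 0.0

for rate in (0.35, 0.5, 0.8):
    print(rate, round(willing_bump_old(rate), 2), round(willing_bump_new(rate), 2))
# 0.35 -> 0.15 vs 0.0, 0.5 -> 0.3 vs 0.2, 0.8 -> 0.6 vs 0.5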
@@ -1,47 +1,60 @@
from .outer_world import outer_world
import asyncio
from .offline_llm import LLMModel
from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLM_request
from src.plugins.chat.config import global_config

class CuttentState:
    def __init__(self):
        self.willing = 0
        self.mood = 'TODO'
        self.current_state_info = ""

class InnerWorld:
    def __init__(self):
        self.inner_world_info = ""
        self.current_state : CuttentState = CuttentState()
        self.mood_manager = MoodManager()
        self.mood = self.mood_manager.get_prompt()

    def update_current_state_info(self):
        self.current_state_info = self.mood_manager.get_current_mood()


class BRain:
class SubHeartflow:
    def __init__(self):
        self.current_mind = ""
        self.past_mind = []
        self.inner_world = InnerWorld()
        self.llm_model = LLMModel("Pro/Qwen/Qwen2.5-7B-Instruct")
        if not self.current_mind:
            self.current_mind = "你是麦麦,曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧,你现在正在浏览qq群,想着qq群里发生的事情,人们在谈论什么,以及和你有什么关系,你应该怎样在qq群中回复和发言。现在请麦麦你继续思考"
        self.current_state : CuttentState = CuttentState()
        self.llm_model = LLM_request(model=global_config.llm_topic_judge, temperature=0.7, max_tokens=600, request_type="sub_heart_flow")
        self.outer_world = None

    async def brain_start_working(self):
        self.observe_chat_id = None

        if not self.current_mind:
            self.current_mind = "你什么也没想"

    def assign_observe(self,stream_id):
        self.outer_world = outer_world.get_world_by_stream_id(stream_id)
        self.observe_chat_id = stream_id

    async def subheartflow_start_working(self):
        while True:
            await self.do_a_thinking()
            await asyncio.sleep(10)
            await asyncio.sleep(30)

    async def do_a_thinking(self):
        print("麦麦脑袋转起来了")
        current_thinking = self.current_mind
        outer_world_info = self.build_outer_world_info()
        inner_world_info = self.build_inner_world_info(self.inner_world)
        current_state_info = self.build_current_state_info(self.inner_world.current_state)
        print("麦麦小脑袋转起来了")
        self.current_state.update_current_state_info()

        personality_info = open("src/think_flow_demo/personality_info.txt", "r", encoding="utf-8").read()
        current_thinking_info = self.current_mind
        mood_info = self.current_state.mood
        related_memory_info = 'memory'
        message_stream_info = self.outer_world.talking_summary

        # prompt += f"这是你当前的脑内状态{current_state_info}\n\n"
        prompt = f"这是你刚刚接触的内容:{outer_world_info}\n\n"
        # prompt += f"这是你当前的脑内状态{inner_world_info}\n\n"
        prompt += f"这是你之前的想法{current_thinking}\n\n"

        prompt += f"现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,注重当前的思考:"
        prompt = f""
        prompt += f"{personality_info}\n"
        prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{message_stream_info}\n"
        prompt += f"你想起来{related_memory_info}。"
        prompt += f"刚刚你的想法是{current_thinking_info}。"
        prompt += f"你现在{mood_info}。"
        prompt += f"现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,不要思考太多:"

        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
@@ -52,19 +65,25 @@ class BRain:

    async def do_after_reply(self,reply_content,chat_talking_prompt):
        print("麦麦脑袋转起来了")
        current_thinking = self.current_mind
        outer_world_info = self.build_outer_world_info()
        inner_world_info = self.build_inner_world_info(self.inner_world)
        current_state_info = self.build_current_state_info(self.inner_world.current_state)
        self.current_state.update_current_state_info()

        personality_info = open("src/think_flow_demo/personality_info.txt", "r", encoding="utf-8").read()
        current_thinking_info = self.current_mind
        mood_info = self.current_state.mood
        related_memory_info = 'memory'
        message_stream_info = self.outer_world.talking_summary
        message_new_info = chat_talking_prompt
        reply_info = reply_content

        # prompt += f"这是你当前的脑内状态{current_state_info}\n\n"
        prompt = f"这是你刚刚接触的内容:{outer_world_info}\n\n"
        # prompt += f"这是你当前的脑内状态{inner_world_info}\n\n"
        prompt += f"这是你之前想要回复的内容:{chat_talking_prompt}\n\n"
        prompt += f"这是你之前的想法{current_thinking}\n\n"
        prompt += f"这是你自己刚刚回复的内容{reply_content}\n\n"
        prompt += f"现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白:"
        prompt = f""
        prompt += f"{personality_info}\n"
        prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{message_stream_info}\n"
        prompt += f"你想起来{related_memory_info}。"
        prompt += f"刚刚你的想法是{current_thinking_info}。"
        prompt += f"你现在看到了网友们发的新消息:{message_new_info}\n"
        prompt += f"你刚刚回复了群友们:{reply_info}"
        prompt += f"你现在{mood_info}。"
        prompt += f"现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白,不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,以及你回复的内容,不要思考太多:"

        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
@@ -73,17 +92,6 @@ class BRain:
        self.current_mind = reponse
        print(f"麦麦的脑内状态:{self.current_mind}")

    def update_current_state_from_current_mind(self):
        self.inner_world.current_state.willing += 0.01


    def build_current_state_info(self,current_state):
        current_state_info = current_state.current_state_info
        return current_state_info

    def build_inner_world_info(self,inner_world):
        inner_world_info = inner_world.inner_world_info
        return inner_world_info

    def build_outer_world_info(self):
        outer_world_info = outer_world.outer_world_info
@@ -94,16 +102,5 @@ class BRain:
        self.current_mind = reponse


brain = BRain()

async def main():
    # 创建两个任务
    brain_task = asyncio.create_task(brain.brain_start_working())
    outer_world_task = asyncio.create_task(outer_world.open_eyes())

    # 等待两个任务
    await asyncio.gather(brain_task, outer_world_task)

if __name__ == "__main__":
    asyncio.run(main())
# subheartflow = SubHeartflow()
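One side note on the file handling visible in do_a_thinking and do_after_reply above: personality_info.txt is re-opened on every thinking pass and the handle is never closed. A minimal sketch of loading it once instead (plain Python, not part of the commit; load_personality_info is a hypothetical helper):

from functools import lru_cache

@lru_cache(maxsize=1)
def load_personality_info(path: str = "src/think_flow_demo/personality_info.txt") -> str:
    # read the persona text once per path and close the handle promptly
    with open(path, "r", encoding="utf-8") as f:
        return f.read()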
src/think_flow_demo/heartflow.py (new file, 21 lines)
@@ -0,0 +1,21 @@
from .current_mind import SubHeartflow

class SubHeartflowManager:
    def __init__(self):
        self._subheartflows = {}

    def create_subheartflow(self, observe_chat_id):
        """创建一个新的SubHeartflow实例"""
        if observe_chat_id not in self._subheartflows:
            subheartflow = SubHeartflow()
            subheartflow.assign_observe(observe_chat_id)
            subheartflow.subheartflow_start_working()
            self._subheartflows[observe_chat_id] = subheartflow
        return self._subheartflows[observe_chat_id]

    def get_subheartflow(self, observe_chat_id):
        """获取指定ID的SubHeartflow实例"""
        return self._subheartflows.get(observe_chat_id)

# 创建一个全局的管理器实例
subheartflow_manager = SubHeartflowManager()
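One caveat about create_subheartflow above: subheartflow_start_working() is a coroutine function, so calling it without awaiting it or wrapping it in a task only creates a coroutine object that never runs (Python warns "coroutine ... was never awaited"). A drop-in sketch of the method that actually schedules the loop, assuming an event loop is already running when it is called (the _loop_task attribute is a new name introduced here to keep a reference):

import asyncio

def create_subheartflow(self, observe_chat_id):
    """创建一个新的SubHeartflow实例并调度其思考循环"""
    if observe_chat_id not in self._subheartflows:
        subheartflow = SubHeartflow()
        subheartflow.assign_observe(observe_chat_id)
        # schedule the loop instead of discarding the coroutine object,
        # and keep a reference so the task is not garbage-collected
        subheartflow._loop_task = asyncio.create_task(subheartflow.subheartflow_start_working())
        self._subheartflows[observe_chat_id] = subheartflow
    return self._subheartflows[observe_chat_id]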
@@ -1,8 +1,11 @@
#定义了来自外部世界的信息
import asyncio
from datetime import datetime
from src.plugins.models.utils_model import LLM_request
from src.plugins.chat.config import global_config
import sys
from src.common.database import db
from .offline_llm import LLMModel

#存储一段聊天的大致内容
class Talking_info:
    def __init__(self,chat_id):
@@ -10,26 +13,72 @@ class Talking_info:
        self.talking_message = []
        self.talking_message_str = ""
        self.talking_summary = ""
        self.last_message_time = None # 记录最新消息的时间
        self.last_observe_time = int(datetime.now().timestamp()) #初始化为当前时间
        self.observe_times = 0
        self.activate = 360

        self.llm_summary = LLMModel("Pro/Qwen/Qwen2.5-7B-Instruct")
        self.oberve_interval = 3

    def update_talking_message(self):
        #从数据库取最近30条该聊天流的消息
        messages = db.messages.find({"chat_id": self.chat_id}).sort("time", -1).limit(15)
        self.talking_message = []
        self.talking_message_str = ""
        for message in messages:
            self.talking_message.append(message)
            self.talking_message_str += message["detailed_plain_text"]
        self.llm_summary = LLM_request(model=global_config.llm_topic_judge, temperature=0.7, max_tokens=300, request_type="outer_world")

    async def update_talking_summary(self,new_summary=""):
    async def start_observe(self):
        while True:
            if self.activate <= 0:
                print(f"聊天 {self.chat_id} 活跃度不足,进入休眠状态")
                await self.waiting_for_activate()
                print(f"聊天 {self.chat_id} 被重新激活")
            await self.observe_world()
            await asyncio.sleep(self.oberve_interval)

    async def waiting_for_activate(self):
        while True:
            # 检查从上次观察时间之后的新消息数量
            new_messages_count = db.messages.count_documents({
                "chat_id": self.chat_id,
                "time": {"$gt": self.last_observe_time}
            })

            if new_messages_count > 10:
                self.activate = 360*(self.observe_times+1)
                return

            await asyncio.sleep(10) # 每10秒检查一次

    async def observe_world(self):
        # 查找新消息
        new_messages = list(db.messages.find({
            "chat_id": self.chat_id,
            "time": {"$gt": self.last_observe_time}
        }).sort("time", 1)) # 按时间正序排列

        if not new_messages:
            self.activate += -1
            return

        # 将新消息添加到talking_message
        self.talking_message.extend(new_messages)
        self.translate_message_list_to_str()
        self.observe_times += 1
        self.last_observe_time = new_messages[-1]["time"]

        if self.observe_times > 3:
            await self.update_talking_summary()
            print(f"更新了聊天总结:{self.talking_summary}")

    async def update_talking_summary(self):
        #基于已经有的talking_summary,和新的talking_message,生成一个summary
        prompt = f"聊天内容:{self.talking_message_str}\n\n"
        prompt += f"以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n\n"
        prompt += f"总结:"
        prompt = ""
        prompt = f"你正在参与一个qq群聊的讨论,这个群之前在聊的内容是:{self.talking_summary}\n"
        prompt += f"现在群里的群友们产生了新的讨论,有了新的发言,具体内容如下:{self.talking_message_str}\n"
        prompt += f"以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n"
        prompt += f"总结概括:"
        self.talking_summary, reasoning_content = await self.llm_summary.generate_response_async(prompt)

    def translate_message_list_to_str(self):
        self.talking_message_str = ""
        for message in self.talking_message:
            self.talking_message_str += message["detailed_plain_text"]

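For a rough sense of the activation numbers above: activate starts at 360, drops by one on each empty poll, and each poll sleeps oberve_interval (3 seconds), so a completely silent chat goes dormant after roughly 18 minutes (ignoring query time), after which it needs more than 10 new messages to wake up. A back-of-the-envelope check in plain Python (not part of the commit):

observe_interval_s = 3   # Talking_info.oberve_interval
initial_activate = 360   # Talking_info.activate, decremented once per poll with no new messages

idle_seconds = observe_interval_s * initial_activate
print(idle_seconds, "s =", idle_seconds / 60, "min")  # 1080 s = 18.0 min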
class SheduleInfo:
    def __init__(self):
        self.shedule_info = ""

@@ -38,72 +87,41 @@ class OuterWorld:
    def __init__(self):
        self.talking_info_list = [] #装的一堆talking_info
        self.shedule_info = "无日程"
        self.interest_info = "麦麦你好"
        # self.interest_info = "麦麦你好"
        self.outer_world_info = ""

        self.start_time = int(datetime.now().timestamp())

        self.llm_summary = LLMModel("Qwen/Qwen2.5-32B-Instruct")
        self.llm_summary = LLM_request(model=global_config.llm_topic_judge, temperature=0.7, max_tokens=600, request_type="outer_world_info")

    async def check_and_add_new_observe(self):
        # 获取所有聊天流
        all_streams = db.chat_streams.find({})
        # 遍历所有聊天流
        for data in all_streams:
            stream_id = data.get("stream_id")
            # 检查是否已存在该聊天流的观察对象
            existing_info = next((info for info in self.talking_info_list if info.chat_id == stream_id), None)

            # 如果不存在,创建新的Talking_info对象并添加到列表中
            if existing_info is None:
                print(f"发现新的聊天流: {stream_id}")
                new_talking_info = Talking_info(stream_id)
                self.talking_info_list.append(new_talking_info)
                # 启动新对象的观察任务
                asyncio.create_task(new_talking_info.start_observe())

    async def open_eyes(self):
        while True:
            print("检查新的聊天流")
            await self.check_and_add_new_observe()
            await asyncio.sleep(60)
            print("更新所有聊天信息")
            await self.update_all_talking_info()
            print("更新outer_world_info")
            await self.update_outer_world_info()

            print(self.outer_world_info)

    def get_world_by_stream_id(self,stream_id):
        for talking_info in self.talking_info_list:
            # print(talking_info.talking_message_str)
            # print(talking_info.talking_summary)
            pass
            if talking_info.chat_id == stream_id:
                return talking_info
        return None

    async def update_outer_world_info(self):
        print("总结当前outer_world_info")
        all_talking_summary = ""
        for talking_info in self.talking_info_list:
            all_talking_summary += talking_info.talking_summary

        prompt = f"聊天内容:{all_talking_summary}\n\n"
        prompt += f"以上是多个群里在进行的聊天,请你对所有聊天内容进行总结,总结内容要包含聊天的大致内容,以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n\n"
        prompt += f"总结:"
        self.outer_world_info, reasoning_content = await self.llm_summary.generate_response_async(prompt)


    async def update_talking_info(self,chat_id):
        # 查找现有的talking_info
        talking_info = next((info for info in self.talking_info_list if info.chat_id == chat_id), None)

        if talking_info is None:
            print("新聊天流")
            talking_info = Talking_info(chat_id)
            talking_info.update_talking_message()
            await talking_info.update_talking_summary()
            self.talking_info_list.append(talking_info)
        else:
            print("旧聊天流")
            talking_info.update_talking_message()
            await talking_info.update_talking_summary()

    async def update_all_talking_info(self):
        all_streams = db.chat_streams.find({})
        update_tasks = []

        for data in all_streams:
            stream_id = data.get("stream_id")
            # print(stream_id)
            last_active_time = data.get("last_active_time")

            if last_active_time > self.start_time or 1:
                update_tasks.append(self.update_talking_info(stream_id))

        # 并行执行所有更新任务
        if update_tasks:
            await asyncio.gather(*update_tasks)

outer_world = OuterWorld()

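A small observation on update_all_talking_info above: the condition "last_active_time > self.start_time or 1" is always true, so every stream is re-summarized on every open_eyes cycle regardless of activity; presumably the "or 1" is a debugging override. A tiny sketch of the same check with the override made explicit (hypothetical helper, not part of the commit):

def should_refresh(last_active_time: int, start_time: int, refresh_all: bool = False) -> bool:
    # refresh_all=True reproduces the diff's "or 1" behaviour; False only refreshes
    # streams that have been active since OuterWorld started
    return refresh_all or last_active_time > start_time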
src/think_flow_demo/personality_info.txt (new file, 1 line)
@@ -0,0 +1 @@
你叫麦麦,你19岁,是一个大二的心理学和脑科女大学生,你有一头黑色短发,你会刷贴吧