From 5dea0c3a16418d7ff660a02908e4b997093589f7 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 24 Mar 2025 23:22:03 +0800
Subject: [PATCH] feat: try adding reply willingness into the thought flow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/plugins/chat/bot.py             |  5 +++-
 src/plugins/chat/config.py          |  2 ++
 src/think_flow_demo/current_mind.py | 39 ++++++++++++++++++++++++-----
 src/think_flow_demo/heartflow.py    |  3 ++-
 src/think_flow_demo/outer_world.py  |  6 ++---
 5 files changed, 44 insertions(+), 11 deletions(-)

diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 77481f039..57c387c09 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -145,7 +145,10 @@ class ChatBot:
             interested_rate=interested_rate,
             sender_id=str(message.message_info.user_info.user_id),
         )
-        current_willing = willing_manager.get_willing(chat_stream=chat)
+        current_willing_old = willing_manager.get_willing(chat_stream=chat)
+        current_willing_new = (subheartflow_manager.get_subheartflow(chat.stream_id).current_state.willing-5)/4
+        print(f"旧回复意愿:{current_willing_old},新回复意愿:{current_willing_new}")
+        current_willing = (current_willing_old + current_willing_new) / 2

         logger.info(
             f"[{current_time}][{chat.group_info.group_name if chat.group_info else '私聊'}]"
diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index 151aa5724..09ebe3520 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -59,6 +59,7 @@ class BotConfig:
     llm_topic_judge: Dict[str, str] = field(default_factory=lambda: {})
     llm_summary_by_topic: Dict[str, str] = field(default_factory=lambda: {})
     llm_emotion_judge: Dict[str, str] = field(default_factory=lambda: {})
+    llm_outer_world: Dict[str, str] = field(default_factory=lambda: {})
     embedding: Dict[str, str] = field(default_factory=lambda: {})
     vlm: Dict[str, str] = field(default_factory=lambda: {})
     moderation: Dict[str, str] = field(default_factory=lambda: {})
@@ -237,6 +238,7 @@ class BotConfig:
                 "llm_topic_judge",
                 "llm_summary_by_topic",
                 "llm_emotion_judge",
+                "llm_outer_world",
                 "vlm",
                 "embedding",
                 "moderation",
diff --git a/src/think_flow_demo/current_mind.py b/src/think_flow_demo/current_mind.py
index b45428abb..fd4ca6160 100644
--- a/src/think_flow_demo/current_mind.py
+++ b/src/think_flow_demo/current_mind.py
@@ -3,7 +3,7 @@ import asyncio
 from src.plugins.moods.moods import MoodManager
 from src.plugins.models.utils_model import LLM_request
 from src.plugins.chat.config import global_config
-
+import re
 class CuttentState:
     def __init__(self):
         self.willing = 0
@@ -38,7 +38,9 @@
     async def subheartflow_start_working(self):
         while True:
             await self.do_a_thinking()
-            await asyncio.sleep(30)
+            print("麦麦闹情绪了")
+            await self.judge_willing()
+            await asyncio.sleep(20)

     async def do_a_thinking(self):
         print("麦麦小脑袋转起来了")
@@ -67,7 +69,7 @@
         print(f"麦麦的脑内状态:{self.current_mind}")

     async def do_after_reply(self,reply_content,chat_talking_prompt):
-        print("麦麦脑袋转起来了")
+        # print("麦麦脑袋转起来了")
         self.current_state.update_current_state_info()

         personality_info = open("src/think_flow_demo/personality_info.txt", "r", encoding="utf-8").read()
@@ -93,9 +95,34 @@
         self.update_current_mind(reponse)
         self.current_mind = reponse

-        print(f"麦麦的脑内状态:{self.current_mind}")
-
-
+        print(f"{self.observe_chat_id}麦麦的脑内状态:{self.current_mind}")
+
+    async def judge_willing(self):
+        # print("麦麦闹情绪了1")
+        personality_info = open("src/think_flow_demo/personality_info.txt", "r", encoding="utf-8").read()
+        current_thinking_info = self.current_mind
+        mood_info = self.current_state.mood
+        # print("麦麦闹情绪了2")
+        prompt = f""
+        prompt += f"{personality_info}\n"
+        prompt += f"现在你正在上网,和qq群里的网友们聊天"
+        prompt += f"你现在的想法是{current_thinking_info}。"
+        prompt += f"你现在{mood_info}。"
+        prompt += f"现在请你思考,你想不想发言或者回复,请你输出一个数字,1-10,1表示非常不想,10表示非常想。"
+        prompt += f"请你用<>包裹你的回复意愿,例如输出<1>表示不想回复,输出<10>表示非常想回复。<5>表示想回复,但是需要思考一下。"
+
+        response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        # 解析willing值
+        willing_match = re.search(r'<(\d+)>', response)
+        if willing_match:
+            self.current_state.willing = int(willing_match.group(1))
+        else:
+            self.current_state.willing = 0
+
+        print(f"{self.observe_chat_id}麦麦的回复意愿:{self.current_state.willing}")
+
+        return self.current_state.willing
+
     def build_outer_world_info(self):
         outer_world_info = outer_world.outer_world_info
         return outer_world_info
diff --git a/src/think_flow_demo/heartflow.py b/src/think_flow_demo/heartflow.py
index d906ae3ff..696641cb7 100644
--- a/src/think_flow_demo/heartflow.py
+++ b/src/think_flow_demo/heartflow.py
@@ -95,7 +95,8 @@ class Heartflow:
         if observe_chat_id not in self._subheartflows:
             subheartflow = SubHeartflow()
             subheartflow.assign_observe(observe_chat_id)
-            subheartflow.subheartflow_start_working()
+            # 创建异步任务
+            asyncio.create_task(subheartflow.subheartflow_start_working())
             self._subheartflows[observe_chat_id] = subheartflow

         return self._subheartflows[observe_chat_id]
diff --git a/src/think_flow_demo/outer_world.py b/src/think_flow_demo/outer_world.py
index e95fe516b..58eb4bbed 100644
--- a/src/think_flow_demo/outer_world.py
+++ b/src/think_flow_demo/outer_world.py
@@ -17,9 +17,9 @@ class Talking_info:
         self.observe_times = 0
         self.activate = 360

-        self.oberve_interval = 5
+        self.oberve_interval = 3

-        self.llm_summary = LLM_request(model=global_config.llm_topic_judge, temperature=0.7, max_tokens=300, request_type="outer_world")
+        self.llm_summary = LLM_request(model=global_config.llm_outer_world, temperature=0.7, max_tokens=300, request_type="outer_world")

     async def start_observe(self):
         while True:
@@ -42,7 +42,7 @@
                 self.activate = 360*(self.observe_times+1)
                 return

-            await asyncio.sleep(10)  # 每10秒检查一次
+            await asyncio.sleep(8)  # 每8秒检查一次

     async def observe_world(self):
         # 查找新消息,限制最多20条