diff --git a/src/plugins/chat/__init__.py b/src/plugins/chat/__init__.py
index 713f1d375..d8a41fe87 100644
--- a/src/plugins/chat/__init__.py
+++ b/src/plugins/chat/__init__.py
@@ -51,7 +51,10 @@ async def start_think_flow():
     try:
         outer_world_task = asyncio.create_task(outer_world.open_eyes())
         logger.success("Brain and outer world started successfully")
-        return outer_world_task
+        # Start the heartflow system
+        heartflow_task = asyncio.create_task(subheartflow_manager.heartflow_start_working())
+        logger.success("Heartflow system started successfully")
+        return outer_world_task, heartflow_task
     except Exception as e:
         logger.error(f"Failed to start brain and outer world: {e}")
         raise
@@ -70,11 +73,9 @@ async def start_background_tasks():
     logger.success("Mood manager started successfully")
 
     # Start the brain and outer world
-    await start_think_flow()
-
-    # Start the heartflow system
-    heartflow_task = asyncio.create_task(subheartflow_manager.heartflow_start_working())
-    logger.success("Heartflow system started successfully")
+    if global_config.enable_think_flow:
+        logger.success("Starting experimental feature: heartflow system")
+        await start_think_flow()
 
     # Only start the emoji management task
     asyncio.create_task(emoji_manager.start_periodic_check())
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index a9e76648a..e89375217 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -91,9 +91,11 @@ class ChatBot:
         )
         message.update_chat_stream(chat)
+        # Create heartflow observation
 
 
-        await outer_world.check_and_add_new_observe()
-        subheartflow_manager.create_subheartflow(chat.stream_id)
+        if global_config.enable_think_flow:
+            await outer_world.check_and_add_new_observe()
+            subheartflow_manager.create_subheartflow(chat.stream_id)
 
         await relationship_manager.update_relationship(
@@ -142,10 +144,14 @@ class ChatBot:
             interested_rate=interested_rate,
             sender_id=str(message.message_info.user_info.user_id),
         )
-        current_willing_old = willing_manager.get_willing(chat_stream=chat)
-        current_willing_new = (subheartflow_manager.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4
-        print(f"old reply willingness: {current_willing_old}, new reply willingness: {current_willing_new}")
-        current_willing = (current_willing_old + current_willing_new) / 2
+
+        if global_config.enable_think_flow:
+            current_willing_old = willing_manager.get_willing(chat_stream=chat)
+            current_willing_new = (subheartflow_manager.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4
+            print(f"old reply willingness: {current_willing_old}, new reply willingness: {current_willing_new}")
+            current_willing = (current_willing_old + current_willing_new) / 2
+        else:
+            current_willing = willing_manager.get_willing(chat_stream=chat)
 
         logger.info(
             f"[{current_time}][{chat.group_info.group_name if chat.group_info else 'private chat'}]"
@@ -185,13 +191,16 @@ class ChatBot:
         # print(f"response: {response}")
         if response:
             stream_id = message.chat_stream.stream_id
-            chat_talking_prompt = ""
-            if stream_id:
-                chat_talking_prompt = get_recent_group_detailed_plain_text(
-                    stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
-                )
-
-            await subheartflow_manager.get_subheartflow(stream_id).do_after_reply(response, chat_talking_prompt)
+
+            if global_config.enable_think_flow:
+                chat_talking_prompt = ""
+                if stream_id:
+                    chat_talking_prompt = get_recent_group_detailed_plain_text(
+                        stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+                    )
+                await subheartflow_manager.get_subheartflow(stream_id).do_after_reply(response, chat_talking_prompt)
+
+
             # print(f"got response: {response}")
             container = message_manager.get_container(chat.stream_id)
             thinking_message = None
diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index 54303b959..503ba0dcb 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -130,6 +130,8 @@ class BotConfig:
 
     # Experimental
     llm_outer_world: Dict[str, str] = field(default_factory=lambda: {})
+    llm_sub_heartflow: Dict[str, str] = field(default_factory=lambda: {})
+    llm_heartflow: Dict[str, str] = field(default_factory=lambda: {})
 
     @staticmethod
@@ -265,6 +267,8 @@ class BotConfig:
             "embedding",
             "moderation",
             "llm_outer_world",
+            "llm_sub_heartflow",
+            "llm_heartflow",
         ]
 
         for item in config_list:
diff --git a/src/plugins/chat/prompt_builder.py b/src/plugins/chat/prompt_builder.py
index b03e6b044..e6bdaf979 100644
--- a/src/plugins/chat/prompt_builder.py
+++ b/src/plugins/chat/prompt_builder.py
@@ -37,7 +37,10 @@ class PromptBuilder:
         )
 
         # outer_world_info = outer_world.outer_world_info
-        current_mind_info = subheartflow_manager.get_subheartflow(stream_id).current_mind
+        if global_config.enable_think_flow:
+            current_mind_info = subheartflow_manager.get_subheartflow(stream_id).current_mind
+        else:
+            current_mind_info = ""
 
         relation_prompt = ""
         for person in who_chat_in_group:
diff --git a/src/plugins/moods/moods.py b/src/plugins/moods/moods.py
index b09e58168..3e977d024 100644
--- a/src/plugins/moods/moods.py
+++ b/src/plugins/moods/moods.py
@@ -122,7 +122,7 @@ class MoodManager:
         time_diff = current_time - self.last_update
 
         # Valence regresses toward neutral (0)
-        valence_target = -0.2
+        valence_target = 0
         self.current_mood.valence = valence_target + (self.current_mood.valence - valence_target) * math.exp(
             -self.decay_rate_valence * time_diff
         )
diff --git a/src/think_flow_demo/current_mind.py b/src/think_flow_demo/current_mind.py
index 09634cf2d..2446d66d6 100644
--- a/src/think_flow_demo/current_mind.py
+++ b/src/think_flow_demo/current_mind.py
@@ -21,7 +21,7 @@ class SubHeartflow:
         self.current_mind = ""
         self.past_mind = []
         self.current_state : CuttentState = CuttentState()
-        self.llm_model = LLM_request(model=global_config.llm_topic_judge, temperature=0.7, max_tokens=600, request_type="sub_heart_flow")
+        self.llm_model = LLM_request(model=global_config.llm_sub_heartflow, temperature=0.7, max_tokens=600, request_type="sub_heart_flow")
 
         self.outer_world = None
         self.main_heartflow_info = ""
diff --git a/src/think_flow_demo/heartflow.py b/src/think_flow_demo/heartflow.py
index 696641cb7..e455e1977 100644
--- a/src/think_flow_demo/heartflow.py
+++ b/src/think_flow_demo/heartflow.py
@@ -21,7 +21,7 @@ class Heartflow:
         self.current_mind = "You are not thinking about anything"
         self.past_mind = []
         self.current_state : CuttentState = CuttentState()
-        self.llm_model = LLM_request(model=global_config.llm_topic_judge, temperature=0.6, max_tokens=1000, request_type="heart_flow")
+        self.llm_model = LLM_request(model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow")
 
         self._subheartflows = {}
         self.active_subheartflows_nums = 0
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index c8ce896ec..2359b678d 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -132,7 +132,7 @@ enable = true
 
 [experimental]
 enable_friend_chat = false # whether to enable friend chat
-enable_thinkflow = false # whether to enable think flow
+enable_think_flow = false # whether to enable think flow
 
 # The models below need no changes if you use SiliconFlow; with the DeepSeek official API, change them to the macros you defined in .env.prod; with a custom model, pick a model with a similar role and fill it in yourself
 # Reasoning model
@@ -202,4 +202,18 @@ provider = "SILICONFLOW"
 name = "Qwen/Qwen2.5-7B-Instruct"
 provider = "SILICONFLOW"
 pri_in = 0
-pri_out = 0
\ No newline at end of file
+pri_out = 0
+
+[model.llm_sub_heartflow] # heartflow: Qwen2.5 7B recommended
+# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26
+
+[model.llm_heartflow] # heartflow: Qwen2.5 32B recommended
+# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26
\ No newline at end of file