diff --git a/src/heart_flow/chat_state_info.py b/src/heart_flow/chat_state_info.py
index 78fb4e8d1..14fd33403 100644
--- a/src/heart_flow/chat_state_info.py
+++ b/src/heart_flow/chat_state_info.py
@@ -14,4 +14,4 @@ class ChatStateInfo:
         self.current_state_time = 120
 
         self.mood_manager = MoodManager()
-        self.mood = self.mood_manager.get_prompt()
\ No newline at end of file
+        self.mood = self.mood_manager.get_prompt()
diff --git a/src/heart_flow/mai_state_manager.py b/src/heart_flow/mai_state_manager.py
index 9a39b5fe5..7df55d6f0 100644
--- a/src/heart_flow/mai_state_manager.py
+++ b/src/heart_flow/mai_state_manager.py
@@ -13,8 +13,8 @@ mai_state_config = LogConfig(
 
 logger = get_module_logger("mai_state_manager", config=mai_state_config)
 
-enable_unlimited_hfc_chat = True
-# enable_unlimited_hfc_chat = False
+# enable_unlimited_hfc_chat = True
+enable_unlimited_hfc_chat = False
 
 
 class MaiState(enum.Enum):
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index bb6719136..7a6e009c7 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -72,7 +72,7 @@ class InterestChatting:
         self.above_threshold = False
         self.start_hfc_probability = 0.0
 
-        
+
     def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
         self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
         self.last_interaction_time = time.time()
@@ -221,9 +221,7 @@ class SubHeartflow:
 
         # 聊天状态管理
         self.chat_state: ChatStateInfo = ChatStateInfo()  # 该sub_heartflow的聊天状态信息
-        self.interest_chatting = InterestChatting(
-            state_change_callback=self.set_chat_state
-        )
+        self.interest_chatting = InterestChatting(state_change_callback=self.set_chat_state)
 
         # 活动状态管理
         self.last_active_time = time.time()  # 最后活跃时间
@@ -238,14 +236,10 @@ class SubHeartflow:
 
         # LLM模型配置
         self.sub_mind = SubMind(
-            subheartflow_id=self.subheartflow_id,
-            chat_state=self.chat_state,
-            observations=self.observations
+            subheartflow_id=self.subheartflow_id, chat_state=self.chat_state, observations=self.observations
         )
-
         self.log_prefix = chat_manager.get_stream_name(self.subheartflow_id) or self.subheartflow_id
-
 
     async def add_time_current_state(self, add_time: float):
         self.current_state_time += add_time
 
@@ -335,9 +329,7 @@ class SubHeartflow:
             logger.info(f"{log_prefix} 麦麦准备开始专注聊天 (创建新实例)...")
             try:
                 self.heart_fc_instance = HeartFChatting(
-                    chat_id=self.chat_id,
-                    sub_mind=self.sub_mind,
-                    observations=self.observations
+                    chat_id=self.chat_id, sub_mind=self.sub_mind, observations=self.observations
                 )
                 if await self.heart_fc_instance._initialize():
                     await self.heart_fc_instance.start()  # 初始化成功后启动循环
@@ -434,7 +426,6 @@ class SubHeartflow:
 
         logger.info(f"{self.log_prefix} 子心流后台任务已停止。")
 
-
     def update_current_mind(self, response):
         self.sub_mind.update_current_mind(response)
 
@@ -523,6 +514,3 @@ class SubHeartflow:
 
         self.chat_state.chat_status = ChatState.ABSENT  # 状态重置为不参与
         logger.info(f"{self.log_prefix} 子心流关闭完成。")
-
-
-
diff --git a/src/heart_flow/sub_mind.py b/src/heart_flow/sub_mind.py
index 03a9997fc..4bde47275 100644
--- a/src/heart_flow/sub_mind.py
+++ b/src/heart_flow/sub_mind.py
@@ -19,7 +19,6 @@ subheartflow_config = LogConfig(
 logger = get_module_logger("subheartflow", config=subheartflow_config)
 
 
-
 def init_prompt():
     prompt = ""
     # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
@@ -39,32 +38,26 @@ def init_prompt():
 
     prompt += "在输出完想法后，请你思考应该使用什么工具。如果你需要做某件事，来对消息和你的回复进行处理，请使用工具。\n"
     Prompt(prompt, "sub_heartflow_prompt_before")
-    
-    
+
 
 class SubMind:
-    def __init__(
-        self,
-        subheartflow_id: str,
-        chat_state: ChatStateInfo,
-        observations: Observation
-    ):
+    def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: Observation):
         self.subheartflow_id = subheartflow_id
-        
+
         self.llm_model = LLMRequest(
             model=global_config.llm_sub_heartflow,
             temperature=global_config.llm_sub_heartflow["temp"],
             max_tokens=800,
             request_type="sub_heart_flow",
         )
-        
+
         self.chat_state = chat_state
         self.observations = observations
-        
+
         self.current_mind = ""
         self.past_mind = []
         self.structured_info = {}
-        
+
     async def do_thinking_before_reply(self):
         """
         在回复前进行思考，生成内心想法并收集工具调用结果
@@ -151,7 +144,7 @@ class SubMind:
 
         # ---------- 5. 执行LLM请求并处理响应 ----------
         content = ""  # 初始化内容变量
-        reasoning_content = ""  # 初始化推理内容变量
+        _reasoning_content = ""  # 初始化推理内容变量
 
         try:
             # 调用LLM生成响应
@@ -211,8 +204,7 @@ class SubMind:
 
         self.update_current_mind(content)
         return self.current_mind, self.past_mind
-        
-        
+
     async def _execute_tool_calls(self, tool_calls, tool_instance):
         """
         执行一组工具调用并收集结果
@@ -244,11 +236,10 @@ class SubMind:
         if structured_info:
             logger.debug(f"工具调用收集到结构化信息: {safe_json_dumps(structured_info, ensure_ascii=False)}")
             self.structured_info = structured_info
-            
-            
+
     def update_current_mind(self, response):
         self.past_mind.append(self.current_mind)
         self.current_mind = response
-        
-        
-init_prompt()
\ No newline at end of file
+
+
+init_prompt()
diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py
index 3229317fd..d746059e7 100644
--- a/src/plugins/heartFC_chat/heartFC_chat.py
+++ b/src/plugins/heartFC_chat/heartFC_chat.py
@@ -67,12 +67,7 @@ class HeartFChatting:
     其生命周期现在由其关联的 SubHeartflow 的 FOCUSED 状态控制。
     """
 
-    def __init__(
-        self,
-        chat_id: str,
-        sub_mind: SubMind,
-        observations: Observation
-    ):
+    def __init__(self, chat_id: str, sub_mind: SubMind, observations: Observation):
         """
         HeartFChatting 初始化函数
 
@@ -438,7 +433,9 @@ class HeartFChatting:
         llm_error = False  # LLM错误标志
 
         try:
-            prompt = await self._build_planner_prompt(observed_messages_str, current_mind, self.sub_mind.structured_info)
+            prompt = await self._build_planner_prompt(
+                observed_messages_str, current_mind, self.sub_mind.structured_info
+            )
             payload = {
                 "model": self.planner_llm.model_name,
                 "messages": [{"role": "user", "content": prompt}],