From c77f468dfc54140e80d578baf8a8600e1c6f79a9 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 28 Apr 2025 01:31:59 +0800
Subject: [PATCH] fix: V3 哈气 (huffing)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/heart_flow/mai_state_manager.py      | 4 ++--
 src/plugins/heartFC_chat/heartFC_chat.py | 6 ++++++
 src/plugins/utils/json_utils.py          | 2 ++
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/heart_flow/mai_state_manager.py b/src/heart_flow/mai_state_manager.py
index 0888ae1fd..1743df167 100644
--- a/src/heart_flow/mai_state_manager.py
+++ b/src/heart_flow/mai_state_manager.py
@@ -8,8 +8,8 @@ from src.plugins.moods.moods import MoodManager
 
 logger = get_logger("mai_state")
 
-# enable_unlimited_hfc_chat = True
-enable_unlimited_hfc_chat = False
+enable_unlimited_hfc_chat = True
+# enable_unlimited_hfc_chat = False
 
 
 class MaiState(enum.Enum):
diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py
index 3ae3a1c7c..a6ed990a6 100644
--- a/src/plugins/heartFC_chat/heartFC_chat.py
+++ b/src/plugins/heartFC_chat/heartFC_chat.py
@@ -762,9 +762,13 @@ class HeartFChatting:
 
         # 执行LLM请求
         try:
+            # Log the full planner request and raw response at DEBUG level
+            logger.debug(f"{self.log_prefix}[Planner] request payload: {payload}")
+            logger.debug(f"{self.log_prefix}[Planner] request prompt: {prompt}")
             response = await self.planner_llm._execute_request(
                 endpoint="/chat/completions", payload=payload, prompt=prompt
             )
+            logger.debug(f"{self.log_prefix}[Planner] raw response: {response}")
         except Exception as req_e:
             logger.error(f"{self.log_prefix}[Planner] LLM请求执行失败: {req_e}")
             return {
@@ -779,6 +783,8 @@ class HeartFChatting:
         # 处理LLM响应
         with Timer("使用工具", cycle_timers):
             # 使用辅助函数处理工具调用响应
+            logger.debug(f"{self.log_prefix}[Planner] tool-call response: {response}")
+
             success, arguments, error_msg = process_llm_tool_response(
                 response, expected_tool_name="decide_reply_action", log_prefix=f"{self.log_prefix}[Planner] "
             )
diff --git a/src/plugins/utils/json_utils.py b/src/plugins/utils/json_utils.py
index bf4b08398..44e555dc7 100644
--- a/src/plugins/utils/json_utils.py
+++ b/src/plugins/utils/json_utils.py
@@ -213,6 +213,8 @@ def process_llm_tool_calls(response: List[Any], log_prefix: str = "") -> Tuple[b
         元组 (成功标志, 工具调用列表, 错误消息)
     """
     # 确保响应格式正确
+    logger.debug(f"{log_prefix}raw LLM response: {response}")
+
    if len(response) != 3:
         return False, [], f"LLM响应元素数量不正确: 预期3个元素,实际{len(response)}个"
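
The mai_state_manager.py hunk turns enable_unlimited_hfc_chat on by swapping which of the two
assignments is commented out, which is easy to ship by accident. A minimal sketch of driving the
same switch from an environment variable instead; the variable name MAIBOT_UNLIMITED_HFC is
illustrative, not an existing setting in this repo:

    import os

    # Lift the HFC chat limit only when the environment opts in;
    # the committed default stays False, so nothing changes in production.
    enable_unlimited_hfc_chat = os.getenv("MAIBOT_UNLIMITED_HFC", "0") == "1"

With this, enabling unlimited HFC chat for a debugging session is a shell export rather than a
source edit that has to be reverted before committing.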
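
The debug output in heartFC_chat.py and json_utils.py goes through logger.debug rather than bare
print. In heartFC_chat.py the logger is demonstrably in scope (the same hunk calls logger.error);
for json_utils.py a module-level logger is assumed, following the get_logger pattern visible in
mai_state_manager.py. A sketch of that assumed setup, with the import path guessed rather than
confirmed:

    # json_utils.py module header (sketch: the import path for get_logger
    # is an assumption, mirroring how mai_state_manager.py obtains its logger)
    from src.common.logger import get_logger

    logger = get_logger("json_utils")

DEBUG-level records stay silent at the default log level but can be switched on while diagnosing
planner issues, and they carry the log_prefix so interleaved chats remain distinguishable.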
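
For context, process_llm_tool_calls rejects any response that is not exactly three elements before
unpacking it, as the error string in the last hunk shows. The patch does not reveal what the three
slots hold, so this standalone sketch only reproduces the arity guard:

    from typing import Any, List, Tuple

    def check_response_arity(response: List[Any]) -> Tuple[bool, str]:
        # Downstream code unpacks exactly three slots, so fail fast with a
        # descriptive message instead of raising a ValueError mid-unpack.
        if len(response) != 3:
            return False, f"unexpected LLM response shape: expected 3 elements, got {len(response)}"
        return True, ""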