From da760bb2009667c947b102b787b1927636184c72 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 1 Apr 2025 23:04:38 +0800
Subject: [PATCH] fix: update the statistics info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/plugins/chat_module/reasoning_chat/reasoning_generator.py | 4 ++--
 src/plugins/chat_module/think_flow_chat/think_flow_chat.py    | 2 +-
 .../chat_module/think_flow_chat/think_flow_generator.py       | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
index 787b8b229..354ddaefc 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
@@ -26,10 +26,10 @@ class ResponseGenerator:
             model=global_config.llm_reasoning,
             temperature=0.7,
             max_tokens=3000,
-            request_type="response",
+            request_type="response_reasoning",
         )
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response"
+            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_reasoning"
         )
 
         self.model_sum = LLM_request(
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
index cd9452438..e644e1eb9 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
@@ -148,7 +148,7 @@ class ThinkFlowChat:
         if groupinfo.group_id not in global_config.talk_allowed_groups:
             return
 
-        logger.info("使用思维流聊天模式")
+        # logger.info("使用思维流聊天模式")
 
         # 创建聊天流
         chat = await chat_manager.get_or_create_stream(
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
index d9a5c4ce0..18769983f 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
@@ -23,7 +23,7 @@ logger = get_module_logger("llm_generator", config=llm_config)
 class ResponseGenerator:
     def __init__(self):
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response"
+            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_heartflow"
        )
 
         self.model_sum = LLM_request(
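
The patch replaces the shared `request_type="response"` label with module-specific labels (`response_reasoning` for reasoning_chat, `response_heartflow` for think_flow_chat), so that usage statistics can tell the two chat modules apart. The internals of `LLM_request` and the project's statistics collector are not shown in this patch, so the following is only a minimal sketch, assuming the `request_type` string ends up as a bucket key in some usage counter; the `UsageStats` class and its method names are hypothetical.

```python
# Hypothetical sketch of why distinct request_type labels matter for
# statistics. This is NOT the repository's implementation; it only
# illustrates label-keyed aggregation.
from collections import defaultdict


class UsageStats:
    """Accumulates call and token counts per request_type label."""

    def __init__(self) -> None:
        self.tokens_by_type: defaultdict[str, int] = defaultdict(int)
        self.calls_by_type: defaultdict[str, int] = defaultdict(int)

    def record(self, request_type: str, tokens: int) -> None:
        # Each LLM call reports its label and token usage here.
        self.tokens_by_type[request_type] += tokens
        self.calls_by_type[request_type] += 1

    def report(self) -> str:
        return "\n".join(
            f"{rtype}: {self.calls_by_type[rtype]} calls, {tokens} tokens"
            for rtype, tokens in sorted(self.tokens_by_type.items())
        )


stats = UsageStats()
# Before this patch both modules recorded under the same "response" key,
# so their usage was merged into one bucket. With distinct labels the
# report separates them:
stats.record("response_reasoning", 1200)  # reasoning_chat traffic
stats.record("response_heartflow", 256)   # think_flow_chat traffic
print(stats.report())
```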