From 42b1b772efc7db9a47b20970cd98f401c7677716 Mon Sep 17 00:00:00 2001
From: tcmofashi
Date: Mon, 31 Mar 2025 09:09:30 +0800
Subject: [PATCH] fix: fix streaming output issues and various mysterious
 problems

---
 src/heart_flow/heartflow.py       | 4 +++-
 src/plugins/chat/bot.py           | 4 ++--
 src/plugins/chat/llm_generator.py | 3 +--
 src/plugins/message/api.py        | 9 +++++++--
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py
index ffc7ca4fc..8637d2071 100644
--- a/src/heart_flow/heartflow.py
+++ b/src/heart_flow/heartflow.py
@@ -50,7 +50,9 @@ class Heartflow:
 
         # Check all subheartflows
         for subheartflow_id, subheartflow in self._subheartflows.items():
-            if current_time - subheartflow.last_active_time > global_config.sub_heart_flow_stop_time:  # 10 minutes = 600 seconds
+            if (
+                current_time - subheartflow.last_active_time > global_config.sub_heart_flow_stop_time
+            ):  # 10 minutes = 600 seconds
                 inactive_subheartflows.append(subheartflow_id)
                 logger.info(f"Found inactive subheartflow: {subheartflow_id}")
 
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 0a074c644..e01a928d5 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -162,7 +162,7 @@ class ChatBot:
             logger.debug(f"8 emoji processing time: {timer2 - timer1} seconds")
 
             timer1 = time.time()
-            await self._update_using_response(message, chat, response_set)
+            await self._update_using_response(message, response_set)
             timer2 = time.time()
             logger.info(f"6 htfl update time: {timer2 - timer1} seconds")
 
@@ -213,7 +213,7 @@ class ChatBot:
             stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
         )
 
-        heartflow.get_subheartflow(stream_id).do_after_reply(response_set, chat_talking_prompt)
+        await heartflow.get_subheartflow(stream_id).do_after_reply(response_set, chat_talking_prompt)
 
     async def _send_response_messages(self, message, chat, response_set, thinking_id):
         container = message_manager.get_container(chat.stream_id)
diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index 1023cb52d..f551dcca7 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -26,8 +26,7 @@ class ResponseGenerator:
         self.model_reasoning = LLM_request(
             model=global_config.llm_reasoning,
             temperature=0.7,
-            max_tokens=1000,
-            stream=True,
+            max_tokens=3000,
             request_type="response",
         )
         self.model_normal = LLM_request(
diff --git a/src/plugins/message/api.py b/src/plugins/message/api.py
index db609823f..0478aab16 100644
--- a/src/plugins/message/api.py
+++ b/src/plugins/message/api.py
@@ -1,9 +1,13 @@
 from fastapi import FastAPI, HTTPException
 from typing import Dict, Any, Callable, List
+from src.common.logger import get_module_logger
 import aiohttp
 import asyncio
 import uvicorn
 import os
+import traceback
+
+logger = get_module_logger("api")
 
 
 class BaseMessageAPI:
@@ -50,8 +54,9 @@ class BaseMessageAPI:
             for handler in self.message_handlers:
                 try:
                     await handler(self.cache[0])
-                except Exception:
-                    pass
+                except Exception as e:
+                    logger.error(str(e))
+                    logger.error(traceback.format_exc())
                 self.cache.pop(0)
                 if len(self.cache) > 0:
                     await asyncio.sleep(0.1 / len(self.cache))
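
Note on the do_after_reply change in src/plugins/chat/bot.py: do_after_reply is
evidently a coroutine, and the pre-patch call dropped the returned coroutine
object without awaiting it, so its body never ran. The sketch below is a
minimal, self-contained illustration of that failure mode and the fix; the
SubHeartflow class and its state here are hypothetical stand-ins, not the
project's actual implementation.

    import asyncio


    class SubHeartflow:
        """Hypothetical stand-in for the project's subheartflow class."""

        def __init__(self):
            self.replies = []

        async def do_after_reply(self, response):
            # This body only runs once the returned coroutine is awaited
            # (or scheduled on the event loop).
            self.replies.append(response)


    async def main():
        flow = SubHeartflow()

        # Pre-patch bug: this builds a coroutine object and discards it,
        # so the method body never executes; Python later emits a
        # "coroutine ... was never awaited" RuntimeWarning at GC time.
        flow.do_after_reply("hello")
        print(len(flow.replies))  # 0 -- state was silently never updated

        # Post-patch fix: awaiting the call actually runs the body.
        await flow.do_after_reply("hello")
        print(len(flow.replies))  # 1


    asyncio.run(main())

The src/plugins/message/api.py hunk follows the same theme: exceptions raised
by message handlers were previously swallowed by "except Exception: pass", so
failures like the one above left no trace; they are now logged together with
traceback.format_exc(), which makes such silent breakage visible in the log.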