From c1da7452d68126c5ff06539fc6c026deafdd2c9e Mon Sep 17 00:00:00 2001 From: Windpicker-owo <3431391539@qq.com> Date: Wed, 3 Dec 2025 13:22:07 +0800 Subject: [PATCH] =?UTF-8?q?feat(interest):=20=E5=A2=9E=E5=8A=A0LLM?= =?UTF-8?q?=E5=85=B4=E8=B6=A3=E6=A0=87=E7=AD=BE=E7=94=9F=E6=88=90=E6=97=B6?= =?UTF-8?q?=E7=9A=84=E8=B6=85=E6=97=B6=E8=AE=BE=E7=BD=AE=EF=BC=8C=E7=A1=AE?= =?UTF-8?q?=E4=BF=9D=E5=88=9D=E5=A7=8B=E5=8C=96=E9=98=B6=E6=AE=B5=E4=B8=8D?= =?UTF-8?q?=E5=9B=A0=E8=B6=85=E6=97=B6=E5=A4=B1=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../interest_system/bot_interest_manager.py | 50 +++++++++++++++---- 1 file changed, 41 insertions(+), 9 deletions(-) diff --git a/src/chat/interest_system/bot_interest_manager.py b/src/chat/interest_system/bot_interest_manager.py index 7843588be..52e71ed84 100644 --- a/src/chat/interest_system/bot_interest_manager.py +++ b/src/chat/interest_system/bot_interest_manager.py @@ -246,7 +246,11 @@ class BotInterestManager: raise async def _call_llm_for_interest_generation(self, prompt: str) -> str | None: - """调用LLM生成兴趣标签""" + """调用LLM生成兴趣标签 + + 注意:此方法会临时增加 API 超时时间,以确保初始化阶段的人设标签生成 + 不会因用户配置的较短超时而失败。 + """ try: logger.debug("配置LLM客户端...") @@ -267,14 +271,42 @@ class BotInterestManager: # 使用replyer模型配置 replyer_config = model_config.model_task_config.replyer - # 调用LLM API - success, response, reasoning_content, model_name = await llm_api.generate_with_model( - prompt=full_prompt, - model_config=replyer_config, - request_type="interest_generation", - temperature=0.7, - max_tokens=2000, - ) + # 🔧 临时增加超时时间,避免初始化阶段因超时失败 + # 人设标签生成需要较长时间(15-25个标签的JSON),使用更长的超时 + INIT_TIMEOUT = 180 # 初始化阶段使用 180 秒超时 + original_timeouts: dict[str, int] = {} + + try: + # 保存并修改所有相关模型的 API provider 超时设置 + for model_name in replyer_config.model_list: + try: + model_info = model_config.get_model_info(model_name) + provider = model_config.get_provider(model_info.api_provider) + original_timeouts[provider.name] = 
original_timeouts.get(provider.name, provider.timeout) + if provider.timeout < INIT_TIMEOUT: + logger.debug(f"⏱️ 临时增加 API provider '{provider.name}' 超时: {provider.timeout}s → {INIT_TIMEOUT}s") + provider.timeout = INIT_TIMEOUT + except Exception as e: + logger.warning(f"⚠️ 无法修改模型 '{model_name}' 的超时设置: {e}") + + # 调用LLM API + success, response, reasoning_content, model_name = await llm_api.generate_with_model( + prompt=full_prompt, + model_config=replyer_config, + request_type="interest_generation", + temperature=0.7, + max_tokens=2000, + ) + finally: + # 🔧 恢复原始超时设置 + for provider_name, original_timeout in original_timeouts.items(): + try: + provider = model_config.get_provider(provider_name) + if provider.timeout != original_timeout: + logger.debug(f"⏱️ 恢复 API provider '{provider_name}' 超时: {provider.timeout}s → {original_timeout}s") + provider.timeout = original_timeout + except Exception as e: + logger.warning(f"⚠️ 无法恢复 provider '{provider_name}' 的超时设置: {e}") if success and response: # 直接返回原始响应,后续使用统一的 JSON 解析工具