From 1e82d42ea0492e23f5d8decd177a80ad7556dd1f Mon Sep 17 00:00:00 2001
From: minecraft1024a
Date: Wed, 24 Sep 2025 21:14:39 +0800
Subject: [PATCH] =?UTF-8?q?Revert=20"refactor(llm):=20=E7=B2=BE=E7=AE=80?=
 =?UTF-8?q?=E6=95=85=E9=9A=9C=E8=BD=AC=E7=A7=BB=E6=89=A7=E8=A1=8C=E5=99=A8?=
 =?UTF-8?q?=E7=9A=84=E8=B0=83=E7=94=A8=E9=80=BB=E8=BE=91"?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 7dfc005a3e0efa7f8b4de7ccf7250c16f81a50da.
---
 src/llm_models/utils_model.py | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 45488fa39..b750bbbb5 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -209,7 +209,12 @@ class LLMRequest:
             )
             return content, (reasoning_content, model_info.name, tool_calls)
 
-        return await self._execute_with_failover(request_callable=request_logic, raise_on_failure=True)
+        result = await self._execute_with_failover(request_callable=request_logic, raise_on_failure=True)
+        if result:
+            return result
+
+        # 这段代码理论上不可达,因为 raise_on_failure=True 会抛出异常
+        raise RuntimeError("图片响应生成失败,所有模型均尝试失败。")
 
     async def generate_response_for_voice(self, voice_base64: str) -> Optional[str]:
         """
@@ -232,7 +237,8 @@ class LLMRequest:
             return response.content or None
 
         # 对于语音识别,如果所有模型都失败,我们可能不希望程序崩溃,而是返回None
-        return await self._execute_with_failover(request_callable=request_logic, raise_on_failure=False)
+        result = await self._execute_with_failover(request_callable=request_logic, raise_on_failure=False)
+        return result
 
     async def generate_response_async(
         self,
@@ -501,7 +507,12 @@ class LLMRequest:
 
             return embedding, model_info.name
 
-        return await self._execute_with_failover(request_callable=request_logic, raise_on_failure=True)
+        result = await self._execute_with_failover(request_callable=request_logic, raise_on_failure=True)
+        if result:
+            return result
+
+        # 这段代码理论上不可达,因为 raise_on_failure=True 会抛出异常
+        raise RuntimeError("获取 embedding 失败,所有模型均尝试失败。")
 
     def _model_scheduler(
         self, failed_models: set | None = None
@@ -663,7 +674,7 @@ class LLMRequest:
                 cannot_retry_msg=f"任务-'{task_name}' 模型-'{model_name}': 连接异常,超过最大重试次数,请检查网络连接状态或URL是否正确",
             )
         elif isinstance(e, ReqAbortException):
-            logger.warning(f"任务-'{task_name}' 模型-'{model_name}': 请求被中断,详细信息-{e}")
+            logger.warning(f"任务-'{task_name}' 模型-'{model_name}': 请求被中断,详细信息-{str(e.message)}")
            return -1, None  # 不再重试请求该模型
         elif isinstance(e, RespNotOkException):
             return self._handle_resp_not_ok(
@@ -677,7 +688,7 @@ class LLMRequest:
             )
         elif isinstance(e, RespParseException):
             # 响应解析错误
-            logger.error(f"任务-'{task_name}' 模型-'{model_name}': 响应解析错误,错误信息-{e}")
+            logger.error(f"任务-'{task_name}' 模型-'{model_name}': 响应解析错误,错误信息-{e.message}")
             logger.debug(f"附加内容: {str(e.ext_info)}")
             return -1, None  # 不再重试请求该模型
         else:
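
Note on the call contract this revert restores: `_execute_with_failover(..., raise_on_failure=True)` is expected either to return a successful result or to raise once every candidate model has failed, so the re-added `if result: ... raise RuntimeError(...)` tail is only a type-narrowing guard; with `raise_on_failure=False` (the voice path) total failure is signalled by returning None instead. A minimal sketch of such an executor follows; every name below except `raise_on_failure` and `request_callable` is invented for illustration and is not taken from the repository:

    import asyncio
    from typing import Awaitable, Callable, Optional, TypeVar

    T = TypeVar("T")

    async def execute_with_failover(
        models: list[str],
        request_callable: Callable[[str], Awaitable[T]],
        raise_on_failure: bool = True,
    ) -> Optional[T]:
        """Try each model in order and return the first successful result."""
        last_error: Optional[Exception] = None
        for model in models:
            try:
                return await request_callable(model)
            except Exception as e:  # a real executor would catch narrower exception types
                last_error = e
        # Every model failed: either propagate the failure or signal it with None.
        if raise_on_failure:
            raise RuntimeError("all models failed") from last_error
        return None

    async def _demo() -> None:
        async def fake_request(model: str) -> str:
            if model == "backup":
                return f"ok from {model}"
            raise ConnectionError(f"{model} unreachable")

        # Falls back to the second model and prints its result.
        print(await execute_with_failover(["primary", "backup"], fake_request))

    asyncio.run(_demo())

Under this contract the None branch after a `raise_on_failure=True` call is unreachable at runtime, which is what the reverted Chinese comment states; the explicit trailing raise exists because static type checkers cannot infer that guarantee.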