From 30d470d9f517545ee01e5738a504ac6554fbd67a Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Thu, 3 Apr 2025 11:07:10 +0800
Subject: [PATCH] fix: attempt to fix the crash issue
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
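
Wrap the streaming request handling in utils_model.py in a try/except:
network errors (aiohttp.ClientError, asyncio.TimeoutError) are retried with
exponential backoff, GeneratorExit and other errors raised while streaming now
release the response and return the content accumulated so far, and run.sh is
moved to scripts/run.sh.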
---
run.sh => scripts/run.sh | 0
src/plugins/models/utils_model.py | 323 +++++++++++++++++-------------
2 files changed, 181 insertions(+), 142 deletions(-)
rename run.sh => scripts/run.sh (100%)
diff --git a/run.sh b/scripts/run.sh
similarity index 100%
rename from run.sh
rename to scripts/run.sh
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index 263e11618..260c5f5a6 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -198,156 +198,195 @@ class LLM_request:
headers["Accept"] = "text/event-stream"
async with aiohttp.ClientSession() as session:
- async with session.post(api_url, headers=headers, json=payload) as response:
- # Handle status codes that should trigger a retry
- if response.status in policy["retry_codes"]:
- wait_time = policy["base_wait"] * (2**retry)
- logger.warning(f"错误码: {response.status}, 等待 {wait_time}秒后重试")
- if response.status == 413:
- logger.warning("请求体过大,尝试压缩...")
- image_base64 = compress_base64_image_by_scale(image_base64)
- payload = await self._build_payload(prompt, image_base64, image_format)
- elif response.status in [500, 503]:
- logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
- raise RuntimeError("服务器负载过高,模型恢复失败QAQ")
- else:
- logger.warning(f"请求限制(429),等待{wait_time}秒后重试...")
-
- await asyncio.sleep(wait_time)
- continue
- elif response.status in policy["abort_codes"]:
- logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
- # 尝试获取并记录服务器返回的详细错误信息
- try:
- error_json = await response.json()
- if error_json and isinstance(error_json, list) and len(error_json) > 0:
- for error_item in error_json:
- if "error" in error_item and isinstance(error_item["error"], dict):
- error_obj = error_item["error"]
- error_code = error_obj.get("code")
- error_message = error_obj.get("message")
- error_status = error_obj.get("status")
- logger.error(
- f"服务器错误详情: 代码={error_code}, 状态={error_status}, "
- f"消息={error_message}"
- )
- elif isinstance(error_json, dict) and "error" in error_json:
- # Handle the case of a single error object
- error_obj = error_json.get("error", {})
- error_code = error_obj.get("code")
- error_message = error_obj.get("message")
- error_status = error_obj.get("status")
- logger.error(
- f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
- )
+ try:
+ async with session.post(api_url, headers=headers, json=payload) as response:
+ # Handle status codes that should trigger a retry
+ if response.status in policy["retry_codes"]:
+ wait_time = policy["base_wait"] * (2**retry)
+ logger.warning(f"错误码: {response.status}, 等待 {wait_time}秒后重试")
+ if response.status == 413:
+ logger.warning("请求体过大,尝试压缩...")
+ image_base64 = compress_base64_image_by_scale(image_base64)
+ payload = await self._build_payload(prompt, image_base64, image_format)
+ elif response.status in [500, 503]:
+ logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
+ raise RuntimeError("服务器负载过高,模型恢复失败QAQ")
else:
- # Log the raw error response content
- logger.error(f"Server error response: {error_json}")
- except Exception as e:
- logger.warning(f"无法解析服务器错误响应: {str(e)}")
+ logger.warning(f"请求限制(429),等待{wait_time}秒后重试...")
- if response.status == 403:
- # Only apply the downgrade to SiliconFlow's V3 and R1 models
- if (
- self.model_name.startswith("Pro/deepseek-ai")
- and self.base_url == "https://api.siliconflow.cn/v1/"
- ):
- old_model_name = self.model_name
- self.model_name = self.model_name[4:]  # Strip the "Pro/" prefix
- logger.warning(f"403 error detected, downgrading model from {old_model_name} to {self.model_name}")
-
- # Update the global configuration
- if global_config.llm_normal.get("name") == old_model_name:
- global_config.llm_normal["name"] = self.model_name
- logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
-
- if global_config.llm_reasoning.get("name") == old_model_name:
- global_config.llm_reasoning["name"] = self.model_name
- logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")
-
- # Update the model name in the payload
- if payload and "model" in payload:
- payload["model"] = self.model_name
-
- # Retry the request
- retry -= 1  # Do not count this attempt towards the retry limit
- continue
-
- raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
-
- response.raise_for_status()
- reasoning_content = ""
-
- # Convert the streaming output into a non-streaming result
- if stream_mode:
- flag_delta_content_finished = False
- accumulated_content = ""
- usage = None  # Initialize usage to avoid an undefined-variable error
-
- async for line_bytes in response.content:
+ await asyncio.sleep(wait_time)
+ continue
+ elif response.status in policy["abort_codes"]:
+ logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
+ # 尝试获取并记录服务器返回的详细错误信息
try:
- line = line_bytes.decode("utf-8").strip()
- if not line:
+ error_json = await response.json()
+ if error_json and isinstance(error_json, list) and len(error_json) > 0:
+ for error_item in error_json:
+ if "error" in error_item and isinstance(error_item["error"], dict):
+ error_obj = error_item["error"]
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(
+ f"服务器错误详情: 代码={error_code}, 状态={error_status}, "
+ f"消息={error_message}"
+ )
+ elif isinstance(error_json, dict) and "error" in error_json:
+ # Handle the case of a single error object
+ error_obj = error_json.get("error", {})
+ error_code = error_obj.get("code")
+ error_message = error_obj.get("message")
+ error_status = error_obj.get("status")
+ logger.error(
+ f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
+ )
+ else:
+ # Log the raw error response content
+ logger.error(f"Server error response: {error_json}")
+ except Exception as e:
+ logger.warning(f"无法解析服务器错误响应: {str(e)}")
+
+ if response.status == 403:
+ # Only apply the downgrade to SiliconFlow's V3 and R1 models
+ if (
+ self.model_name.startswith("Pro/deepseek-ai")
+ and self.base_url == "https://api.siliconflow.cn/v1/"
+ ):
+ old_model_name = self.model_name
+ self.model_name = self.model_name[4:]  # Strip the "Pro/" prefix
+ logger.warning(f"403 error detected, downgrading model from {old_model_name} to {self.model_name}")
+
+ # Update the global configuration
+ if global_config.llm_normal.get("name") == old_model_name:
+ global_config.llm_normal["name"] = self.model_name
+ logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
+
+ if global_config.llm_reasoning.get("name") == old_model_name:
+ global_config.llm_reasoning["name"] = self.model_name
+ logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")
+
+ # Update the model name in the payload
+ if payload and "model" in payload:
+ payload["model"] = self.model_name
+
+ # Retry the request
+ retry -= 1  # Do not count this attempt towards the retry limit
continue
- if line.startswith("data:"):
- data_str = line[5:].strip()
- if data_str == "[DONE]":
- break
- try:
- chunk = json.loads(data_str)
- if flag_delta_content_finished:
- chunk_usage = chunk.get("usage", None)
- if chunk_usage:
- usage = chunk_usage  # Capture token usage
- else:
- delta = chunk["choices"][0]["delta"]
- delta_content = delta.get("content")
- if delta_content is None:
- delta_content = ""
- accumulated_content += delta_content
- # Check whether the streamed text has finished
- finish_reason = chunk["choices"][0].get("finish_reason")
- if delta.get("reasoning_content", None):
- reasoning_content += delta["reasoning_content"]
- if finish_reason == "stop":
+
+ raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
+
+ response.raise_for_status()
+ reasoning_content = ""
+
+ # Convert the streaming output into a non-streaming result
+ if stream_mode:
+ flag_delta_content_finished = False
+ accumulated_content = ""
+ usage = None  # Initialize usage to avoid an undefined-variable error
+
+ async for line_bytes in response.content:
+ try:
+ line = line_bytes.decode("utf-8").strip()
+ if not line:
+ continue
+ if line.startswith("data:"):
+ data_str = line[5:].strip()
+ if data_str == "[DONE]":
+ break
+ try:
+ chunk = json.loads(data_str)
+ if flag_delta_content_finished:
chunk_usage = chunk.get("usage", None)
if chunk_usage:
- usage = chunk_usage
- break
- # Some platforms only return token usage after the text output finishes, so one more chunk has to be read
- flag_delta_content_finished = True
+ usage = chunk_usage  # Capture token usage
+ else:
+ delta = chunk["choices"][0]["delta"]
+ delta_content = delta.get("content")
+ if delta_content is None:
+ delta_content = ""
+ accumulated_content += delta_content
+ # Check whether the streamed text has finished
+ finish_reason = chunk["choices"][0].get("finish_reason")
+ if delta.get("reasoning_content", None):
+ reasoning_content += delta["reasoning_content"]
+ if finish_reason == "stop":
+ chunk_usage = chunk.get("usage", None)
+ if chunk_usage:
+ usage = chunk_usage
+ break
+ # Some platforms only return token usage after the text output finishes, so one more chunk has to be read
+ flag_delta_content_finished = True
- except Exception as e:
- logger.exception(f"解析流式输出错误: {str(e)}")
- except GeneratorExit:
- logger.warning("流式输出被中断")
- break
- except Exception as e:
- logger.error(f"处理流式输出时发生错误: {str(e)}")
- break
- content = accumulated_content
- think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
- if think_match:
- reasoning_content = think_match.group(1).strip()
- content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL).strip()
- # Build a pseudo result so the custom or default response handler can be called
- result = {
- "choices": [{"message": {"content": content, "reasoning_content": reasoning_content}}],
- "usage": usage,
- }
- return (
- response_handler(result)
- if response_handler
- else self._default_response_handler(result, user_id, request_type, endpoint)
- )
+ except Exception as e:
+ logger.exception(f"解析流式输出错误: {str(e)}")
+ except GeneratorExit:
+ logger.warning("流式输出被中断,正在清理资源...")
+ # 确保资源被正确清理
+ await response.release()
+ # Return the content accumulated so far
+ result = {
+ "choices": [{"message": {"content": accumulated_content, "reasoning_content": reasoning_content}}],
+ "usage": usage,
+ }
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+ except Exception as e:
+ logger.error(f"处理流式输出时发生错误: {str(e)}")
+ # 确保在发生错误时也能正确清理资源
+ try:
+ await response.release()
+ except Exception as cleanup_error:
+ logger.error(f"清理资源时发生错误: {cleanup_error}")
+ # 返回已经累积的内容
+ result = {
+ "choices": [{"message": {"content": accumulated_content, "reasoning_content": reasoning_content}}],
+ "usage": usage,
+ }
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+ content = accumulated_content
+ think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
+ if think_match:
+ reasoning_content = think_match.group(1).strip()
+ content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL).strip()
+ # Build a pseudo result so the custom or default response handler can be called
+ result = {
+ "choices": [{"message": {"content": content, "reasoning_content": reasoning_content}}],
+ "usage": usage,
+ }
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+ else:
+ result = await response.json()
+ # Use the custom handler, or fall back to the default handling
+ return (
+ response_handler(result)
+ if response_handler
+ else self._default_response_handler(result, user_id, request_type, endpoint)
+ )
+
+ except (aiohttp.ClientError, asyncio.TimeoutError) as e:
+ if retry < policy["max_retries"] - 1:
+ wait_time = policy["base_wait"] * (2**retry)
+ logger.error(f"网络错误,等待{wait_time}秒后重试... 错误: {str(e)}")
+ await asyncio.sleep(wait_time)
+ continue
else:
- result = await response.json()
- # Use the custom handler, or fall back to the default handling
- return (
- response_handler(result)
- if response_handler
- else self._default_response_handler(result, user_id, request_type, endpoint)
- )
+ logger.critical(f"网络错误达到最大重试次数: {str(e)}")
+ raise RuntimeError(f"网络请求失败: {str(e)}") from e
+ except Exception as e:
+ logger.critical(f"未预期的错误: {str(e)}")
+ raise RuntimeError(f"请求过程中发生错误: {str(e)}") from e
except aiohttp.ClientResponseError as e:
# Handle response errors raised by aiohttp