fix: attempt to fix the blow-up (crash) issue
@@ -198,156 +198,195 @@ class LLM_request:
            headers["Accept"] = "text/event-stream"

            async with aiohttp.ClientSession() as session:
                try:
                    async with session.post(api_url, headers=headers, json=payload) as response:
                        # Handle status codes that should trigger a retry
                        if response.status in policy["retry_codes"]:
                            wait_time = policy["base_wait"] * (2**retry)
                            logger.warning(f"错误码: {response.status}, 等待 {wait_time}秒后重试")
                            if response.status == 413:
                                logger.warning("请求体过大,尝试压缩...")
                                image_base64 = compress_base64_image_by_scale(image_base64)
                                payload = await self._build_payload(prompt, image_base64, image_format)
                            elif response.status in [500, 503]:
                                logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
                                raise RuntimeError("服务器负载过高,模型恢复失败QAQ")
                            else:
                                logger.warning(f"请求限制(429),等待{wait_time}秒后重试...")
                            await asyncio.sleep(wait_time)
                            continue
                        elif response.status in policy["abort_codes"]:
                            logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
                            # Try to fetch and log the detailed error information returned by the server
                            try:
                                error_json = await response.json()
                                if error_json and isinstance(error_json, list) and len(error_json) > 0:
                                    for error_item in error_json:
                                        if "error" in error_item and isinstance(error_item["error"], dict):
                                            error_obj = error_item["error"]
                                            error_code = error_obj.get("code")
                                            error_message = error_obj.get("message")
                                            error_status = error_obj.get("status")
                                            logger.error(
                                                f"服务器错误详情: 代码={error_code}, 状态={error_status}, "
                                                f"消息={error_message}"
                                            )
                                elif isinstance(error_json, dict) and "error" in error_json:
                                    # Handle the case of a single error object
                                    error_obj = error_json.get("error", {})
                                    error_code = error_obj.get("code")
                                    error_message = error_obj.get("message")
                                    error_status = error_obj.get("status")
                                    logger.error(
                                        f"服务器错误详情: 代码={error_code}, 状态={error_status}, 消息={error_message}"
                                    )
                                else:
                                    # Log the raw error response content
                                    logger.error(f"服务器错误响应: {error_json}")
                            except Exception as e:
                                logger.warning(f"无法解析服务器错误响应: {str(e)}")

                            if response.status == 403:
                                # Only downgrade SiliconFlow's Pro V3 and R1 models
                                if (
                                    self.model_name.startswith("Pro/deepseek-ai")
                                    and self.base_url == "https://api.siliconflow.cn/v1/"
                                ):
                                    old_model_name = self.model_name
                                    self.model_name = self.model_name[4:]  # strip the "Pro/" prefix
                                    logger.warning(f"检测到403错误,模型从 {old_model_name} 降级为 {self.model_name}")

                                    # Update the global config accordingly
                                    if global_config.llm_normal.get("name") == old_model_name:
                                        global_config.llm_normal["name"] = self.model_name
                                        logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")

                                    if global_config.llm_reasoning.get("name") == old_model_name:
                                        global_config.llm_reasoning["name"] = self.model_name
                                        logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")

                                    # Update the model name in the payload
                                    if payload and "model" in payload:
                                        payload["model"] = self.model_name

                                    # Retry the request
                                    retry -= 1  # do not count this attempt against the retry limit
                                    continue

                            raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")

                        response.raise_for_status()
                        reasoning_content = ""

                        # Convert the streaming output into a non-streaming result
                        if stream_mode:
                            flag_delta_content_finished = False
                            accumulated_content = ""
                            usage = None  # initialize usage so it is never undefined

                            async for line_bytes in response.content:
                                try:
                                    line = line_bytes.decode("utf-8").strip()
                                    if not line:
                                        continue
                                    if line.startswith("data:"):
                                        data_str = line[5:].strip()
                                        if data_str == "[DONE]":
                                            break
                                        try:
                                            chunk = json.loads(data_str)
                                            if flag_delta_content_finished:
                                                chunk_usage = chunk.get("usage", None)
                                                if chunk_usage:
                                                    usage = chunk_usage  # capture token usage
                                            else:
                                                delta = chunk["choices"][0]["delta"]
                                                delta_content = delta.get("content")
                                                if delta_content is None:
                                                    delta_content = ""
                                                accumulated_content += delta_content
                                                # Check whether the streamed text has finished
                                                finish_reason = chunk["choices"][0].get("finish_reason")
                                                if delta.get("reasoning_content", None):
                                                    reasoning_content += delta["reasoning_content"]
                                                if finish_reason == "stop":
                                                    chunk_usage = chunk.get("usage", None)
                                                    if chunk_usage:
                                                        usage = chunk_usage
                                                        break
                                                    # Some platforms do not report token usage before the text ends, so read one more chunk
                                                    flag_delta_content_finished = True

                                        except Exception as e:
                                            logger.exception(f"解析流式输出错误: {str(e)}")
                                except GeneratorExit:
                                    logger.warning("流式输出被中断,正在清理资源...")
                                    # Make sure the response is released properly
                                    await response.release()
                                    # Return the content accumulated so far
                                    result = {
                                        "choices": [{"message": {"content": accumulated_content, "reasoning_content": reasoning_content}}],
                                        "usage": usage,
                                    }
                                    return (
                                        response_handler(result)
                                        if response_handler
                                        else self._default_response_handler(result, user_id, request_type, endpoint)
                                    )
                                except Exception as e:
                                    logger.error(f"处理流式输出时发生错误: {str(e)}")
                                    # Make sure resources are cleaned up even when an error occurs
                                    try:
                                        await response.release()
                                    except Exception as cleanup_error:
                                        logger.error(f"清理资源时发生错误: {cleanup_error}")
                                    # Return the content accumulated so far
                                    result = {
                                        "choices": [{"message": {"content": accumulated_content, "reasoning_content": reasoning_content}}],
                                        "usage": usage,
                                    }
                                    return (
                                        response_handler(result)
                                        if response_handler
                                        else self._default_response_handler(result, user_id, request_type, endpoint)
                                    )

                            content = accumulated_content
                            think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
                            if think_match:
                                reasoning_content = think_match.group(1).strip()
                            content = re.sub(r"<think>.*?</think>", "", content, flags=re.DOTALL).strip()
                            # Build a pseudo result so the custom or default response handler can be invoked
                            result = {
                                "choices": [{"message": {"content": content, "reasoning_content": reasoning_content}}],
                                "usage": usage,
                            }
                            return (
                                response_handler(result)
                                if response_handler
                                else self._default_response_handler(result, user_id, request_type, endpoint)
                            )
                        else:
                            result = await response.json()
                            # Use the custom handler or the default handling
                            return (
                                response_handler(result)
                                if response_handler
                                else self._default_response_handler(result, user_id, request_type, endpoint)
                            )

                except (aiohttp.ClientError, asyncio.TimeoutError) as e:
                    if retry < policy["max_retries"] - 1:
                        wait_time = policy["base_wait"] * (2**retry)
                        logger.error(f"网络错误,等待{wait_time}秒后重试... 错误: {str(e)}")
                        await asyncio.sleep(wait_time)
                        continue
                    else:
                        logger.critical(f"网络错误达到最大重试次数: {str(e)}")
                        raise RuntimeError(f"网络请求失败: {str(e)}") from e
                except Exception as e:
                    logger.critical(f"未预期的错误: {str(e)}")
                    raise RuntimeError(f"请求过程中发生错误: {str(e)}") from e

        except aiohttp.ClientResponseError as e:
            # Handle response errors raised by aiohttp
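
The retry branch above backs off exponentially via policy["base_wait"] * (2**retry). A minimal, self-contained sketch of that pattern follows; request_with_backoff, send, fake_send, max_retries and base_wait are illustrative names, not part of this repository.

import asyncio

async def request_with_backoff(send, max_retries: int = 3, base_wait: float = 1.0):
    # send() is any coroutine returning (status, body); retryable statuses mirror policy["retry_codes"]
    for retry in range(max_retries):
        status, body = await send()
        if status in (429, 500, 503):
            wait_time = base_wait * (2**retry)  # 1s, 2s, 4s, ...
            await asyncio.sleep(wait_time)
            continue
        return body
    raise RuntimeError("max retries exceeded")

async def _demo():
    attempts = {"n": 0}

    async def fake_send():
        attempts["n"] += 1
        return (429, None) if attempts["n"] < 3 else (200, "ok")

    print(await request_with_backoff(fake_send, base_wait=0.01))  # -> ok

if __name__ == "__main__":
    asyncio.run(_demo())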
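
The stream_mode branch collapses an SSE "data:" stream into one non-streaming result before handing it to the response handler. A minimal sketch of that idea, under the assumption of an OpenAI-style delta payload; collect_sse and fake_stream are illustrative names, not this project's API.

import asyncio
import json

async def collect_sse(lines):
    # lines is any async iterator yielding raw SSE lines as bytes
    accumulated = ""
    async for raw in lines:
        line = raw.decode("utf-8").strip()
        if not line.startswith("data:"):
            continue
        data_str = line[5:].strip()
        if data_str == "[DONE]":
            break
        chunk = json.loads(data_str)
        delta = chunk["choices"][0]["delta"]
        accumulated += delta.get("content") or ""
    return accumulated

async def _demo():
    async def fake_stream():
        yield b'data: {"choices": [{"delta": {"content": "Hello, "}}]}\n'
        yield b'data: {"choices": [{"delta": {"content": "world"}}]}\n'
        yield b"data: [DONE]\n"

    print(await collect_sse(fake_stream()))  # -> Hello, world

if __name__ == "__main__":
    asyncio.run(_demo())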