Add handling for <think> tags
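In short: when a model inlines its reasoning inside <think>...</think> tags instead of returning a separate reasoning_content field, the new code extracts that block as the reasoning and strips it from the visible reply. Below is a condensed, self-contained sketch of that logic; the helper name split_think_tags is illustrative and not part of the commit, but the regular expressions mirror the ones added in the diff.

import re
from typing import Tuple


def split_think_tags(content: str, reasoning_content: str = "") -> Tuple[str, str]:
    """Hypothetical helper: condensed equivalent of the <think> handling added in this commit."""
    if not reasoning_content:
        # Recover reasoning from an inline <think>...</think> block, if present.
        think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
        if think_match:
            reasoning_content = think_match.group(1).strip()
            # Remove the think block from the user-visible content.
            content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
    return content, reasoning_content


# Example: split_think_tags("<think>chain of thought</think>Final answer.")
# returns ("Final answer.", "chain of thought")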
@@ -2,6 +2,7 @@ import aiohttp
 import asyncio
 import requests
 import time
+import re
 from typing import Tuple, Union
 from nonebot import get_driver
 from loguru import logger
@@ -10,6 +11,7 @@ from ..chat.config import global_config
 driver = get_driver()
 config = driver.config


 class LLM_request:
     def __init__(self, model, **kwargs):
         # 将大写的配置键转换为小写并从config中获取实际值
@@ -28,21 +30,21 @@ class LLM_request:
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json"
         }

         # 构建请求体
         data = {
             "model": self.model_name,
             "messages": [{"role": "user", "content": prompt}],
             **self.params
         }

         # 发送请求到完整的chat/completions端点
         api_url = f"{self.base_url.rstrip('/')}/chat/completions"
         logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

         max_retries = 3
         base_wait_time = 15

         for retry in range(max_retries):
             try:
                 async with aiohttp.ClientSession() as session:
@@ -52,16 +54,16 @@ class LLM_request:
                             logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                             await asyncio.sleep(wait_time)
                             continue

                         response.raise_for_status()  # 检查其他响应状态

                         result = await response.json()
                         if "choices" in result and len(result["choices"]) > 0:
                             content = result["choices"][0]["message"]["content"]
                             reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                             return content, reasoning_content
                         return "没有返回结果", ""

             except Exception as e:
                 if retry < max_retries - 1:  # 如果还有重试机会
                     wait_time = base_wait_time * (2 ** retry)
@@ -70,7 +72,7 @@ class LLM_request:
                 else:
                     logger.critical(f"请求失败: {str(e)}", exc_info=True)
                     raise RuntimeError(f"API请求失败: {str(e)}")

         logger.error("达到最大重试次数,请求仍然失败")
         raise RuntimeError("达到最大重试次数,API请求仍然失败")

@@ -80,7 +82,7 @@ class LLM_request:
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json"
         }

         # 构建请求体
         data = {
             "model": self.model_name,
@@ -103,14 +105,14 @@ class LLM_request:
             ],
             **self.params
         }

         # 发送请求到完整的chat/completions端点
         api_url = f"{self.base_url.rstrip('/')}/chat/completions"
         logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

         max_retries = 3
         base_wait_time = 15

         for retry in range(max_retries):
             try:
                 async with aiohttp.ClientSession() as session:
@@ -120,16 +122,23 @@ class LLM_request:
                             logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                             await asyncio.sleep(wait_time)
                             continue

                         response.raise_for_status()  # 检查其他响应状态

                         result = await response.json()
                         if "choices" in result and len(result["choices"]) > 0:
-                            content = result["choices"][0]["message"]["content"]
-                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
+                            message = result["choices"][0]["message"]
+                            content = message.get("content", "")
+                            think_match = None
+                            reasoning_content = message.get("reasoning_content", "")
+                            if not reasoning_content:
+                                think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
+                            if think_match:
+                                reasoning_content = think_match.group(1).strip()
+                                content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
                             return content, reasoning_content
                         return "没有返回结果", ""

             except Exception as e:
                 if retry < max_retries - 1:  # 如果还有重试机会
                     wait_time = base_wait_time * (2 ** retry)
@@ -138,7 +147,7 @@ class LLM_request:
                 else:
                     logger.critical(f"请求失败: {str(e)}", exc_info=True)
                     raise RuntimeError(f"API请求失败: {str(e)}")

         logger.error("达到最大重试次数,请求仍然失败")
         raise RuntimeError("达到最大重试次数,API请求仍然失败")

@@ -148,7 +157,7 @@ class LLM_request:
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json"
         }

         # 构建请求体
         data = {
             "model": self.model_name,
@@ -171,33 +180,40 @@ class LLM_request:
             ],
             **self.params
         }

         # 发送请求到完整的chat/completions端点
         api_url = f"{self.base_url.rstrip('/')}/chat/completions"
         logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

         max_retries = 2
         base_wait_time = 6

         for retry in range(max_retries):
             try:
                 response = requests.post(api_url, headers=headers, json=data, timeout=30)

                 if response.status_code == 429:
                     wait_time = base_wait_time * (2 ** retry)
                     logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                     time.sleep(wait_time)
                     continue

                 response.raise_for_status()  # 检查其他响应状态

                 result = response.json()
                 if "choices" in result and len(result["choices"]) > 0:
-                    content = result["choices"][0]["message"]["content"]
-                    reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
+                    message = result["choices"][0]["message"]
+                    content = message.get("content", "")
+                    think_match = None
+                    reasoning_content = message.get("reasoning_content", "")
+                    if not reasoning_content:
+                        think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
+                    if think_match:
+                        reasoning_content = think_match.group(1).strip()
+                        content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()
                     return content, reasoning_content
                 return "没有返回结果", ""

             except Exception as e:
                 if retry < max_retries - 1:  # 如果还有重试机会
                     wait_time = base_wait_time * (2 ** retry)
@@ -206,7 +222,7 @@ class LLM_request:
                 else:
                     logger.critical(f"请求失败: {str(e)}", exc_info=True)
                     raise RuntimeError(f"API请求失败: {str(e)}")

         logger.error("达到最大重试次数,请求仍然失败")
         raise RuntimeError("达到最大重试次数,API请求仍然失败")

@@ -224,36 +240,36 @@ class LLM_request:
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json"
         }

         data = {
             "model": model,
             "input": text,
             "encoding_format": "float"
         }

         api_url = f"{self.base_url.rstrip('/')}/embeddings"
         logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

         max_retries = 2
         base_wait_time = 6

         for retry in range(max_retries):
             try:
                 response = requests.post(api_url, headers=headers, json=data, timeout=30)

                 if response.status_code == 429:
                     wait_time = base_wait_time * (2 ** retry)
                     logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                     time.sleep(wait_time)
                     continue

                 response.raise_for_status()

                 result = response.json()
                 if 'data' in result and len(result['data']) > 0:
                     return result['data'][0]['embedding']
                 return None

             except Exception as e:
                 if retry < max_retries - 1:
                     wait_time = base_wait_time * (2 ** retry)
@@ -262,7 +278,7 @@ class LLM_request:
                 else:
                     logger.critical(f"embedding请求失败: {str(e)}", exc_info=True)
                     return None

         logger.error("达到最大重试次数,embedding请求仍然失败")
         return None

@@ -280,19 +296,19 @@ class LLM_request:
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json"
         }

         data = {
             "model": model,
             "input": text,
             "encoding_format": "float"
         }

         api_url = f"{self.base_url.rstrip('/')}/embeddings"
         logger.info(f"发送请求到URL: {api_url}")  # 记录请求的URL

         max_retries = 3
         base_wait_time = 15

         for retry in range(max_retries):
             try:
                 async with aiohttp.ClientSession() as session:
@@ -302,14 +318,14 @@ class LLM_request:
                             logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                             await asyncio.sleep(wait_time)
                             continue

                         response.raise_for_status()

                         result = await response.json()
                         if 'data' in result and len(result['data']) > 0:
                             return result['data'][0]['embedding']
                         return None

             except Exception as e:
                 if retry < max_retries - 1:
                     wait_time = base_wait_time * (2 ** retry)
@@ -318,6 +334,6 @@ class LLM_request:
                 else:
                     logger.critical(f"embedding请求失败: {str(e)}", exc_info=True)
                     return None

         logger.error("达到最大重试次数,embedding请求仍然失败")
         return None
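A side note on the retry strategy that the chat and embedding paths above share: on HTTP 429 the code backs off exponentially (base_wait_time * 2 ** retry) and gives up after max_retries. The sketch below factors that pattern into a single async helper for illustration only; the name post_with_backoff and the bundled session handling are assumptions, not code from this repository.

import asyncio
import aiohttp
from loguru import logger


async def post_with_backoff(api_url: str, headers: dict, data: dict,
                            max_retries: int = 3, base_wait_time: int = 15) -> dict:
    """Hypothetical helper: POST with exponential backoff on 429, mirroring the loops above."""
    for retry in range(max_retries):
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(api_url, headers=headers, json=data) as response:
                    if response.status == 429:
                        wait_time = base_wait_time * (2 ** retry)  # 15s, 30s, 60s, ...
                        logger.warning(f"Rate limited (429), waiting {wait_time}s before retrying...")
                        await asyncio.sleep(wait_time)
                        continue
                    response.raise_for_status()
                    return await response.json()
        except Exception as e:
            if retry < max_retries - 1:
                # Still have retries left: back off and try again.
                wait_time = base_wait_time * (2 ** retry)
                logger.warning(f"Request failed, retrying in {wait_time}s: {e}")
                await asyncio.sleep(wait_time)
            else:
                logger.critical(f"Request failed: {e}")
                raise RuntimeError(f"API request failed: {e}") from e
    raise RuntimeError("API request still failing after the maximum number of retries")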