fix: prefer non-paid models by default and make model names global config
@@ -33,6 +33,11 @@ class BotConfig:
     EMOJI_REGISTER_INTERVAL: int = 10  # Emoji-pack registration interval (minutes)

+    API_USING: str = "siliconflow"  # Which API backend to use
+    DEEPSEEK_MODEL_R1: str = "deepseek-reasoner"  # DeepSeek R1 model
+    DEEPSEEK_MODEL_V3: str = "deepseek-chat"  # DeepSeek V3 model
+    SILICONFLOW_MODEL_R1: str = "deepseek-ai/DeepSeek-R1"  # SiliconFlow R1 model
+    SILICONFLOW_MODEL_R1_DISTILL: str = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"  # SiliconFlow R1 distilled model
+    SILICONFLOW_MODEL_V3: str = "deepseek-ai/DeepSeek-V3"  # SiliconFlow V3 model
     MODEL_R1_PROBABILITY: float = 0.8  # Probability of picking the R1 model
     MODEL_V3_PROBABILITY: float = 0.1  # Probability of picking the V3 model
     MODEL_R1_DISTILL_PROBABILITY: float = 0.1  # Probability of picking the R1 distilled model
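The three probability defaults sum to 1.0, which suggests a weighted random pick between R1, V3, and the R1 distill. A minimal sketch of how a selector could consume these constants; pick_model and its config argument are illustrative, not part of this commit:

    import random

    def pick_model(config) -> str:
        # Illustrative only: weighted choice over cumulative probability ranges.
        roll = random.random()
        if roll < config.MODEL_R1_PROBABILITY:  # 0.0-0.8 by default
            return config.SILICONFLOW_MODEL_R1
        if roll < config.MODEL_R1_PROBABILITY + config.MODEL_V3_PROBABILITY:  # 0.8-0.9
            return config.SILICONFLOW_MODEL_V3
        return config.SILICONFLOW_MODEL_R1_DISTILL  # remaining 0.9-1.0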
@@ -82,6 +87,9 @@ class BotConfig:
             config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
             config.MODEL_R1_DISTILL_PROBABILITY = response_config.get("model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY)
+            config.API_USING = response_config.get("api_using", config.API_USING)
+            if response_config.get("api_paid", config.API_PAID):
+                config.SILICONFLOW_MODEL_R1 = "Pro/deepseek-ai/DeepSeek-R1"
+                config.SILICONFLOW_MODEL_V3 = "Pro/deepseek-ai/DeepSeek-V3"

         # Message config
         if "message" in toml_dict:
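For reference, a config snippet that would exercise the keys read above; the [response] table name is implied by the response_config reads, and the api_paid key mirrors the config.API_PAID fallback (both inferred, not shown in this diff):

    [response]
    api_using = "siliconflow"       # or "deepseek"
    api_paid = false                # true swaps in the Pro/ SiliconFlow models at load time
    model_r1_probability = 0.8
    model_v3_probability = 0.1
    model_r1_distill_probability = 0.1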
@@ -206,13 +206,13 @@ class LLMResponseGenerator:
         if global_config.API_USING == "deepseek":
             return await self._generate_base_response(
                 message,
-                "deepseek-reasoner",
+                global_config.DEEPSEEK_MODEL_R1,
                 {"temperature": 0.7, "max_tokens": 1024}
             )
         else:
             return await self._generate_base_response(
                 message,
-                "Pro/deepseek-ai/DeepSeek-R1",
+                global_config.SILICONFLOW_MODEL_R1,
                 {"temperature": 0.7, "max_tokens": 1024}
             )
@@ -221,13 +221,13 @@ class LLMResponseGenerator:
         if global_config.API_USING == "deepseek":
             return await self._generate_base_response(
                 message,
-                "deepseek-chat",
+                global_config.DEEPSEEK_MODEL_V3,
                 {"temperature": 0.8, "max_tokens": 1024}
             )
         else:
             return await self._generate_base_response(
                 message,
-                "Pro/deepseek-ai/DeepSeek-V3",
+                global_config.SILICONFLOW_MODEL_V3,
                 {"temperature": 0.8, "max_tokens": 1024}
             )
@@ -235,7 +235,7 @@ class LLMResponseGenerator:
         """Generate a reply with the DeepSeek-R1-Distill-Qwen-32B model"""
         return await self._generate_base_response(
             message,
-            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            global_config.SILICONFLOW_MODEL_R1_DISTILL,
             {"temperature": 0.7, "max_tokens": 1024}
         )
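All three paths above funnel the chosen model name into _generate_base_response, which this diff does not show. A rough sketch of what such a helper could look like, assuming the synchronous OpenAI client and the asyncio/partial imports already visible in the next hunk; the prompt-building line is a placeholder, not the project's actual implementation:

    async def _generate_base_response(self, message, model: str, params: dict) -> str:
        # Sketch only: push the blocking SDK call onto the default executor
        # so the bot's event loop stays responsive.
        loop = asyncio.get_event_loop()
        create_completion = partial(
            self.client.chat.completions.create,
            model=model,
            messages=[{"role": "user", "content": str(message)}],  # prompt building elided
            **params,
        )
        response = await loop.run_in_executor(None, create_completion)
        return response.choices[0].message.content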
@@ -270,9 +270,9 @@ class LLMResponseGenerator:

         loop = asyncio.get_event_loop()
         if global_config.API_USING == "deepseek":
-            model = "deepseek-chat"
+            model = global_config.DEEPSEEK_MODEL_V3
         else:
-            model = "Pro/deepseek-ai/DeepSeek-V3"
+            model = global_config.SILICONFLOW_MODEL_V3
         create_completion = partial(
             self.client.chat.completions.create,
             model=model,
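The partial pre-binds every argument of the blocking chat.completions.create call so it can be handed to the executor as a zero-argument callable; the hunk ends before the await, but the pattern presumably completes as:

    response = await loop.run_in_executor(None, create_completion)

Switching to the SDK's AsyncOpenAI client would make the partial and executor unnecessary, at the cost of touching every call site.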
@@ -3,6 +3,7 @@ from openai import OpenAI
 from .message import Message
 import jieba
 from nonebot import get_driver
+from .config import global_config

 driver = get_driver()
 config = driver.config
@@ -24,7 +25,7 @@ class TopicIdentifier:
         Message content: {text}"""

         response = self.client.chat.completions.create(
-            model="Pro/deepseek-ai/DeepSeek-V3",
+            model=global_config.SILICONFLOW_MODEL_V3,
             messages=[{"role": "user", "content": prompt}],
             temperature=0.8,
             max_tokens=10
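Note the design payoff of this last hunk: because the paid switch rewrites SILICONFLOW_MODEL_V3 to its Pro/ variant once at config-load time, call sites such as TopicIdentifier never branch on the paid tier, and max_tokens=10 simply caps the reply at a short topic label.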