fix: update model config and environment variables; bump version numbers and adjust model parameters
bot.py
@@ -74,36 +74,6 @@ def easter_egg():
     print(rainbow_text)
 
 
-def scan_provider(env_config: dict):
-    provider = {}
-
-    # Use the env_mask captured before env initialization to deduplicate the new
-    # set of environment variables, so variables like GPG_KEY don't skew the check
-    env_config = dict(filter(lambda item: item[0] not in env_mask, env_config.items()))
-
-    # Walk every key in env_config
-    for key in env_config:
-        # Check whether the key matches the {provider}_BASE_URL or {provider}_KEY pattern
-        if key.endswith("_BASE_URL") or key.endswith("_KEY"):
-            # Extract the provider name
-            provider_name = key.split("_", 1)[0]  # split once from the left, keep the first part
-
-            # Initialize the provider's dict if it doesn't exist yet
-            if provider_name not in provider:
-                provider[provider_name] = {"url": None, "key": None}
-
-            # Fill in url or key depending on the key's suffix
-            if key.endswith("_BASE_URL"):
-                provider[provider_name]["url"] = env_config[key]
-            elif key.endswith("_KEY"):
-                provider[provider_name]["key"] = env_config[key]
-
-    # Verify that every provider has both a url and a key
-    for provider_name, config in provider.items():
-        if config["url"] is None or config["key"] is None:
-            logger.error(f"provider contents: {config}\nenv_config contents: {env_config}")
-            raise ValueError(f"Check whether provider '{provider_name}' is missing its BASE_URL or KEY environment variable")
-
 
 async def graceful_shutdown():
     try:
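
Note: for reference, the removed scan_provider enforced that every {PROVIDER}_BASE_URL
environment variable has a matching {PROVIDER}_KEY and vice versa. A condensed,
runnable sketch of that invariant (sample values are placeholders):

    env = {
        "SILICONFLOW_BASE_URL": "https://api.siliconflow.cn/v1/",
        "SILICONFLOW_KEY": "sk-placeholder",
    }
    provider: dict[str, dict] = {}
    for key, value in env.items():
        if key.endswith("_BASE_URL") or key.endswith("_KEY"):
            name = key.split("_", 1)[0]  # note: "DEEP_SEEK_KEY" yields "DEEP", not "DEEP_SEEK"
            slot = "url" if key.endswith("_BASE_URL") else "key"
            provider.setdefault(name, {"url": None, "key": None})[slot] = value
    missing = [n for n, cfg in provider.items() if None in cfg.values()]
    assert not missing  # scan_provider raised ValueError here instead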
@@ -229,9 +199,6 @@ def raw_main():
 
     easter_egg()
 
-    env_config = {key: os.getenv(key) for key in os.environ}
-    scan_provider(env_config)
-
     # Return the MainSystem instance
     return MainSystem()
 
@@ -3,7 +3,7 @@ from typing import List, Dict
 
 from packaging.version import Version
 
-NEWEST_VER = "0.1.0"  # newest config version currently supported
+NEWEST_VER = "0.1.1"  # newest config version currently supported
 
 @dataclass
 class APIProvider:
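
Note: the bump matters because the updater skips the merge when the on-disk config
and the template carry the same version. A minimal illustration with
packaging.version, which this file already imports (needs_migration is a
hypothetical helper, not project code):

    from packaging.version import Version

    NEWEST_VER = "0.1.1"  # value after this commit

    def needs_migration(current: str) -> bool:
        # migrate only when the existing config is older than the template
        return Version(current) < Version(NEWEST_VER)

    assert needs_migration("0.1.0") and not needs_migration("0.1.1")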
@@ -1,162 +0,0 @@
-import shutil
-import tomlkit
-from tomlkit.items import Table, KeyType
-from pathlib import Path
-from datetime import datetime
-
-
-def get_key_comment(toml_table, key):
-    # Get the comment attached to a key, if any
-    if hasattr(toml_table, "trivia") and hasattr(toml_table.trivia, "comment"):
-        return toml_table.trivia.comment
-    if hasattr(toml_table, "value") and isinstance(toml_table.value, dict):
-        item = toml_table.value.get(key)
-        if item is not None and hasattr(item, "trivia"):
-            return item.trivia.comment
-    if hasattr(toml_table, "keys"):
-        for k in toml_table.keys():
-            if isinstance(k, KeyType) and k.key == key:
-                return k.trivia.comment
-    return None
-
-
-def compare_dicts(new, old, path=None, new_comments=None, old_comments=None, logs=None):
-    # Recursively compare two dicts, collecting added and removed keys plus their comments
-    if path is None:
-        path = []
-    if logs is None:
-        logs = []
-    if new_comments is None:
-        new_comments = {}
-    if old_comments is None:
-        old_comments = {}
-    # Added keys
-    for key in new:
-        if key == "version":
-            continue
-        if key not in old:
-            comment = get_key_comment(new, key)
-            logs.append(f"added: {'.'.join(path + [str(key)])} comment: {comment if comment else 'none'}")
-        elif isinstance(new[key], (dict, Table)) and isinstance(old.get(key), (dict, Table)):
-            compare_dicts(new[key], old[key], path + [str(key)], new_comments, old_comments, logs)
-    # Removed keys
-    for key in old:
-        if key == "version":
-            continue
-        if key not in new:
-            comment = get_key_comment(old, key)
-            logs.append(f"removed: {'.'.join(path + [str(key)])} comment: {comment if comment else 'none'}")
-    return logs
-
-
-def update_config():
-    print("Updating config files...")
-    # Resolve the repository root
-    root_dir = Path(__file__).parent.parent.parent.parent
-    template_dir = root_dir / "template"
-    config_dir = root_dir / "config"
-    old_config_dir = config_dir / "old"
-
-    # Create the old/ directory if it doesn't exist
-    old_config_dir.mkdir(exist_ok=True)
-
-    # File paths
-    template_path = template_dir / "bot_config_template.toml"
-    old_config_path = config_dir / "bot_config.toml"
-    new_config_path = config_dir / "bot_config.toml"
-
-    # Read the old config file
-    old_config = {}
-    if old_config_path.exists():
-        print(f"Found old config file: {old_config_path}")
-        with open(old_config_path, "r", encoding="utf-8") as f:
-            old_config = tomlkit.load(f)
-
-        # Build a timestamped backup file name
-        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        old_backup_path = old_config_dir / f"bot_config_{timestamp}.toml"
-
-        # Move the old config file into the old/ directory
-        shutil.move(old_config_path, old_backup_path)
-        print(f"Backed up old config file to: {old_backup_path}")
-
-    # Copy the template into the config directory
-    print(f"Creating new config from template: {template_path}")
-    shutil.copy2(template_path, new_config_path)
-
-    # Read the new config file
-    with open(new_config_path, "r", encoding="utf-8") as f:
-        new_config = tomlkit.load(f)
-
-    # Skip the update if the versions match
-    if old_config and "inner" in old_config and "inner" in new_config:
-        old_version = old_config["inner"].get("version")  # type: ignore
-        new_version = new_config["inner"].get("version")  # type: ignore
-        if old_version and new_version and old_version == new_version:
-            print(f"Versions are identical (v{old_version}); skipping update")
-            # Restore the old config file and return
-            shutil.move(old_backup_path, old_config_path)  # type: ignore
-            return
-        else:
-            print(f"Version changed: old v{old_version} -> new v{new_version}")
-
-    # Report added/removed keys together with their comments
-    if old_config:
-        print("Config changes:")
-        logs = compare_dicts(new_config, old_config)
-        if logs:
-            for log in logs:
-                print(log)
-        else:
-            print("No keys added or removed")
-
-    # Recursively merge the configs
-    def update_dict(target, source):
-        for key, value in source.items():
-            # Never overwrite the version field
-            if key == "version":
-                continue
-            if key in target:
-                if isinstance(value, dict) and isinstance(target[key], (dict, Table)):
-                    update_dict(target[key], value)
-                else:
-                    try:
-                        # Arrays need special handling
-                        if isinstance(value, list):
-                            # Keep an empty array an empty array
-                            if not value:
-                                target[key] = tomlkit.array()
-                            else:
-                                # Special-case regex arrays and structures containing regexes
-                                if key == "ban_msgs_regex":
-                                    # Use the raw value without further processing
-                                    target[key] = value
-                                elif key == "regex_rules":
-                                    # regex_rules needs its regex fields handled specially
-                                    target[key] = value
-                                else:
-                                    # Check for dict items carrying a regex field
-                                    contains_regex = False
-                                    if value and isinstance(value[0], dict) and "regex" in value[0]:
-                                        contains_regex = True
-
-                                    target[key] = value if contains_regex else tomlkit.array(str(value))
-                        else:
-                            # For other types, build a fresh value via item()
-                            target[key] = tomlkit.item(value)
-                    except (TypeError, ValueError):
-                        # Fall back to direct assignment if conversion fails
-                        target[key] = value
-
-    # Merge the old values into the new config
-    print("Merging old and new configs...")
-    update_dict(new_config, old_config)
-
-    # Save the updated config (comments and formatting preserved)
-    with open(new_config_path, "w", encoding="utf-8") as f:
-        f.write(tomlkit.dumps(new_config))
-    print("Config update complete")
-
-
-if __name__ == "__main__":
-    update_config()
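
Note: the deleted updater leaned on tomlkit because it is style-preserving: a
parse/dump round-trip keeps comments and layout, which is what let the merge
re-emit the template with the old values filled in. A minimal demonstration:

    import tomlkit

    src = 'model_name = "bge-m3"  # model name from model_config.toml\n'
    doc = tomlkit.parse(src)
    assert tomlkit.dumps(doc) == src  # comments and formatting survive the round-trip

    doc["model_name"] = "qwen3-8b"  # edits keep the rest of the document intact
    print(tomlkit.dumps(doc))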
@@ -229,120 +229,84 @@ show_prompt = false # whether to show the prompt
 
 
 [model]
-model_max_output_length = 1024 # maximum number of tokens the model may return per call
+model_max_output_length = 800 # maximum number of tokens the model may return per call
 
-#------------Required: component models------------
+#------------Model task configuration------------
+# Every model name below must match a model configured in model_config.toml
 
 [model.utils] # model used by some of 麦麦's components (emoji module, naming module, relationship module); required
-name = "Pro/deepseek-ai/DeepSeek-V3"
-provider = "SILICONFLOW"
-pri_in = 2 # input price of the model (optional, for cost tracking)
-pri_out = 8 # output price of the model (optional, for cost tracking)
-# default temp is 0.2; adjust it yourself if you use the old V3 or another model
-temp = 0.2 # model temperature; 0.1-0.3 recommended for the new V3
+model_name = "siliconflow-deepseek-v3" # model name from model_config.toml
+temperature = 0.2 # model temperature; 0.1-0.3 recommended for the new V3
+max_tokens = 800 # maximum output tokens
 
 [model.utils_small] # small model used by components with heavy traffic; a fast small model is recommended
-# a free small model is strongly recommended
-name = "Qwen/Qwen3-8B"
-provider = "SILICONFLOW"
-pri_in = 0
-pri_out = 0
-temp = 0.7
+model_name = "qwen3-8b" # model name from model_config.toml
+temperature = 0.7
+max_tokens = 800
 enable_thinking = false # whether to enable thinking
 
 [model.replyer_1] # primary reply model; also used for the expressor and expression-style learning
-name = "Pro/deepseek-ai/DeepSeek-V3"
-provider = "SILICONFLOW"
-pri_in = 2 # input price of the model (optional, for cost tracking)
-pri_out = 8 # output price of the model (optional, for cost tracking)
-# default temp is 0.2; adjust it yourself if you use the old V3 or another model
-temp = 0.2 # model temperature; 0.1-0.3 recommended for the new V3
+model_name = "siliconflow-deepseek-v3" # model name from model_config.toml
+temperature = 0.2 # model temperature; 0.1-0.3 recommended for the new V3
+max_tokens = 800
 
 [model.replyer_2] # secondary reply model
-name = "Pro/deepseek-ai/DeepSeek-V3"
-provider = "SILICONFLOW"
-pri_in = 2 # input price of the model (optional, for cost tracking)
-pri_out = 8 # output price of the model (optional, for cost tracking)
-# default temp is 0.2; adjust it yourself if you use the old V3 or another model
-temp = 0.2 # model temperature; 0.1-0.3 recommended for the new V3
+model_name = "siliconflow-deepseek-r1" # model name from model_config.toml
+temperature = 0.7 # model temperature
+max_tokens = 800
 
 [model.planner] # decision-making: decides what 麦麦 should do
-name = "Pro/deepseek-ai/DeepSeek-V3"
-provider = "SILICONFLOW"
-pri_in = 2
-pri_out = 8
-temp = 0.3
+model_name = "siliconflow-deepseek-v3" # model name from model_config.toml
+temperature = 0.3
+max_tokens = 800
 
 [model.emotion] # drives 麦麦's mood changes
-name = "Pro/deepseek-ai/DeepSeek-V3"
-provider = "SILICONFLOW"
-pri_in = 2
-pri_out = 8
-temp = 0.3
+model_name = "siliconflow-deepseek-v3" # model name from model_config.toml
+temperature = 0.3
+max_tokens = 800
 
 
 [model.memory] # memory model
-name = "Qwen/Qwen3-30B-A3B"
-provider = "SILICONFLOW"
-pri_in = 0.7
-pri_out = 2.8
-temp = 0.7
+model_name = "qwen3-30b" # model name from model_config.toml
+temperature = 0.7
+max_tokens = 800
 enable_thinking = false # whether to enable thinking
 
 [model.vlm] # image recognition model
-name = "Pro/Qwen/Qwen2.5-VL-7B-Instruct"
-provider = "SILICONFLOW"
-pri_in = 0.35
-pri_out = 0.35
+model_name = "qwen2.5-vl-72b" # model name from model_config.toml
+max_tokens = 800
 
 [model.voice] # speech recognition model
-name = "FunAudioLLM/SenseVoiceSmall"
-provider = "SILICONFLOW"
-pri_in = 0
-pri_out = 0
+model_name = "sensevoice-small" # model name from model_config.toml
 
 [model.tool_use] # tool-calling model; must be a model that supports tool calls
-name = "Qwen/Qwen3-14B"
-provider = "SILICONFLOW"
-pri_in = 0.5
-pri_out = 2
-temp = 0.7
+model_name = "qwen3-14b" # model name from model_config.toml
+temperature = 0.7
+max_tokens = 800
 enable_thinking = false # whether to enable thinking (qwen3 only)
 
 # embedding model
 [model.embedding]
-name = "BAAI/bge-m3"
-provider = "SILICONFLOW"
-pri_in = 0
-pri_out = 0
+model_name = "bge-m3" # model name from model_config.toml
 
 
 #------------LPMM knowledge-base models------------
 
 [model.lpmm_entity_extract] # entity extraction model
-name = "Pro/deepseek-ai/DeepSeek-V3"
-provider = "SILICONFLOW"
-pri_in = 2
-pri_out = 8
-temp = 0.2
+model_name = "siliconflow-deepseek-v3" # model name from model_config.toml
+temperature = 0.2
+max_tokens = 800
 
 
 [model.lpmm_rdf_build] # RDF construction model
-name = "Pro/deepseek-ai/DeepSeek-V3"
-provider = "SILICONFLOW"
-pri_in = 2
-pri_out = 8
-temp = 0.2
+model_name = "siliconflow-deepseek-v3" # model name from model_config.toml
+temperature = 0.2
+max_tokens = 800
 
 
 [model.lpmm_qa] # question-answering model
-name = "Qwen/Qwen3-30B-A3B"
-provider = "SILICONFLOW"
-pri_in = 0.7
-pri_out = 2.8
-temp = 0.7
+model_name = "deepseek-r1-distill-qwen-32b" # model name from model_config.toml
+temperature = 0.7
+max_tokens = 800
 enable_thinking = false # whether to enable thinking
 
 
 [maim_message]
 auth_token = [] # auth tokens used for API verification; leave empty to disable verification
 # to use the settings below, enable use_custom and run a separate maim_message server
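
Note: under the new layout a task section only names a model; provider, identifier,
and pricing now live in model_config.toml. A minimal sketch of joining the two files
(resolve_task_model is a hypothetical helper, not part of this commit, and the
override order is an assumption):

    import tomllib  # Python 3.11+

    def resolve_task_model(bot_cfg: str, model_cfg: str, task: str) -> dict:
        with open(bot_cfg, "rb") as f:
            task_cfg = tomllib.load(f)["model"][task]  # e.g. [model.replyer_1]
        with open(model_cfg, "rb") as f:
            models = tomllib.load(f)["models"]  # the [[models]] registry
        matches = [m for m in models if m["name"] == task_cfg["model_name"]]
        if not matches:
            raise ValueError(f"model '{task_cfg['model_name']}' not in model_config.toml")
        return {**matches[0], **task_cfg}  # assume task settings override registry defaults

    # resolve_task_model("bot_config.toml", "model_config.toml", "replyer_1")
    # -> identifier "Pro/deepseek-ai/DeepSeek-V3" via api_provider "SiliconFlow"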
@@ -1,5 +1,5 @@
 [inner]
-version = "0.1.0"
+version = "0.1.1"
 
 # config version numbering follows the same rules as bot_config.toml
 
@@ -50,21 +50,79 @@ price_out = 8.0
 # (optional; defaults to false when absent)
 #force_stream_mode = true
 
-#[[models]]
-#model_identifier = "deepseek-reasoner"
-#name = "deepseek-r1"
-#api_provider = "DeepSeek"
-#model_flags = ["text", "tool_calling", "reasoning"]
-#price_in = 4.0
-#price_out = 16.0
-#
-#[[models]]
-#model_identifier = "BAAI/bge-m3"
-#name = "siliconflow-bge-m3"
-#api_provider = "SiliconFlow"
-#model_flags = ["text", "embedding"]
-#price_in = 0
-#price_out = 0
+[[models]]
+model_identifier = "deepseek-reasoner"
+name = "deepseek-r1"
+api_provider = "DeepSeek"
+model_flags = [ "text", "tool_calling", "reasoning",]
+price_in = 4.0
+price_out = 16.0
+
+[[models]]
+model_identifier = "Pro/deepseek-ai/DeepSeek-V3"
+name = "siliconflow-deepseek-v3"
+api_provider = "SiliconFlow"
+price_in = 2.0
+price_out = 8.0
+
+[[models]]
+model_identifier = "Pro/deepseek-ai/DeepSeek-R1"
+name = "siliconflow-deepseek-r1"
+api_provider = "SiliconFlow"
+price_in = 4.0
+price_out = 16.0
+
+[[models]]
+model_identifier = "Pro/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
+name = "deepseek-r1-distill-qwen-32b"
+api_provider = "SiliconFlow"
+price_in = 4.0
+price_out = 16.0
+
+[[models]]
+model_identifier = "Qwen/Qwen3-8B"
+name = "qwen3-8b"
+api_provider = "SiliconFlow"
+price_in = 0
+price_out = 0
+
+[[models]]
+model_identifier = "Qwen/Qwen3-14B"
+name = "qwen3-14b"
+api_provider = "SiliconFlow"
+price_in = 0.5
+price_out = 2.0
+
+[[models]]
+model_identifier = "Qwen/Qwen3-30B-A3B"
+name = "qwen3-30b"
+api_provider = "SiliconFlow"
+price_in = 0.7
+price_out = 2.8
+
+[[models]]
+model_identifier = "Qwen/Qwen2.5-VL-72B-Instruct"
+name = "qwen2.5-vl-72b"
+api_provider = "SiliconFlow"
+model_flags = [ "vision", "text",]
+price_in = 4.13
+price_out = 4.13
+
+[[models]]
+model_identifier = "FunAudioLLM/SenseVoiceSmall"
+name = "sensevoice-small"
+api_provider = "SiliconFlow"
+model_flags = [ "audio",]
+price_in = 0
+price_out = 0
+
+[[models]]
+model_identifier = "BAAI/bge-m3"
+name = "bge-m3"
+api_provider = "SiliconFlow"
+model_flags = [ "text", "embedding",]
+price_in = 0
+price_out = 0
 
 
 [task_model_usage]
@@ -1,5 +1,5 @@
 [inner]
-version = "0.1.0"
+version = "0.1.1"
 
 # config version numbering follows the same rules as bot_config.toml
 
@@ -50,27 +50,85 @@ price_out = 8.0
 # (optional; defaults to false when absent)
 #force_stream_mode = true
 
-#[[models]]
-#model_identifier = "deepseek-reasoner"
-#name = "deepseek-r1"
-#api_provider = "DeepSeek"
-#model_flags = ["text", "tool_calling", "reasoning"]
-#price_in = 4.0
-#price_out = 16.0
-#
-#[[models]]
-#model_identifier = "BAAI/bge-m3"
-#name = "siliconflow-bge-m3"
-#api_provider = "SiliconFlow"
-#model_flags = ["text", "embedding"]
-#price_in = 0
-#price_out = 0
+[[models]]
+model_identifier = "deepseek-reasoner"
+name = "deepseek-r1"
+api_provider = "DeepSeek"
+model_flags = [ "text", "tool_calling", "reasoning",]
+price_in = 4.0
+price_out = 16.0
+
+[[models]]
+model_identifier = "Pro/deepseek-ai/DeepSeek-V3"
+name = "siliconflow-deepseek-v3"
+api_provider = "SiliconFlow"
+price_in = 2.0
+price_out = 8.0
+
+[[models]]
+model_identifier = "Pro/deepseek-ai/DeepSeek-R1"
+name = "siliconflow-deepseek-r1"
+api_provider = "SiliconFlow"
+price_in = 4.0
+price_out = 16.0
+
+[[models]]
+model_identifier = "Pro/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
+name = "deepseek-r1-distill-qwen-32b"
+api_provider = "SiliconFlow"
+price_in = 4.0
+price_out = 16.0
+
+[[models]]
+model_identifier = "Qwen/Qwen3-8B"
+name = "qwen3-8b"
+api_provider = "SiliconFlow"
+price_in = 0
+price_out = 0
+
+[[models]]
+model_identifier = "Qwen/Qwen3-14B"
+name = "qwen3-14b"
+api_provider = "SiliconFlow"
+price_in = 0.5
+price_out = 2.0
+
+[[models]]
+model_identifier = "Qwen/Qwen3-30B-A3B"
+name = "qwen3-30b"
+api_provider = "SiliconFlow"
+price_in = 0.7
+price_out = 2.8
+
+[[models]]
+model_identifier = "Qwen/Qwen2.5-VL-72B-Instruct"
+name = "qwen2.5-vl-72b"
+api_provider = "SiliconFlow"
+model_flags = [ "vision", "text",]
+price_in = 4.13
+price_out = 4.13
+
+[[models]]
+model_identifier = "FunAudioLLM/SenseVoiceSmall"
+name = "sensevoice-small"
+api_provider = "SiliconFlow"
+model_flags = [ "audio",]
+price_in = 0
+price_out = 0
+
+[[models]]
+model_identifier = "BAAI/bge-m3"
+name = "bge-m3"
+api_provider = "SiliconFlow"
+model_flags = [ "text", "embedding",]
+price_in = 0
+price_out = 0
 
 
 [task_model_usage]
-#llm_reasoning = {model="deepseek-r1", temperature=0.8, max_tokens=1024, max_retry=0}
-#llm_normal = {model="deepseek-r1", max_tokens=1024, max_retry=0}
-#embedding = "siliconflow-bge-m3"
+llm_reasoning = {model="deepseek-r1", temperature=0.8, max_tokens=1024, max_retry=0}
+llm_normal = {model="deepseek-r1", max_tokens=1024, max_retry=0}
+embedding = "siliconflow-bge-m3"
 #schedule = [
 # "deepseek-v3",
 # "deepseek-r1",
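
Note: the [task_model_usage] entries are TOML inline tables, so each per-task
override parses straight into a dict; for example:

    import tomllib

    cfg = tomllib.loads(
        'llm_reasoning = {model="deepseek-r1", temperature=0.8, max_tokens=1024, max_retry=0}'
    )
    assert cfg["llm_reasoning"]["max_tokens"] == 1024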
@@ -1,16 +1,2 @@
 HOST=127.0.0.1
 PORT=8000
-
-#key and url
-SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1/
-DEEP_SEEK_BASE_URL=https://api.deepseek.com/v1
-CHAT_ANY_WHERE_BASE_URL=https://api.chatanywhere.tech/v1
-BAILIAN_BASE_URL = https://dashscope.aliyuncs.com/compatible-mode/v1
-xxxxxxx_BASE_URL=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-
-# define the API keys you want to use (apply for them on the corresponding provider's site)
-DEEP_SEEK_KEY=
-CHAT_ANY_WHERE_KEY=
-SILICONFLOW_KEY=
-BAILIAN_KEY =
-xxxxxxx_KEY=