- 将`api_key`类型从`str`扩展为`Union[str, List[str]]`,允许用户配置多个API密钥。 - 新增`get_api_key`方法,通过线程安全的方式实现API密钥的轮询使用,提高API请求的稳定性和可用性。 - 更新了`api_key`的验证逻辑,以同时支持字符串和字符串列表两种格式。 - 相应地更新了配置文件模板,以示例新的密钥列表配置方式。
224 lines
7.7 KiB
TOML
version = "1.3.1"

# Version numbering for this config file follows the same rules as bot_config.toml

[[api_providers]] # API provider (multiple entries allowed)
name = "DeepSeek" # Provider name (any label; referenced via api_provider in [[models]])
base_url = "https://api.deepseek.com/v1" # Provider BaseURL
api_key = ["your-api-key-here-1", "your-api-key-here-2"] # API key (a single key string, or a list of keys used in rotation)
client_type = "openai" # Request client (optional, default "openai"; set to "aiohttp_gemini" for Google Gemini models — see the Google provider below)
max_retry = 2 # Maximum retry count when a single model API call fails
timeout = 30 # API request timeout (seconds)
retry_interval = 10 # Interval between retries (seconds)

[[api_providers]] # SiliconFlow provider configuration
name = "SiliconFlow"
base_url = "https://api.siliconflow.cn/v1"
api_key = "your-siliconflow-api-key-here"
client_type = "openai"
max_retry = 2
timeout = 30
retry_interval = 10

[[api_providers]] # Special case: Google Gemini uses an API incompatible with the OpenAI format; client_type must be "aiohttp_gemini"
name = "Google"
base_url = "https://api.google.com/v1"
api_key = ["your-google-api-key-1", "your-google-api-key-2"]
client_type = "aiohttp_gemini" # the official Gemini client is no longer usable
max_retry = 2
timeout = 30
retry_interval = 10

# Example configuration for the optional content-obfuscation feature
[[api_providers]]
name = "ExampleProviderWithObfuscation" # Example provider with obfuscation enabled
base_url = "https://api.example.com/v1"
api_key = "your-api-key-here"
client_type = "openai"
max_retry = 2
timeout = 30
retry_interval = 10
enable_content_obfuscation = true # Enable content obfuscation
obfuscation_intensity = 2 # Obfuscation intensity (1-3: 1 = low, 2 = medium, 3 = high)


[[models]] # Model (multiple entries allowed)
model_identifier = "deepseek-chat" # Model identifier as published by the API provider
name = "deepseek-v3" # Model name (any label; referenced by model_list entries below)
api_provider = "DeepSeek" # Provider name (must match a name defined in [[api_providers]])
price_in = 2.0 # Input price (for API usage accounting, CNY per M tokens; optional, default 0)
price_out = 8.0 # Output price (for API usage accounting, CNY per M tokens; optional, default 0)
#force_stream_mode = true # Force streaming output (uncomment if the model does not support non-streaming; optional, default false)
#use_anti_truncation = true # [optional] Anti-truncation: automatically retry when model output is incomplete. Enable only for models that need it (e.g. Gemini).

[[models]]
model_identifier = "Pro/deepseek-ai/DeepSeek-V3"
name = "siliconflow-deepseek-v3"
api_provider = "SiliconFlow"
price_in = 2.0
price_out = 8.0

[[models]]
model_identifier = "Pro/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
name = "deepseek-r1-distill-qwen-32b"
api_provider = "SiliconFlow"
price_in = 4.0
price_out = 16.0

[[models]]
model_identifier = "Qwen/Qwen3-8B"
name = "qwen3-8b"
api_provider = "SiliconFlow"
price_in = 0
price_out = 0

[models.extra_params] # Optional extra request parameters
enable_thinking = false # Disable thinking mode

[[models]]
model_identifier = "Qwen/Qwen3-14B"
name = "qwen3-14b"
api_provider = "SiliconFlow"
price_in = 0.5
price_out = 2.0

[models.extra_params] # Optional extra request parameters
enable_thinking = false # Disable thinking mode

[[models]]
model_identifier = "Qwen/Qwen3-30B-A3B"
name = "qwen3-30b"
api_provider = "SiliconFlow"
price_in = 0.7
price_out = 2.8

[models.extra_params] # Optional extra request parameters
enable_thinking = false # Disable thinking mode

[[models]]
model_identifier = "Qwen/Qwen2.5-VL-72B-Instruct"
name = "qwen2.5-vl-72b"
api_provider = "SiliconFlow"
price_in = 4.13
price_out = 4.13

[[models]]
model_identifier = "FunAudioLLM/SenseVoiceSmall"
name = "sensevoice-small"
api_provider = "SiliconFlow"
price_in = 0
price_out = 0

[[models]]
model_identifier = "BAAI/bge-m3"
name = "bge-m3"
api_provider = "SiliconFlow"
price_in = 0
price_out = 0

[[models]]
model_identifier = "moonshotai/Kimi-K2-Instruct"
name = "moonshotai-Kimi-K2-Instruct"
api_provider = "SiliconFlow"
price_in = 4.0
price_out = 16.0

[model_task_config.utils] # Model used by core components (emoji module, naming module, relationship module); required
model_list = ["siliconflow-deepseek-v3"] # Models to use; each entry must match a model name defined above
temperature = 0.2 # Temperature; 0.1-0.3 recommended for the new V3
max_tokens = 800 # Maximum output tokens
#concurrency_count = 2 # Concurrent request count; default 1 (no concurrency), set to 2 or more to enable

[model_task_config.utils_small] # Small model for components with heavy usage; prefer a fast small model
model_list = ["qwen3-8b"]
temperature = 0.7
max_tokens = 800

[model_task_config.replyer] # Primary reply model; also used for the expressor and expression-style learning
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.2 # Temperature; 0.1-0.3 recommended for the new V3
max_tokens = 800

[model_task_config.planner] # Decision-making: decides what the bot should do
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3
max_tokens = 800

[model_task_config.planner_small] # Decision (fast path): picks the concrete action; a fast small model is recommended
model_list = ["qwen3-30b"]
temperature = 0.5
max_tokens = 800

[model_task_config.emotion] # Drives the bot's emotion changes
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3
max_tokens = 800

[model_task_config.mood] # Drives the bot's mood changes
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3
max_tokens = 800

[model_task_config.maizone] # maizone model
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.7
max_tokens = 800

[model_task_config.vlm] # Image recognition model
model_list = ["qwen2.5-vl-72b"]
max_tokens = 800

[model_task_config.emoji_vlm] # Dedicated emoji/sticker recognition model
model_list = ["qwen2.5-vl-72b"]
max_tokens = 800


[model_task_config.utils_video] # Dedicated video analysis model
model_list = ["qwen2.5-vl-72b"]
temperature = 0.3
max_tokens = 1500

[model_task_config.voice] # Speech recognition model
model_list = ["sensevoice-small"]

[model_task_config.tool_use] # Tool-calling model; must be a model that supports tool calls
model_list = ["qwen3-14b"]
temperature = 0.7
max_tokens = 800

[model_task_config.schedule_generator] # Schedule generation model
model_list = ["deepseek-v3"]
temperature = 0.7
max_tokens = 1000

[model_task_config.anti_injection] # Dedicated anti-injection detection model
model_list = ["moonshotai-Kimi-K2-Instruct"] # Use a fast small model for detection
temperature = 0.1 # Low temperature keeps detection results stable
max_tokens = 200 # Detection output does not need to be long

[model_task_config.monthly_plan_generator] # Monthly plan generation model
model_list = ["deepseek-v3"]
temperature = 0.7
max_tokens = 1000

# Embedding model
[model_task_config.embedding]
model_list = ["bge-m3"]


#------------ LPMM knowledge-base models ------------

[model_task_config.lpmm_entity_extract] # Entity extraction model
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.2
max_tokens = 800

[model_task_config.lpmm_rdf_build] # RDF construction model
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.2
max_tokens = 800

[model_task_config.lpmm_qa] # Question-answering model
model_list = ["deepseek-r1-distill-qwen-32b"]
temperature = 0.7
max_tokens = 800