v0.4.0 Support arbitrarily swappable models; improve the config file

好好好好好好好好好
SengokuCola
2025-03-02 23:51:47 +08:00
parent 65d983f37f
commit 6462c3b360
16 changed files with 459 additions and 644 deletions

config/auto_format.py Normal file (+46)
View File

@@ -0,0 +1,46 @@
import tomli
import tomli_w
import sys
from pathlib import Path
import os


def sync_configs():
    # Read both config files
    try:
        with open('bot_config_dev.toml', 'rb') as f:  # tomli requires binary mode for reading
            dev_config = tomli.load(f)
        with open('bot_config.toml', 'rb') as f:
            prod_config = tomli.load(f)
    except FileNotFoundError as e:
        print(f"Error: config file not found - {e}")
        sys.exit(1)
    except tomli.TOMLDecodeError as e:
        print(f"Error: failed to parse TOML - {e}")
        sys.exit(1)

    # Recursively merge configs
    def merge_configs(source, target):
        for key, value in source.items():
            if key not in target:
                target[key] = value
            elif isinstance(value, dict) and isinstance(target[key], dict):
                merge_configs(value, target[key])

    # Merge new keys from the dev config into the prod config
    merge_configs(dev_config, prod_config)

    # Save the updated config
    try:
        with open('bot_config.toml', 'wb') as f:  # tomli_w requires binary mode for writing
            tomli_w.dump(prod_config, f)
        print("Config file sync complete!")
    except Exception as e:
        print(f"Error: failed to save config file - {e}")
        sys.exit(1)


if __name__ == '__main__':
    # Make sure the script runs from its own directory
    script_dir = Path(__file__).parent
    os.chdir(script_dir)
    sync_configs()
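
The merge performed by config/auto_format.py is purely additive: keys that exist in bot_config_dev.toml but are missing from bot_config.toml are copied over, and values already set in the production file are never overwritten. A minimal sketch of that behavior on toy dictionaries (the sample data below is made up for illustration, not taken from the real config files):

# sync_demo.py - illustrative only; reproduces the nested merge_configs logic
def merge_configs(source, target):
    for key, value in source.items():
        if key not in target:
            target[key] = value                # add keys missing from the target
        elif isinstance(value, dict) and isinstance(target[key], dict):
            merge_configs(value, target[key])  # recurse into nested tables

dev = {"bot": {"qq": 123}, "model": {"vlm": {"name": "deepseek-ai/deepseek-vl2"}}}
prod = {"bot": {"qq": 456}}

merge_configs(dev, prod)
print(prod)
# {'bot': {'qq': 456}, 'model': {'vlm': {'name': 'deepseek-ai/deepseek-vl2'}}}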

View File

@@ -1,11 +1,11 @@
[bot]
-qq = 123 # Fill in your bot's QQ number
-nickname = "麦麦" # The name you want the bot to be called
+qq = 123
+nickname = "麦麦"

[message]
-min_text_length = 2 # The bot only replies to messages whose text length is at least this value
-max_context_size = 15 # Number of context messages the bot keeps; older ones are dropped automatically
-emoji_chance = 0.2 # Probability that the bot replies with an emoji/sticker
+min_text_length = 2
+max_context_size = 15
+emoji_chance = 0.2

[emoji]
check_interval = 120
@@ -14,34 +14,48 @@ register_interval = 10
[cq_code]
enable_pic_translate = false

[response]
-api_using = "siliconflow" # Which LLM API to use; valid values are siliconflow and deepseek. siliconflow is recommended because the image-recognition API currently only supports its deepseek-vl2 model
-api_paid = true # Whether to use the paid API. Currently only affects siliconflow, whose deepseek models split quota into usable gift credit and paid credit; when false, the gift credit is used
-model_r1_probability = 0.8 # Probability that the bot answers with the R1 model
-model_v3_probability = 0.1 # Probability that the bot answers with the V3 model
-model_r1_distill_probability = 0.1 # Probability that the bot answers with the distilled R1 model
+api_using = "siliconflow"
+api_paid = true
+model_r1_probability = 0.8
+model_v3_probability = 0.1
+model_r1_distill_probability = 0.1

[memory]
-build_memory_interval = 300 # Memory-building interval
+build_memory_interval = 300

[others]
-enable_advance_output = true # Output more verbose logs when enabled; false = off, true = on
+enable_advance_output = true

[groups]
talk_allowed = [
-123,
-123,
-] # Groups the bot may reply in
-talk_frequency_down = [
-] # Groups where the reply frequency is reduced
-ban_user_id = [
-] # QQ ids the bot will not reply to
+123,
+123,
+]
+talk_frequency_down = []
+ban_user_id = []
+
+[model.llm_reasoning]
+name = "Pro/deepseek-ai/DeepSeek-R1"
+base_url = "SILICONFLOW_BASE_URL"
+key = "SILICONFLOW_KEY"
+
+[model.llm_reasoning_minor]
+name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
+base_url = "SILICONFLOW_BASE_URL"
+key = "SILICONFLOW_KEY"
+
+[model.llm_normal]
+name = "Pro/deepseek-ai/DeepSeek-V3"
+base_url = "SILICONFLOW_BASE_URL"
+key = "SILICONFLOW_KEY"
+
+[model.llm_normal_minor]
+name = "deepseek-ai/DeepSeek-V2.5"
+base_url = "SILICONFLOW_BASE_URL"
+key = "SILICONFLOW_KEY"
+
+[model.vlm]
+name = "deepseek-ai/deepseek-vl2"
+base_url = "SILICONFLOW_BASE_URL"
+key = "SILICONFLOW_KEY"