# Mofox-Core/template/bot_config_template.toml

[bot]
qq = 123
nickname = "麦麦"

[personality]
prompt_personality = [
    "曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧", # Tieba persona: a former geology undergrad who now studies psychology and brain science and browses Tieba
    "是一个女大学生,你有黑色头发,你会刷小红书" # Xiaohongshu persona: a female college student with black hair who browses Xiaohongshu
]
prompt_schedule = "一个曾经学习地质,现在学习心理学和脑科学的女大学生喜欢刷qq贴吧知乎和小红书" # schedule prompt: a former geology student, now studying psychology and brain science, who likes browsing QQ, Tieba, Zhihu, and Xiaohongshu

[message]
min_text_length = 2 # MaiMai only replies to messages whose text length is at least this value
max_context_size = 15 # number of context messages MaiMai receives
emoji_chance = 0.2 # probability that MaiMai sends an emoji sticker
ban_words = [
    # "403","张三"
]

[emoji]
check_interval = 120 # how often to check emoji stickers
register_interval = 10 # how often to register emoji stickers
auto_save = true # automatically save ("steal") emoji stickers
enable_check = false # whether to enable emoji sticker filtering
check_prompt = "符合公序良俗" # filtering requirement (roughly: "complies with public order and good morals")

[cq_code]
enable_pic_translate = false

[response]
model_r1_probability = 0.8 # probability that MaiMai replies with reply model 1 (the primary reply model)
model_v3_probability = 0.1 # probability that MaiMai replies with reply model 2 (a secondary reply model)
model_r1_distill_probability = 0.1 # probability that MaiMai replies with reply model 3 (a secondary reply model)
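# Note (an assumption, not stated in this template): the three probabilities above are presumably
# meant to sum to 1.0 — here 0.8 + 0.1 + 0.1 = 1.0.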
max_response_length = 1024 # maximum number of tokens in MaiMai's reply

[memory]
build_memory_interval = 300 # memory-building interval, in seconds
forget_memory_interval = 300 # memory-forgetting interval, in seconds

[others]
enable_advance_output = true # whether to enable advanced output
enable_kuuki_read = true # whether to enable "reading the air" (social-context awareness)

[groups]
talk_allowed = [
    123,
    123,
] # groups in which the bot may reply (placeholder IDs; one entry per group)
talk_frequency_down = [] # groups in which reply frequency is lowered
ban_user_id = [] # QQ numbers the bot will never reply to

# Official DeepSeek API settings, kept commented out here for reference (the DEEP_SEEK_* macros are defined in .env.prod):
#V3
#name = "deepseek-chat"
#base_url = "DEEP_SEEK_BASE_URL"
#key = "DEEP_SEEK_KEY"
#R1
#name = "deepseek-reasoner"
#base_url = "DEEP_SEEK_BASE_URL"
#key = "DEEP_SEEK_KEY"
# For the model entries below: if you use SiliconFlow, nothing needs to change; if you use the official
# DeepSeek API, replace the values with the macros you defined in .env.prod; if you use a custom model,
# pick the entry whose role is closest and fill in your own values.
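# A minimal sketch of what the corresponding .env.prod macros might look like — an illustrative
# assumption only; check your own deployment for the exact variable names, URLs, and keys:
#   SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1/
#   SILICONFLOW_KEY=sk-xxxxxxxxxxxxxxxx
#   DEEP_SEEK_BASE_URL=https://api.deepseek.com/
#   DEEP_SEEK_KEY=sk-xxxxxxxxxxxxxxxx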

# Reasoning models:
[model.llm_reasoning] # reply model 1 (primary reply model)
name = "Pro/deepseek-ai/DeepSeek-R1"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"
pri_in = 0 # input price of the model (optional; used to track spending)
pri_out = 0 # output price of the model (optional; used to track spending)

[model.llm_reasoning_minor] # reply model 3 (secondary reply model)
name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"

# Non-reasoning models:
[model.llm_normal] # V3, reply model 2 (secondary reply model)
name = "Pro/deepseek-ai/DeepSeek-V3"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"

[model.llm_normal_minor] # V2.5
name = "deepseek-ai/DeepSeek-V2.5"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"

[model.llm_emotion_judge] # emotion judgment, 0.7/m
name = "Qwen/Qwen2.5-14B-Instruct"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"

[model.llm_topic_judge] # topic judgment; Qwen2.5 7B is recommended
name = "Pro/Qwen/Qwen2.5-7B-Instruct"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"

[model.llm_summary_by_topic] # per-topic summaries; Qwen2.5 32B or larger is recommended
name = "Qwen/Qwen2.5-32B-Instruct"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"
pri_in = 0
pri_out = 0

[model.moderation] # content moderation (not enabled yet)
name = ""
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"
pri_in = 0
pri_out = 0

# Vision model
[model.vlm] # image recognition, 0.35/m
name = "Pro/Qwen/Qwen2-VL-7B-Instruct"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"

# Embedding model
[model.embedding]
name = "BAAI/bge-m3"
base_url = "SILICONFLOW_BASE_URL"
key = "SILICONFLOW_KEY"