better: improve rethinking to fix repeated replies, log loop information, split model configuration

SengokuCola
2025-04-25 21:38:16 +08:00
parent 75924bf499
commit 91ad729b0c
9 changed files with 317 additions and 184 deletions


@@ -1,5 +1,5 @@
[inner]
version = "1.4.2"
version = "1.5.0"
#---- The following is for developers; if you have only deployed 麦麦, you do not need to read it ----
# If you modify this config file, update the version value after making your changes
@@ -81,12 +81,8 @@ model_normal_probability = 0.3 # probability that 麦麦 selects the normal model when replying
reply_trigger_threshold = 3.0 # heart flow chat trigger threshold; the lower it is, the easier it is to enter heart flow chat
probability_decay_factor_per_second = 0.2 # probability decay factor; larger values decay faster and make it easier to exit heart flow chat
default_decay_rate_per_second = 0.98 # default decay rate; larger values decay faster and make it harder to enter heart flow chat
initial_duration = 60 # initial duration; larger values make heart flow chat last longer
sub_heart_flow_stop_time = 500 # sub heart flow stop time; if there is no reply within this interval, the sub heart flow stops (unit: seconds)
# sub_heart_flow_update_interval = 60
# sub_heart_flow_freeze_time = 100
# heart_flow_update_interval = 600
observation_context_size = 20 # maximum context size observed by the heart flow; context beyond this size is compressed
compressed_length = 5 # must not exceed observation_context_size; when the observed context exceeds observation_context_size it is compressed, with a minimum compressed length of 5
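As a rough illustration of how these heart flow knobs interact, here is a minimal Python sketch. It is a hypothetical model, not the actual MaiBot implementation: it assumes chat interest decays geometrically by default_decay_rate_per_second and that heart flow chat is entered once interest reaches reply_trigger_threshold.

def interest_after(interest: float, seconds: float,
                   decay_rate: float = 0.98) -> float:
    # Geometric decay: each second the interest is multiplied by decay_rate,
    # so a rate closer to 1.0 keeps interest high for longer.
    return interest * (decay_rate ** seconds)

def enters_heart_flow(interest: float, threshold: float = 3.0) -> bool:
    # A lower reply_trigger_threshold makes heart flow chat easier to enter.
    return interest >= threshold

# Example: an interest value of 5.0 stays above the 3.0 threshold for about
# 25 idle seconds, since 5.0 * 0.98**25 ≈ 3.02 and 5.0 * 0.98**26 ≈ 2.96.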
@@ -247,6 +243,29 @@ provider = "SILICONFLOW"
pri_in = 0.35
pri_out = 0.35
[model.llm_observation] # observation model, compresses chat content; a free model is recommended
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
[model.llm_sub_heartflow] # sub heart flow: generates 麦麦's inner thoughts during intense group chatting
name = "Qwen/Qwen2.5-72B-Instruct"
provider = "SILICONFLOW"
pri_in = 4.13
pri_out = 4.13
temp = 0.7 # model temperature; for the new V3, 0.1-0.3 is recommended
[model.llm_plan] # planning model: decides what 麦麦 should do during intense group chatting
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
pri_out = 1.26
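For reference, here is a minimal Python sketch of reading one of these model sections and estimating a request cost from pri_in / pri_out. The per-million-token pricing unit and the bot_config.toml file name are assumptions, not confirmed by this diff.

import tomllib  # standard library in Python 3.11+

def estimate_cost(config_path: str, model_key: str,
                  prompt_tokens: int, completion_tokens: int) -> float:
    # Assumption: pri_in / pri_out are prices per 1,000,000 tokens.
    with open(config_path, "rb") as f:
        cfg = tomllib.load(f)
    model = cfg["model"][model_key]
    return (prompt_tokens * model["pri_in"]
            + completion_tokens * model["pri_out"]) / 1_000_000

# Example: estimate_cost("bot_config.toml", "llm_sub_heartflow", 2000, 500)
# gives (2000 * 4.13 + 500 * 4.13) / 1e6 ≈ 0.0103 with the prices above.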
# embedding model
[model.embedding] # embedding
@@ -255,26 +274,6 @@ provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
[model.llm_observation] # observation model; a free model is recommended, qwen2.5 7b suggested
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
[model.llm_sub_heartflow] # sub heart flow; a V3-class model is recommended
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
temp = 0.2 # model temperature; for the new V3, 0.1-0.3 is recommended
[model.llm_heartflow] # heart flow; qwen2.5 32b recommended
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
pri_out = 1.26
# Private chat PFC (requires the PFC feature to be enabled). By default all three models are SiliconFlow v3; if you need to support multiple simultaneous private chats or frequent calls, switch one or two of them to the official v3 or another model to avoid hitting 429 rate limits.
@@ -299,4 +298,15 @@ pri_out = 8
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
pri_out = 8
# This model is not currently used!!
# This model is not currently used!!
# This model is not currently used!!
[model.llm_heartflow] # heart flow
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
pri_out = 1.26