feat(config): add a dedicated model and size configuration for the sub-planner ("cerebellum")

Introduces an independent model configuration, `planner_small`, for the sub-planner ("cerebellum"), so it can use a different, more lightweight model than the main planner ("cerebrum") to improve parallel-processing performance.

Also adds a `planner_size` option that controls how many actions each sub-planner handles, letting users tune the degree of parallelism against the context window available to a single sub-planner (see the sketch after the change list below).

- Add the `planner_small` task configuration to `model_config.toml`
- Add the `planner_size` parameter to `bot_config.toml`
- Update the code to use the new options and remove the related hard-coded values and TODO comments
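
For illustration, the sizing logic this change wires up can be sketched as follows: the fractional part of `planner_size` becomes the probability of rounding the batch size up, and the pending actions are then split into chunks of that size, one chunk per sub-planner. This is a minimal standalone sketch based on the diff below; the helper names `stochastic_batch_size` and `split_into_sub_planner_batches` are hypothetical and not part of the codebase.

import random
from typing import List, TypeVar

T = TypeVar("T")

def stochastic_batch_size(planner_size: float) -> int:
    """Round a fractional size to an int, carrying the fraction as a round-up probability.

    Mirrors the expression used in ActionPlanner:
    int(x) + (1 if random.random() < x - int(x) else 0)
    """
    base = int(planner_size)
    return base + (1 if random.random() < planner_size - base else 0)

def split_into_sub_planner_batches(actions: List[T], planner_size: float) -> List[List[T]]:
    """Split actions into chunks; each chunk would go to one sub-planner (illustrative only)."""
    batches: List[List[T]] = []
    i = 0
    while i < len(actions):
        size = max(1, stochastic_batch_size(planner_size))  # never emit an empty chunk
        batches.append(actions[i:i + size])
        i += size
    return batches

if __name__ == "__main__":
    actions = [f"action_{n}" for n in range(12)]
    print(split_into_sub_planner_batches(actions, planner_size=5.0))
    # planner_size=5.0 always yields batches of 5 (plus a remainder)

With `planner_size = 5.0` the batch size is always 5, so 12 actions become three sub-planner batches of 5, 5, and 2; a value like 5.5 alternates randomly between 5 and 6, which is why the option accepts a float.
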
Author: minecraft1024a
Date: 2025-09-06 17:14:32 +08:00
Parent: a0ddd525b3
Commit: e5247eba96

5 changed files with 15 additions and 7 deletions

Changed file 1 of 5: planner code (class ActionPlanner)

@@ -160,9 +160,8 @@ class ActionPlanner:
             model_set=model_config.model_task_config.planner, request_type="planner"
         )
         # --- 小脑 (新增) ---
-        # TODO: 可以在 model_config.toml 中为 planner_small 单独配置一个轻量级模型
         self.planner_small_llm = LLMRequest(
-            model_set=model_config.model_task_config.planner, request_type="planner_small"
+            model_set=model_config.model_task_config.planner_small, request_type="planner_small"
         )
         self.last_obs_time_mark = 0.0
@@ -496,8 +495,7 @@ class ActionPlanner:
         if sub_planner_actions:
             sub_planner_actions_num = len(sub_planner_actions)
-            # TODO: 您可以在 config.toml 的 [chat] 部分添加 planner_size = 5.0 来自定义此值
-            planner_size_config = getattr(global_config.chat, "planner_size", 5.0)
+            planner_size_config = global_config.chat.planner_size
             sub_planner_size = int(planner_size_config) + (
                 1 if random.random() < planner_size_config - int(planner_size_config) else 0
             )

Changed file 2 of 5: model task and adapter config schema (classes ModelTaskConfig, APIAdapterConfig)

@@ -113,6 +113,7 @@ class ModelTaskConfig(ValidatedConfigBase):
     voice: TaskConfig = Field(..., description="语音识别模型配置")
     tool_use: TaskConfig = Field(..., description="专注工具使用模型配置")
     planner: TaskConfig = Field(..., description="规划模型配置")
+    planner_small: TaskConfig = Field(..., description="小脑sub-planner规划模型配置")
     embedding: TaskConfig = Field(..., description="嵌入模型配置")
     lpmm_entity_extract: TaskConfig = Field(..., description="LPMM实体提取模型配置")
     lpmm_rdf_build: TaskConfig = Field(..., description="LPMM RDF构建模型配置")
@@ -147,9 +148,9 @@ class ModelTaskConfig(ValidatedConfigBase):
 class APIAdapterConfig(ValidatedConfigBase):
     """API Adapter配置类"""

-    models: List[ModelInfo] = Field(..., min_items=1, description="模型列表")
+    models: List[ModelInfo] = Field(..., min_length=1, description="模型列表")
     model_task_config: ModelTaskConfig = Field(..., description="模型任务配置")
-    api_providers: List[APIProvider] = Field(..., min_items=1, description="API提供商列表")
+    api_providers: List[APIProvider] = Field(..., min_length=1, description="API提供商列表")

     def __init__(self, **data):
         super().__init__(**data)
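
As an aside, the rename from `min_items` to `min_length` in this hunk matches pydantic v2's naming for collection-length constraints. A minimal check, assuming pydantic v2 is installed; the `AdapterSketch` model is illustrative, not the project's class:

from typing import List

from pydantic import BaseModel, Field, ValidationError

class AdapterSketch(BaseModel):
    # pydantic v2 spells the list-length constraint min_length; min_items is the v1 name
    models: List[str] = Field(..., min_length=1)

print(AdapterSketch(models=["qwen3-30b"]).models)  # ['qwen3-30b']

try:
    AdapterSketch(models=[])
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # "too_short": an empty list violates min_length=1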

Changed file 3 of 5: chat config schema (class ChatConfig)

@@ -92,6 +92,7 @@ class ChatConfig(ValidatedConfigBase):
         default_factory=list, description="启用主动思考的群聊范围格式platform:group_id为空则不限制"
     )
     delta_sigma: int = Field(default=120, description="采用正态分布随机时间间隔")
+    planner_size: float = Field(default=5.0, ge=1.0, description="小脑sub-planner的尺寸决定每个小脑处理多少个action")

     def get_current_talk_frequency(self, chat_stream_id: Optional[str] = None) -> float:
         """

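The new field's constraints can be exercised on their own: with `default=5.0` and `ge=1.0`, omitting the key falls back to 5.0, fractional values are accepted, and anything below 1.0 is rejected at load time. A minimal sketch assuming pydantic v2; `ChatConfigSketch` is illustrative, not the project's ChatConfig:

from pydantic import BaseModel, Field, ValidationError

class ChatConfigSketch(BaseModel):
    # Same constraints as the planner_size field added above
    planner_size: float = Field(default=5.0, ge=1.0)

print(ChatConfigSketch().planner_size)                  # 5.0 (default)
print(ChatConfigSketch(planner_size=3.5).planner_size)  # 3.5 (fractions are allowed)

try:
    ChatConfigSketch(planner_size=0.5)
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # "greater_than_equal": values below 1.0 are rejected
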
Changed file 4 of 5: bot_config.toml

@@ -173,6 +173,9 @@ delta_sigma = 120 # 正态分布的标准差,控制时间间隔的随机程度
 # 实验建议:试试 proactive_thinking_interval=0 + delta_sigma 非常大 的纯随机模式!
 # 结果保证生成的间隔永远为正数负数会取绝对值最小1秒最大24小时

+# --- 大脑/小脑 Planner 配置 ---
+planner_size = 5.0 # 小脑sub-planner的尺寸决定每个小脑处理多少个action。数值越小并行度越高但单个小脑的上下文越少。建议范围3.0-8.0
+
 [relationship]
 enable_relationship = true # 是否启用关系系统
 relation_frequency = 1 # 关系频率MoFox-Bot构建关系的频率

Changed file 5 of 5: model_config.toml

@@ -1,5 +1,5 @@
 [inner]
-version = "1.3.0"
+version = "1.3.1"
 # 配置文件版本号迭代规则同bot_config.toml
@@ -142,6 +142,11 @@ model_list = ["siliconflow-deepseek-v3"]
 temperature = 0.3
 max_tokens = 800

+[model_task_config.planner_small] #决策小脑负责决定具体action的模型建议使用速度快的小模型
+model_list = ["qwen3-30b"]
+temperature = 0.5
+max_tokens = 800
+
 [model_task_config.emotion] #负责麦麦的情绪变化
 model_list = ["siliconflow-deepseek-v3"]
 temperature = 0.3
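
To see how the new section resolves, the fragment below can be parsed with the standard-library `tomllib` and the `planner_small` task read out alongside `planner`. The `TaskConfigSketch` dataclass only mirrors the keys that appear in the template (`model_list`, `temperature`, `max_tokens`) and is not the project's TaskConfig:

import tomllib  # standard library since Python 3.11
from dataclasses import dataclass
from typing import List

@dataclass
class TaskConfigSketch:
    # Mirrors only the keys shown in the template; purely illustrative
    model_list: List[str]
    temperature: float
    max_tokens: int

CONFIG_FRAGMENT = """
[model_task_config.planner]
model_list = ["siliconflow-deepseek-v3"]
temperature = 0.3
max_tokens = 800

[model_task_config.planner_small]
model_list = ["qwen3-30b"]
temperature = 0.5
max_tokens = 800
"""

data = tomllib.loads(CONFIG_FRAGMENT)
small = TaskConfigSketch(**data["model_task_config"]["planner_small"])
print(small.model_list, small.temperature, small.max_tokens)  # ['qwen3-30b'] 0.5 800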