fix: provide a more customizable max_token configuration

Some brain-dead API providers emit </think>, and the output then gets truncated halfway through by max_tokens
SengokuCola
2025-06-05 10:18:50 +08:00
parent 4ebfca096c
commit 303e920e3a
4 changed files with 35 additions and 8 deletions


@@ -439,7 +439,6 @@ class ModelConfig(ConfigBase):
focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
"""Focus working-memory model configuration"""
focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
"""Focus tool-use model configuration"""