fix: 提供更可自定义的 max_tokens 配置
部分 API 服务商的输出一半就被 max_tokens 截断了
This commit is contained in:
@@ -439,7 +439,6 @@ class ModelConfig(ConfigBase):
focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})

"""专注工作记忆模型配置"""


focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})

"""专注工具使用模型配置"""

Reference in New Issue
Block a user