diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 48ef0c082..b7aa0a8b8 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -138,6 +138,7 @@ class LLMRequest:
         temperature: Optional[float] = None,
         max_tokens: Optional[int] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
+        raise_when_empty: bool = True,
     ) -> Tuple[str, Tuple[str, str, Optional[List[ToolCall]]]]:
         """
         异步生成响应
@@ -183,7 +184,9 @@ class LLMRequest:
             endpoint="/chat/completions",
         )
         if not content:
-            logger.warning("生成的响应为空")
+            if raise_when_empty:
+                logger.warning("生成的响应为空")
+                raise RuntimeError("生成的响应为空")
             content = "生成的响应为空，请检查模型配置或输入内容是否正确"
         return content, (reasoning_content, model_info.name, tool_calls)
 
diff --git a/src/plugin_system/core/tool_use.py b/src/plugin_system/core/tool_use.py
index 9a37bc1d8..17e236856 100644
--- a/src/plugin_system/core/tool_use.py
+++ b/src/plugin_system/core/tool_use.py
@@ -111,7 +111,7 @@ class ToolExecutor:
 
         # 调用LLM进行工具决策
         response, (reasoning_content, model_name, tool_calls) = await self.llm_model.generate_response_async(
-            prompt=prompt, tools=tools
+            prompt=prompt, tools=tools, raise_when_empty=False
         )
 
         # 执行工具调用