Correctly use lpmm to build prompts
@@ -7,8 +7,9 @@
     success, response, reasoning, model_name = await llm_api.generate_with_model(prompt, model_config)
 """

-from typing import Tuple, Dict
+from typing import Tuple, Dict, List, Any, Optional
 from src.common.logger import get_logger
+from src.llm_models.payload_content.tool_option import ToolCall
 from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config, model_config
 from src.config.api_ada_configs import TaskConfig
@@ -52,7 +53,11 @@ def get_available_models() -> Dict[str, TaskConfig]:


 async def generate_with_model(
-    prompt: str, model_config: TaskConfig, request_type: str = "plugin.generate", **kwargs
+    prompt: str,
+    model_config: TaskConfig,
+    request_type: str = "plugin.generate",
+    temperature: Optional[float] = None,
+    max_tokens: Optional[int] = None,
 ) -> Tuple[bool, str, str, str]:
     """Generate content with the specified model

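With **kwargs replaced by explicit parameters, plugin code now passes sampling options by name. A minimal sketch of a call against the new signature; the import path and the "replyer" task key are assumptions, not shown in this diff:

    from src.plugin_system.apis import llm_api  # import path assumed

    async def summarize(text: str) -> str:
        models = llm_api.get_available_models()   # Dict[str, TaskConfig]
        model_config = models["replyer"]           # "replyer" is an illustrative key
        success, response, reasoning, model_name = await llm_api.generate_with_model(
            prompt=f"Summarize the following text:\n{text}",
            model_config=model_config,
            request_type="plugin.summarize",
            temperature=0.3,   # explicit parameter, no longer passed through **kwargs
            max_tokens=512,
        )
        return response if success else ""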
@@ -60,7 +65,6 @@ async def generate_with_model(
         prompt: the prompt text
         model_config: model configuration (a TaskConfig obtained from get_available_models)
         request_type: request type identifier
-        **kwargs: other model-specific parameters, such as temperature and max_tokens

     Returns:
         Tuple[bool, str, str, str]: (success, generated content, reasoning content, model name)
@@ -70,12 +74,53 @@ async def generate_with_model(
         logger.info(f"[LLMAPI] Generating content with model set {model_name_list}")
         logger.debug(f"[LLMAPI] Full prompt: {prompt}")

-        llm_request = LLMRequest(model_set=model_config, request_type=request_type, **kwargs)
+        llm_request = LLMRequest(model_set=model_config, request_type=request_type)

-        response, (reasoning_content, model_name, _) = await llm_request.generate_response_async(prompt)
+        response, (reasoning_content, model_name, _) = await llm_request.generate_response_async(prompt, temperature=temperature, max_tokens=max_tokens)
         return True, response, reasoning_content, model_name

     except Exception as e:
         error_msg = f"Error while generating content: {str(e)}"
         logger.error(f"[LLMAPI] {error_msg}")
         return False, error_msg, "", ""
+
+async def generate_with_model_with_tools(
+    prompt: str,
+    model_config: TaskConfig,
+    tool_options: List[Dict[str, Any]] | None = None,
+    request_type: str = "plugin.generate",
+    temperature: Optional[float] = None,
+    max_tokens: Optional[int] = None,
+) -> Tuple[bool, str, str, str, List[ToolCall] | None]:
+    """Generate content with the specified model and tools
+
+    Args:
+        prompt: the prompt text
+        model_config: model configuration (a TaskConfig obtained from get_available_models)
+        tool_options: list of tool options
+        request_type: request type identifier
+        temperature: temperature parameter
+        max_tokens: maximum number of tokens
+
+    Returns:
+        Tuple[bool, str, str, str, List[ToolCall] | None]: (success, generated content, reasoning content, model name, tool calls)
+    """
+    try:
+        model_name_list = model_config.model_list
+        logger.info(f"[LLMAPI] Generating content with model set {model_name_list}")
+        logger.debug(f"[LLMAPI] Full prompt: {prompt}")
+
+        llm_request = LLMRequest(model_set=model_config, request_type=request_type)
+
+        response, (reasoning_content, model_name, tool_call) = await llm_request.generate_response_async(
+            prompt,
+            tools=tool_options,
+            temperature=temperature,
+            max_tokens=max_tokens
+        )
+        return True, response, reasoning_content, model_name, tool_call
+
+    except Exception as e:
+        error_msg = f"Error while generating content: {str(e)}"
+        logger.error(f"[LLMAPI] {error_msg}")
+        return False, error_msg, "", "", None
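The new generate_with_model_with_tools mirrors generate_with_model but forwards tool definitions and returns any tool calls the model makes. A hedged usage sketch; the exact tool-definition schema is not shown in this diff, so the dict below is an assumption (in practice the definitions would come from tool_api.get_llm_available_tool_definitions()):

    from src.plugin_system.apis import llm_api  # import path assumed

    weather_tool = {  # schema shape assumed (OpenAI-style function definition)
        "name": "get_weather",
        "description": "Look up the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    }

    async def ask_with_tools(model_config):
        success, response, reasoning, model_name, tool_calls = await llm_api.generate_with_model_with_tools(
            prompt="What is the weather like in Shanghai today?",
            model_config=model_config,
            tool_options=[weather_tool],
            temperature=0.2,
        )
        if success and tool_calls:
            # Each entry is a ToolCall; they can be handed to ToolExecutor.execute_tool_calls
            for call in tool_calls:
                print(call)
        return response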
@@ -3,10 +3,11 @@ from typing import List, Type, Tuple, Union
 from .plugin_base import PluginBase

 from src.common.logger import get_logger
-from src.plugin_system.base.component_types import ActionInfo, CommandInfo, EventHandlerInfo
+from src.plugin_system.base.component_types import ActionInfo, CommandInfo, EventHandlerInfo, ToolInfo
 from .base_action import BaseAction
 from .base_command import BaseCommand
 from .base_events_handler import BaseEventHandler
+from .base_tool import BaseTool

 logger = get_logger("base_plugin")

@@ -31,6 +32,7 @@ class BasePlugin(PluginBase):
             Tuple[ActionInfo, Type[BaseAction]],
             Tuple[CommandInfo, Type[BaseCommand]],
             Tuple[EventHandlerInfo, Type[BaseEventHandler]],
+            Tuple[ToolInfo, Type[BaseTool]],
         ]
     ]:
        """Get the list of components contained in this plugin

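With Tuple[ToolInfo, Type[BaseTool]] added to the union, a plugin can register tools from get_components alongside actions, commands and event handlers. A rough sketch of what such an entry might look like; how a ToolInfo is actually constructed is not shown in this diff, so the constructor arguments below are assumptions:

    class KnowledgeTool(BaseTool):
        """Hypothetical BaseTool subclass shipped by the plugin."""
        name = "get_knowledge"

    class KnowledgePlugin(BasePlugin):
        def get_components(self):
            # The (info, component class) pairing mirrors the existing Action/Command/EventHandler
            # entries; the ToolInfo(...) call is illustrative only.
            return [
                (ToolInfo(name=KnowledgeTool.name), KnowledgeTool),
            ]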
@@ -1,6 +1,7 @@
 import time
 from typing import List, Dict, Tuple, Optional, Any
 from src.plugin_system.apis.tool_api import get_llm_available_tool_definitions, get_tool_instance
+from src.plugin_system.base.base_tool import BaseTool
 from src.plugin_system.core.global_announcement_manager import global_announcement_manager
 from src.llm_models.utils_model import LLMRequest
 from src.llm_models.payload_content import ToolCall
@@ -114,7 +115,7 @@ class ToolExecutor:
         )

         # Execute the tool calls
-        tool_results, used_tools = await self._execute_tool_calls(tool_calls)
+        tool_results, used_tools = await self.execute_tool_calls(tool_calls)

         # Cache the results
         if tool_results:
@@ -133,7 +134,7 @@ class ToolExecutor:
         user_disabled_tools = global_announcement_manager.get_disabled_chat_tools(self.chat_id)
         return [definition for name, definition in all_tools if name not in user_disabled_tools]

-    async def _execute_tool_calls(self, tool_calls: Optional[List[ToolCall]]) -> Tuple[List[Dict[str, Any]], List[str]]:
+    async def execute_tool_calls(self, tool_calls: Optional[List[ToolCall]]) -> Tuple[List[Dict[str, Any]], List[str]]:
         """Execute tool calls

         Args:
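Dropping the leading underscore makes execute_tool_calls part of the executor's public interface, so callers that already hold ToolCall objects (for example the list returned by generate_with_model_with_tools above) can run them directly. A small sketch, assuming a ToolExecutor instance created elsewhere:

    async def run_llm_tool_calls(executor, tool_calls):
        # tool_calls: List[ToolCall], e.g. the fifth element returned by
        # llm_api.generate_with_model_with_tools(...)
        tool_results, used_tools = await executor.execute_tool_calls(tool_calls)
        for result in tool_results:
            print(result)   # each result is a Dict[str, Any] describing one tool's output
        return used_tools   # names of the tools that actually ran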
@@ -158,7 +159,7 @@ class ToolExecutor:
             logger.debug(f"{self.log_prefix}Executing tool: {tool_name}")

             # Execute the tool
-            result = await self._execute_tool_call(tool_call)
+            result = await self.execute_tool_call(tool_call)

             if result:
                 tool_info = {
@@ -191,7 +192,7 @@ class ToolExecutor:

         return tool_results, used_tools

-    async def _execute_tool_call(self, tool_call: ToolCall) -> Optional[Dict[str, Any]]:
+    async def execute_tool_call(self, tool_call: ToolCall, tool_instance: Optional[BaseTool] = None) -> Optional[Dict[str, Any]]:
         # sourcery skip: use-assigned-variable
         """Execute a single tool call

@@ -207,7 +208,7 @@ class ToolExecutor:
         function_args["llm_called"] = True  # Mark as an LLM-initiated call

         # Get the corresponding tool instance
-        tool_instance = get_tool_instance(function_name)
+        tool_instance = tool_instance or get_tool_instance(function_name)
         if not tool_instance:
             logger.warning(f"Unknown tool name: {function_name}")
             return None
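Because execute_tool_call now accepts an optional pre-resolved tool_instance, a caller can skip the registry lookup when it already holds the instance; when the argument is omitted (or None), the method falls back to get_tool_instance(function_name) exactly as before. A brief sketch; the surrounding plugin code is assumed:

    async def run_single_call(executor, tool_call, my_tool: BaseTool | None = None):
        # my_tool: an already-constructed BaseTool instance (e.g. one the plugin keeps
        # around with preloaded state); passing it bypasses the get_tool_instance() lookup.
        result = await executor.execute_tool_call(tool_call, tool_instance=my_tool)
        return result   # None when the tool is unknown or produced no output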
@@ -294,7 +295,7 @@ class ToolExecutor:
         if expired_keys:
             logger.debug(f"{self.log_prefix}Cleaned up {len(expired_keys)} expired cache entries")

-    async def execute_specific_tool(self, tool_name: str, tool_args: Dict) -> Optional[Dict]:
+    async def execute_specific_tool_simple(self, tool_name: str, tool_args: Dict) -> Optional[Dict]:
         """Directly execute a specific tool

         Args:
@@ -314,7 +315,7 @@ class ToolExecutor:

         logger.info(f"{self.log_prefix}Directly executing tool: {tool_name}")

-        result = await self._execute_tool_call(tool_call)
+        result = await self.execute_tool_call(tool_call)

         if result:
             tool_info = {
@@ -405,7 +406,7 @@ results, used_tools, prompt = await executor.execute_from_chat_message(
 )

 # 5. Directly execute a specific tool
-result = await executor.execute_specific_tool(
+result = await executor.execute_specific_tool_simple(
     tool_name="get_knowledge",
     tool_args={"query": "machine learning"}
 )