refc: refactor plugin API, complete docs, merge expressor and replyer, split reply and sender, new log viewer

SengokuCola
2025-06-19 20:20:34 +08:00
parent 7e05ede846
commit ab28b94e33
63 changed files with 5285 additions and 8316 deletions

src/plugin_system/apis/llm_api.py

@@ -1,3 +1,12 @@
+"""LLM API module
+
+Provides functionality for interacting with LLM models.
+
+Usage:
+    from src.plugin_system.apis import llm_api
+    models = llm_api.get_available_models()
+    success, response, reasoning, model_name = await llm_api.generate_with_model(prompt, model_config)
+"""
 from typing import Tuple, Dict, Any
 from src.common.logger import get_logger
 from src.llm_models.utils_model import LLMRequest
@@ -6,49 +15,51 @@ from src.config.config import global_config
 logger = get_logger("llm_api")
 
-class LLMAPI:
-    """LLM API module
-
-    Provides functionality for interacting with LLM models
-    """
-
-    def get_available_models(self) -> Dict[str, Any]:
-        """Get all available model configurations
-
-        Returns:
-            Dict[str, Any]: model config dict, keyed by model name with model configs as values
-        """
-        try:
-            if not hasattr(global_config, "model"):
-                logger.error(f"{self.log_prefix} Cannot get model list: no model config found in global config")
-                return {}
-            models = global_config.model
-            return models
-        except Exception as e:
-            logger.error(f"[LLMAPI] Failed to get available models: {e}")
-            return {}
-
-    async def generate_with_model(
-        self, prompt: str, model_config: Dict[str, Any], request_type: str = "plugin.generate", **kwargs
-    ) -> Tuple[bool, str, str, str]:
-        """Generate content with the specified model
-
-        Args:
-            prompt: the prompt
-            model_config: model configuration (as obtained from get_available_models)
-            request_type: request type identifier
-            **kwargs: other model-specific parameters, e.g. temperature, max_tokens
-
-        Returns:
-            Tuple[bool, str, str, str]: (success, generated content, reasoning, model name)
-        """
-        try:
-            logger.info(f"{self.log_prefix} Generating content with model, prompt: {prompt[:100]}...")
-
-            llm_request = LLMRequest(model=model_config, request_type=request_type, **kwargs)
-
-            response, (reasoning, model_name) = await llm_request.generate_response_async(prompt)
-            return True, response, reasoning, model_name
-        except Exception as e:
-            error_msg = f"Error while generating content: {str(e)}"
-            logger.error(f"{self.log_prefix} {error_msg}")
-            return False, error_msg, "", ""
+# =============================================================================
+# LLM model API functions
+# =============================================================================
+
+
+def get_available_models() -> Dict[str, Any]:
+    """Get all available model configurations
+
+    Returns:
+        Dict[str, Any]: model config dict, keyed by model name with model configs as values
+    """
+    try:
+        if not hasattr(global_config, "model"):
+            logger.error("[LLMAPI] Cannot get model list: no model config found in global config")
+            return {}
+        models = global_config.model
+        return models
+    except Exception as e:
+        logger.error(f"[LLMAPI] Failed to get available models: {e}")
+        return {}
+
+
+async def generate_with_model(
+    prompt: str, model_config: Dict[str, Any], request_type: str = "plugin.generate", **kwargs
+) -> Tuple[bool, str, str, str]:
+    """Generate content with the specified model
+
+    Args:
+        prompt: the prompt
+        model_config: model configuration (as obtained from get_available_models)
+        request_type: request type identifier
+        **kwargs: other model-specific parameters, e.g. temperature, max_tokens
+
+    Returns:
+        Tuple[bool, str, str, str]: (success, generated content, reasoning, model name)
+    """
+    try:
+        logger.info(f"[LLMAPI] Generating content with model, prompt: {prompt[:100]}...")
+
+        llm_request = LLMRequest(model=model_config, request_type=request_type, **kwargs)
+
+        response, (reasoning, model_name) = await llm_request.generate_response_async(prompt)
+        return True, response, reasoning, model_name
+    except Exception as e:
+        error_msg = f"Error while generating content: {str(e)}"
+        logger.error(f"[LLMAPI] {error_msg}")
+        return False, error_msg, "", ""
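
With the class removed, plugin code calls the module-level functions directly instead of instantiating LLMAPI. A minimal usage sketch under the refactored API; the summarize helper and the "replyer" model key are illustrative assumptions, not part of this commit:

    from src.plugin_system.apis import llm_api

    async def summarize(text: str) -> str:
        # Model configs are keyed by name, per the get_available_models docstring
        models = llm_api.get_available_models()
        model_config = models.get("replyer")  # hypothetical model name from the bot config
        if not model_config:
            return ""
        success, response, reasoning, model_name = await llm_api.generate_with_model(
            prompt=f"Summarize in one sentence: {text}",
            model_config=model_config,
            request_type="plugin.summarize",
        )
        # On failure, response carries the error message instead of generated text
        return response if success else ""

Dropping the class also drops the per-instance self.log_prefix, which is why both functions now log under the fixed "[LLMAPI]" prefix.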