ruff: clean up code and normalize import order
A large-scale cleanup and refactor across the codebase, mainly:

- Unified and fixed the `import` ordering in many files so it follows PEP 8.
- Removed a large number of unused imports and variables, reducing redundancy.
- Fixed assorted style issues, such as extra blank lines and inconsistent quote usage.
- Simplified exception-handling code and removed unnecessary `noqa` comments.
- Switched several files to modern type annotation syntax (e.g. `list[str]` instead of `List[str]`).
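The annotation changes follow PEP 585 (builtin generics) and PEP 604 (`X | None` unions), the kind of rewrite ruff's pyupgrade rules apply automatically. A minimal sketch of the before/after pattern (the function and its names are hypothetical, not taken from this repo):

```python
from typing import Dict, List, Optional, Tuple  # only needed by the old-style signature


# Old style: typing generics plus Optional[...]
def pick_frames_old(frames: List[Tuple[str, float]], limit: Optional[int] = None) -> Dict[str, float]:
    return dict(frames[:limit])


# New style after the cleanup: builtin generics (PEP 585) and `X | None` unions (PEP 604)
def pick_frames_new(frames: list[tuple[str, float]], limit: int | None = None) -> dict[str, float]:
    return dict(frames[:limit])
```

Fixes of this kind are normally produced by running `ruff check --fix` (import sorting, unused imports, pyupgrade rewrites, unused `noqa`) followed by `ruff format` (quote style); the exact rule selection depends on the project's ruff configuration, which is not shown here.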
bot.py (32 changed lines)
@@ -1,22 +1,20 @@
 # import asyncio
 import asyncio
 import os
+import platform
 import sys
 import time
-import platform
 import traceback
-from pathlib import Path
 from contextlib import asynccontextmanager
-import hashlib
-from typing import Optional, Dict, Any
+from pathlib import Path

 # 初始化基础工具
-from colorama import init, Fore
+from colorama import Fore, init
 from dotenv import load_dotenv
 from rich.traceback import install

 # 初始化日志系统
-from src.common.logger import initialize_logging, get_logger, shutdown_logging
+from src.common.logger import get_logger, initialize_logging, shutdown_logging

 # 初始化日志和错误显示
 initialize_logging()
@@ -24,7 +22,7 @@ logger = get_logger("main")
 install(extra_lines=3)

 # 常量定义
-SUPPORTED_DATABASES = ['sqlite', 'mysql', 'postgresql']
+SUPPORTED_DATABASES = ["sqlite", "mysql", "postgresql"]
 SHUTDOWN_TIMEOUT = 10.0
 EULA_CHECK_INTERVAL = 2
 MAX_EULA_CHECK_ATTEMPTS = 30
@@ -48,7 +46,7 @@ class ConfigManager:
 if template_env.exists():
 logger.info("未找到.env文件,正在从模板创建...")
 try:
-env_file.write_text(template_env.read_text(encoding='utf-8'), encoding='utf-8')
+env_file.write_text(template_env.read_text(encoding="utf-8"), encoding="utf-8")
 logger.info("已从template/template.env创建.env文件")
 logger.warning("请编辑.env文件,将EULA_CONFIRMED设置为true并配置其他必要参数")
 except Exception as e:
@@ -73,8 +71,8 @@ class ConfigManager:

 # 检查文件内容是否包含必要字段
 try:
-content = env_file.read_text(encoding='utf-8')
-if 'EULA_CONFIRMED' not in content:
+content = env_file.read_text(encoding="utf-8")
+if "EULA_CONFIRMED" not in content:
 logger.error(".env文件缺少EULA_CONFIRMED字段")
 return False
 except Exception as e:
@@ -110,8 +108,8 @@ class EULAManager:
 confirm_logger.error("无法加载环境变量,EULA检查失败")
 sys.exit(1)

-eula_confirmed = os.getenv('EULA_CONFIRMED', '').lower()
-if eula_confirmed == 'true':
+eula_confirmed = os.getenv("EULA_CONFIRMED", "").lower()
+if eula_confirmed == "true":
 logger.info("EULA已通过环境变量确认")
 return

@@ -130,8 +128,8 @@ class EULAManager:

 # 重新加载环境变量
 ConfigManager.safe_load_dotenv()
-eula_confirmed = os.getenv('EULA_CONFIRMED', '').lower()
-if eula_confirmed == 'true':
+eula_confirmed = os.getenv("EULA_CONFIRMED", "").lower()
+if eula_confirmed == "true":
 confirm_logger.info("EULA确认成功,感谢您的同意")
 return

@@ -303,7 +301,7 @@ class ConfigurationValidator:
 from src.config.config import global_config

 # 检查必要的配置节
-required_sections = ['database', 'bot']
+required_sections = ["database", "bot"]
 for section in required_sections:
 if not hasattr(global_config, section):
 logger.error(f"配置中缺少{section}配置节")
@@ -311,7 +309,7 @@

 # 验证数据库配置
 db_config = global_config.database
-if not hasattr(db_config, 'database_type') or not db_config.database_type:
+if not hasattr(db_config, "database_type") or not db_config.database_type:
 logger.error("数据库配置缺少database_type字段")
 return False

@@ -422,7 +420,7 @@ async def wait_for_user_input():
 """等待用户输入(异步方式)"""
 try:
 # 在非生产环境下,使用异步方式等待输入
-if os.getenv('ENVIRONMENT') != 'production':
+if os.getenv("ENVIRONMENT") != "production":
 logger.info("程序执行完成,按 Ctrl+C 退出...")
 # 简单的异步等待,避免阻塞事件循环
 while True:
@@ -21,6 +21,7 @@ from .memory_chunk import MemoryChunk as Memory

 # 遗忘引擎
 from .memory_forgetting_engine import ForgettingConfig, MemoryForgettingEngine, get_memory_forgetting_engine
+from .memory_formatter import format_memories_bracket_style

 # 记忆管理器
 from .memory_manager import MemoryManager, MemoryResult, memory_manager
@@ -30,7 +31,6 @@ from .memory_system import MemorySystem, MemorySystemConfig, get_memory_system,

 # Vector DB存储系统
 from .vector_memory_storage_v2 import VectorMemoryStorage, VectorStorageConfig, get_vector_memory_storage
-from .memory_formatter import format_memories_bracket_style

 __all__ = [
 # 核心数据结构
@@ -17,8 +17,9 @@
 """
 from __future__ import annotations

-from typing import Any, Iterable
 import time
+from collections.abc import Iterable
+from typing import Any


 def _format_timestamp(ts: Any) -> str:
@@ -2,9 +2,8 @@
 记忆元数据索引。
 """

-from dataclasses import dataclass, asdict
+from dataclasses import asdict, dataclass
 from typing import Any
-from time import time

 from src.common.logger import get_logger

@@ -12,6 +11,7 @@ logger = get_logger(__name__)

 from inkfox.memory import PyMetadataIndex as _RustIndex # type: ignore


 @dataclass
 class MemoryMetadataIndexEntry:
 memory_id: str
@@ -51,7 +51,7 @@ class MemoryMetadataIndex:
 if payload:
 try:
 self._rust.batch_add(payload)
-except Exception as ex: # noqa: BLE001
+except Exception as ex:
 logger.error(f"Rust 元数据批量添加失败: {ex}")

 def add_or_update(self, entry: MemoryMetadataIndexEntry):
@@ -88,7 +88,7 @@ class MemoryMetadataIndex:
 if flexible_mode:
 return list(self._rust.search_flexible(params))
 return list(self._rust.search_strict(params))
-except Exception as ex: # noqa: BLE001
+except Exception as ex:
 logger.error(f"Rust 搜索失败返回空: {ex}")
 return []

@@ -105,18 +105,18 @@ class MemoryMetadataIndex:
 "keywords_count": raw.get("keywords_indexed", 0),
 "tags_count": raw.get("tags_indexed", 0),
 }
-except Exception as ex: # noqa: BLE001
+except Exception as ex:
 logger.warning(f"读取 Rust stats 失败: {ex}")
 return {"total_memories": 0}

 def save(self): # 仅调用 rust save
 try:
 self._rust.save()
-except Exception as ex: # noqa: BLE001
+except Exception as ex:
 logger.warning(f"Rust save 失败: {ex}")


 __all__ = [
-"MemoryMetadataIndexEntry",
 "MemoryMetadataIndex",
+"MemoryMetadataIndexEntry",
 ]
@@ -263,7 +263,7 @@ class MessageRecv(Message):
 logger.warning("视频消息中没有base64数据")
 return "[收到视频消息,但数据异常]"
 except Exception as e:
-logger.error(f"视频处理失败: {str(e)}")
+logger.error(f"视频处理失败: {e!s}")
 import traceback

 logger.error(f"错误详情: {traceback.format_exc()}")
@@ -277,7 +277,7 @@ class MessageRecv(Message):
 logger.info("未启用视频识别")
 return "[视频]"
 except Exception as e:
-logger.error(f"处理消息段失败: {str(e)}, 类型: {segment.type}, 数据: {segment.data}")
+logger.error(f"处理消息段失败: {e!s}, 类型: {segment.type}, 数据: {segment.data}")
 return f"[处理失败的{segment.type}消息]"

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """纯 inkfox 视频关键帧分析工具

 仅依赖 `inkfox.video` 提供的 Rust 扩展能力:
@@ -14,25 +13,25 @@

 from __future__ import annotations

-import os
-import io
 import asyncio
 import base64
-import tempfile
-from pathlib import Path
-from typing import List, Tuple, Optional, Dict, Any
 import hashlib
+import io
+import os
+import tempfile
 import time
+from pathlib import Path
+from typing import Any

 from PIL import Image

+from src.common.database.sqlalchemy_models import Videos, get_db_session # type: ignore
 from src.common.logger import get_logger
 from src.config.config import global_config, model_config
 from src.llm_models.utils_model import LLMRequest
-from src.common.database.sqlalchemy_models import Videos, get_db_session # type: ignore

 # 简易并发控制:同一 hash 只处理一次
-_video_locks: Dict[str, asyncio.Lock] = {}
+_video_locks: dict[str, asyncio.Lock] = {}
 _locks_guard = asyncio.Lock()

 logger = get_logger("utils_video")
@@ -90,7 +89,7 @@ class VideoAnalyzer:
 logger.debug(f"获取系统信息失败: {e}")

 # ---- 关键帧提取 ----
-async def extract_keyframes(self, video_path: str) -> List[Tuple[str, float]]:
+async def extract_keyframes(self, video_path: str) -> list[tuple[str, float]]:
 """提取关键帧并返回 (base64, timestamp_seconds) 列表"""
 with tempfile.TemporaryDirectory() as tmp:
 result = video.extract_keyframes_from_video( # type: ignore[attr-defined]
@@ -105,7 +104,7 @@ class VideoAnalyzer:
 )
 files = sorted(Path(tmp).glob("keyframe_*.jpg"))[: self.max_frames]
 total_ms = getattr(result, "total_time_ms", 0)
-frames: List[Tuple[str, float]] = []
+frames: list[tuple[str, float]] = []
 for i, f in enumerate(files):
 img = Image.open(f).convert("RGB")
 if max(img.size) > self.max_image_size:
@@ -119,7 +118,7 @@ class VideoAnalyzer:
 return frames

 # ---- 批量分析 ----
-async def _analyze_batch(self, frames: List[Tuple[str, float]], question: Optional[str]) -> str:
+async def _analyze_batch(self, frames: list[tuple[str, float]], question: str | None) -> str:
 from src.llm_models.payload_content.message import MessageBuilder, RoleType
 from src.llm_models.utils_model import RequestType
 prompt = self.batch_analysis_prompt.format(
@@ -149,8 +148,8 @@ class VideoAnalyzer:
 return resp.content or "❌ 未获得响应"

 # ---- 逐帧分析 ----
-async def _analyze_sequential(self, frames: List[Tuple[str, float]], question: Optional[str]) -> str:
-results: List[str] = []
+async def _analyze_sequential(self, frames: list[tuple[str, float]], question: str | None) -> str:
+results: list[str] = []
 for i, (b64, ts) in enumerate(frames):
 prompt = f"分析第{i+1}帧" + (f" (时间: {ts:.2f}s)" if self.enable_frame_timing else "")
 if question:
@@ -174,7 +173,7 @@ class VideoAnalyzer:
 return "\n".join(results)

 # ---- 主入口 ----
-async def analyze_video(self, video_path: str, question: Optional[str] = None) -> Tuple[bool, str]:
+async def analyze_video(self, video_path: str, question: str | None = None) -> tuple[bool, str]:
 if not os.path.exists(video_path):
 return False, "❌ 文件不存在"
 frames = await self.extract_keyframes(video_path)
@@ -189,10 +188,10 @@ class VideoAnalyzer:
 async def analyze_video_from_bytes(
 self,
 video_bytes: bytes,
-filename: Optional[str] = None,
-prompt: Optional[str] = None,
-question: Optional[str] = None,
-) -> Dict[str, str]:
+filename: str | None = None,
+prompt: str | None = None,
+question: str | None = None,
+) -> dict[str, str]:
 """从内存字节分析视频,兼容旧调用 (prompt / question 二选一) 返回 {"summary": str}."""
 if not video_bytes:
 return {"summary": "❌ 空视频数据"}
@@ -271,7 +270,7 @@ class VideoAnalyzer:


 # ---- 外部接口 ----
-_INSTANCE: Optional[VideoAnalyzer] = None
+_INSTANCE: VideoAnalyzer | None = None


 def get_video_analyzer() -> VideoAnalyzer:
@@ -285,7 +284,7 @@ def is_video_analysis_available() -> bool:
 return True


-def get_video_analysis_status() -> Dict[str, Any]:
+def get_video_analysis_status() -> dict[str, Any]:
 try:
 info = video.get_system_info() # type: ignore[attr-defined]
 except Exception as e: # pragma: no cover
@@ -5,7 +5,6 @@ MCP (Model Context Protocol) SSE (Server-Sent Events) 客户端实现

 import asyncio
 import io
 import json
 from collections.abc import Callable
 from typing import Any

@@ -20,7 +19,6 @@ from ..exceptions import (
 NetworkConnectionError,
 ReqAbortException,
 RespNotOkException,
 RespParseException,
 )
 from ..payload_content.message import Message, RoleType
 from ..payload_content.resp_format import RespFormat
@@ -6,7 +6,7 @@ import time
 import traceback
 from functools import partial
 from random import choices
-from typing import Any, List, Tuple
+from typing import Any

 from maim_message import MessageServer
 from rich.traceback import install
@@ -36,7 +36,7 @@ install(extra_lines=3)
 logger = get_logger("main")

 # 预定义彩蛋短语,避免在每次初始化时重新创建
-EGG_PHRASES: List[Tuple[str, int]] = [
+EGG_PHRASES: list[tuple[str, int]] = [
 ("我们的代码里真的没有bug,只有'特性'。", 10),
 ("你知道吗?阿范喜欢被切成臊子😡", 10),
 ("你知道吗,雅诺狐的耳朵其实很好摸", 5),
@@ -84,7 +84,7 @@ class MainSystem:
 self._setup_signal_handlers()

 # 存储清理任务的引用
-self._cleanup_tasks: List[asyncio.Task] = []
+self._cleanup_tasks: list[asyncio.Task] = []

 def _setup_signal_handlers(self) -> None:
 """设置信号处理器"""
@@ -330,7 +330,7 @@ class MainSystem:
 async def initialize(self) -> None:
 """初始化系统组件"""
 # 检查必要的配置
-if not hasattr(global_config, 'bot') or not hasattr(global_config.bot, 'nickname'):
+if not hasattr(global_config, "bot") or not hasattr(global_config.bot, "nickname"):
 logger.error("缺少必要的bot配置")
 raise ValueError("Bot配置不完整")

@@ -19,7 +19,7 @@ from src.common.logger import get_logger
 from src.plugin_system.base.component_types import ActionInfo

 if TYPE_CHECKING:
-from src.chat.replyer.default_generator import DefaultReplyer
+pass

 install(extra_lines=3)

@@ -3,11 +3,9 @@ MCP (Model Context Protocol) 连接器
 负责连接MCP服务器,获取和执行工具
 """

 import asyncio
 from typing import Any

 import aiohttp
 import orjson

 from src.common.logger import get_logger

@@ -3,7 +3,6 @@ MCP工具提供器 - 简化版
 直接集成到工具系统,无需复杂的插件架构
 """

 import asyncio
 from typing import Any

 from src.common.logger import get_logger
@@ -4,9 +4,10 @@
 """

 import time
-import orjson
 from typing import TYPE_CHECKING

+import orjson
+
 from src.chat.interest_system import bot_interest_manager
 from src.common.logger import get_logger
 from src.config.config import global_config
@@ -230,11 +230,11 @@ class ChatterPlanExecutor:
 except Exception as e:
 error_message = str(e)
 logger.error(f"执行回复动作失败: {action_info.action_type}, 错误: {error_message}")
-'''
+"""
 # 记录用户关系追踪
 if success and action_info.action_message:
 await self._track_user_interaction(action_info, plan, reply_content)
-'''
+"""
 execution_time = time.time() - start_time
 self.execution_stats["execution_times"].append(execution_time)

@@ -10,10 +10,10 @@ from typing import TYPE_CHECKING, Any
 from src.common.logger import get_logger
 from src.config.config import global_config
 from src.mood.mood_manager import mood_manager
+from src.plugin_system.base.component_types import ChatMode
 from src.plugins.built_in.affinity_flow_chatter.plan_executor import ChatterPlanExecutor
 from src.plugins.built_in.affinity_flow_chatter.plan_filter import ChatterPlanFilter
 from src.plugins.built_in.affinity_flow_chatter.plan_generator import ChatterPlanGenerator
-from src.plugin_system.base.component_types import ChatMode

 if TYPE_CHECKING:
 from src.chat.planner_actions.action_manager import ChatterActionManager
@@ -6,9 +6,7 @@ SearXNG search engine implementation

 from __future__ import annotations

-import asyncio
-import functools
-from typing import Any, List
+from typing import Any

 import httpx

@@ -39,13 +37,13 @@ class SearXNGSearchEngine(BaseSearchEngine):
 instances = config_api.get_global_config("web_search.searxng_instances", None)
 if isinstance(instances, list):
 # 过滤空值
-self.instances: List[str] = [u.rstrip("/") for u in instances if isinstance(u, str) and u.strip()]
+self.instances: list[str] = [u.rstrip("/") for u in instances if isinstance(u, str) and u.strip()]
 else:
 self.instances = []

 api_keys = config_api.get_global_config("web_search.searxng_api_keys", None)
 if isinstance(api_keys, list):
-self.api_keys: List[str | None] = [k.strip() if isinstance(k, str) and k.strip() else None for k in api_keys]
+self.api_keys: list[str | None] = [k.strip() if isinstance(k, str) and k.strip() else None for k in api_keys]
 else:
 self.api_keys = []

@@ -85,7 +83,7 @@
 results.extend(instance_results)
 if len(results) >= num_results:
 break
-except Exception as e: # noqa: BLE001
+except Exception as e:
 logger.warning(f"SearXNG 实例 {base_url} 调用失败: {e}")
 continue

@@ -116,12 +114,12 @@
 try:
 resp = await self._client.get(url, params=params, headers=headers)
 resp.raise_for_status()
-except Exception as e: # noqa: BLE001
+except Exception as e:
 raise RuntimeError(f"请求失败: {e}") from e

 try:
 data = resp.json()
-except Exception as e: # noqa: BLE001
+except Exception as e:
 raise RuntimeError(f"解析 JSON 失败: {e}") from e

 raw_results = data.get("results", []) if isinstance(data, dict) else []
@@ -141,5 +139,5 @@
 async def __aenter__(self):
 return self

-async def __aexit__(self, exc_type, exc, tb): # noqa: D401
+async def __aexit__(self, exc_type, exc, tb):
 await self._client.aclose()
@@ -41,8 +41,8 @@ class WEBSEARCHPLUGIN(BasePlugin):
 from .engines.bing_engine import BingSearchEngine
 from .engines.ddg_engine import DDGSearchEngine
 from .engines.exa_engine import ExaSearchEngine
-from .engines.tavily_engine import TavilySearchEngine
 from .engines.searxng_engine import SearXNGSearchEngine
+from .engines.tavily_engine import TavilySearchEngine

 # 实例化所有搜索引擎,这会触发API密钥管理器的初始化
 exa_engine = ExaSearchEngine()
@@ -13,8 +13,8 @@ from src.plugin_system.apis import config_api
 from ..engines.bing_engine import BingSearchEngine
 from ..engines.ddg_engine import DDGSearchEngine
 from ..engines.exa_engine import ExaSearchEngine
-from ..engines.tavily_engine import TavilySearchEngine
 from ..engines.searxng_engine import SearXNGSearchEngine
+from ..engines.tavily_engine import TavilySearchEngine
 from ..utils.formatters import deduplicate_results, format_search_results

 logger = get_logger("web_search_tool")