Add LLM anti-prompt injection system

Introduces a comprehensive anti-prompt injection system for LLMs, including rule-based and LLM-based detection, user ban/whitelist management, message shielding, and statistics tracking. Adds new modules under src/chat/antipromptinjector, integrates anti-injection checks into the message receive flow, updates configuration and database models, and provides test scripts. Also updates templates and logger aliases to support the new system.
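
The detector code itself lives in the new src/chat/antipromptinjector modules, which this page does not show. As a rough illustration of the rule-based half of the detection pipeline, here is a minimal sketch in Python; the pattern list, names, and result shape are hypothetical and not taken from the commit:

```python
# Minimal sketch of a rule-based injection check.
# Module layout, patterns, and result type are hypothetical,
# not the actual code added under src/chat/antipromptinjector.
import re
from dataclasses import dataclass

# A few common injection markers; a real rule set would be far larger.
_PATTERNS = [
    re.compile(r"ignore (all )?previous instructions", re.IGNORECASE),
    re.compile(r"you are now .{0,40}jailbroken", re.IGNORECASE),
    re.compile(r"reveal (your )?system prompt", re.IGNORECASE),
]

@dataclass
class DetectionResult:
    injected: bool
    matched_rule: str | None = None

def rule_based_check(message: str) -> DetectionResult:
    """Return a positive result if any known injection pattern matches."""
    for pattern in _PATTERNS:
        if pattern.search(message):
            return DetectionResult(True, pattern.pattern)
    return DetectionResult(False)
```

In a layout like the one the commit describes, a cheap rule pass like this would run first on every incoming message, with the slower LLM-based check reserved for messages the rules do not already flag.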
Author: 雅诺狐
Date: 2025-08-18 17:27:59 +08:00
Parent: aaaf8f5ef7
Commit: 689aface9d
22 changed files with 2498 additions and 26 deletions

@@ -41,7 +41,8 @@ from src.config.official_configs import (
     DependencyManagementConfig,
     ExaConfig,
     WebSearchConfig,
-    TavilyConfig
+    TavilyConfig,
+    AntiPromptInjectionConfig
 )
 from .api_ada_configs import (
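
The hunk above only adds AntiPromptInjectionConfig to the import list; its actual fields are defined in src/config/official_configs.py, which this page does not show. A hypothetical shape, inferred from the feature list in the commit message (every field name below is a guess):

```python
# Hypothetical sketch of the imported config class; the real fields
# live in src/config/official_configs.py and are not in this diff.
from dataclasses import dataclass, field

@dataclass
class AntiPromptInjectionConfig:
    enabled: bool = False            # master switch for the whole system
    enable_rules: bool = True        # fast rule-based detection pass
    enable_llm_check: bool = False   # slower LLM-based detection pass
    auto_ban: bool = False           # ban repeat offenders automatically
    whitelist: list[str] = field(default_factory=list)  # exempt user IDs
```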
@@ -357,6 +358,8 @@ class Config(ConfigBase):
     custom_prompt: CustomPromptConfig
     voice: VoiceConfig
     schedule: ScheduleConfig
+    # Fields with default values go last
+    anti_prompt_injection: AntiPromptInjectionConfig = field(default_factory=lambda: AntiPromptInjectionConfig())
     video_analysis: VideoAnalysisConfig = field(default_factory=lambda: VideoAnalysisConfig())
     dependency_management: DependencyManagementConfig = field(default_factory=lambda: DependencyManagementConfig())
     exa: ExaConfig = field(default_factory=lambda: ExaConfig())
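
The comment in the hunk above (translated from Chinese: "fields with default values go last") reflects a hard dataclass rule rather than a style preference: a field without a default may not follow one that has a default (including a default_factory), or Python refuses to create the class. A minimal demonstration, unrelated to the commit's actual classes:

```python
from dataclasses import dataclass, field

@dataclass
class Ok:
    voice: str                                     # required field: must come first
    schedule: list = field(default_factory=list)   # defaulted field: goes last

# Swapping the two fields above fails at class-definition time with
# TypeError: non-default argument 'voice' follows default argument
```

This is why anti_prompt_injection is inserted alongside the other default_factory fields at the end of Config rather than next to the required fields it conceptually belongs with.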