依旧修pyright喵~

This commit is contained in:
ikun-11451
2025-11-29 21:26:42 +08:00
parent 28719c1c89
commit 72e7492953
25 changed files with 170 additions and 104 deletions

View File

@@ -83,7 +83,7 @@ class ChatterManager:
inactive_streams = []
for stream_id, instance in self.instances.items():
if hasattr(instance, "get_activity_time"):
activity_time = instance.get_activity_time()
activity_time = getattr(instance, "get_activity_time")()
if (current_time - activity_time) > max_inactive_seconds:
inactive_streams.append(stream_id)

View File

@@ -104,7 +104,7 @@ class ChatterActionManager:
log_prefix=log_prefix,
shutting_down=shutting_down,
plugin_config=plugin_config,
action_message=action_message,
action_message=action_message, # type: ignore
)
logger.debug(f"创建Action实例成功: {action_name}")
@@ -173,6 +173,7 @@ class ChatterActionManager:
Returns:
执行结果
"""
assert global_config is not None
chat_stream = None
try:

View File

@@ -30,6 +30,7 @@ class ActionModifier:
def __init__(self, action_manager: ChatterActionManager, chat_id: str):
"""初始化动作处理器"""
assert model_config is not None
self.chat_id = chat_id
# chat_stream 和 log_prefix 将在异步方法中初始化
self.chat_stream: "ChatStream | None" = None
@@ -67,6 +68,7 @@ class ActionModifier:
处理后ActionManager 将包含最终的可用动作集,供规划器直接使用
"""
assert global_config is not None
# 初始化log_prefix
await self._initialize_log_prefix()
# 根据 stream_id 加载当前可用的动作

View File

@@ -240,6 +240,8 @@ class DefaultReplyer:
chat_stream: "ChatStream",
request_type: str = "replyer",
):
assert global_config is not None
assert model_config is not None
self.express_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type=request_type)
self.chat_stream = chat_stream
# 这些将在异步初始化中设置
@@ -267,6 +269,7 @@ class DefaultReplyer:
async def _build_auth_role_prompt(self) -> str:
"""根据主人配置生成额外提示词"""
assert global_config is not None
master_config = global_config.permission.master_prompt
if not master_config or not master_config.enable:
return ""
@@ -515,6 +518,7 @@ class DefaultReplyer:
Returns:
str: 表达习惯信息字符串
"""
assert global_config is not None
# 检查是否允许在此聊天流中使用表达
use_expression, _, _ = global_config.expression.get_expression_config_for_chat(self.chat_stream.stream_id)
if not use_expression:
@@ -583,6 +587,7 @@ class DefaultReplyer:
Returns:
str: 记忆信息字符串
"""
assert global_config is not None
# 检查是否启用三层记忆系统
if not (global_config.memory and global_config.memory.enable):
return ""
@@ -776,6 +781,7 @@ class DefaultReplyer:
Returns:
str: 关键词反应提示字符串,如果没有触发任何反应则为空字符串
"""
assert global_config is not None
if target is None:
return ""
@@ -834,6 +840,7 @@ class DefaultReplyer:
Returns:
str: 格式化的notice信息文本如果没有notice或未启用则返回空字符串
"""
assert global_config is not None
try:
logger.debug(f"开始构建notice块chat_id={chat_id}")
@@ -902,6 +909,7 @@ class DefaultReplyer:
Returns:
Tuple[str, str]: (已读历史消息prompt, 未读历史消息prompt)
"""
assert global_config is not None
try:
# 从message_manager获取真实的已读/未读消息
@@ -1002,6 +1010,7 @@ class DefaultReplyer:
"""
回退的已读/未读历史消息构建方法
"""
assert global_config is not None
# 通过is_read字段分离已读和未读消息
read_messages = []
unread_messages = []
@@ -1115,6 +1124,7 @@ class DefaultReplyer:
Returns:
str: 构建好的上下文
"""
assert global_config is not None
if available_actions is None:
available_actions = {}
chat_stream = self.chat_stream
@@ -1607,6 +1617,7 @@ class DefaultReplyer:
reply_to: str,
reply_message: dict[str, Any] | DatabaseMessages | None = None,
) -> str: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
assert global_config is not None
chat_stream = self.chat_stream
chat_id = chat_stream.stream_id
is_group_chat = bool(chat_stream.group_info)
@@ -1767,6 +1778,7 @@ class DefaultReplyer:
return prompt_text
async def llm_generate_content(self, prompt: str):
assert global_config is not None
with Timer("LLM生成", {}): # 内部计时器,可选保留
# 直接使用已初始化的模型实例
logger.info(f"使用模型集生成回复: {self.express_model.model_for_task}")
@@ -1792,6 +1804,8 @@ class DefaultReplyer:
return content, reasoning_content, model_name, tool_calls
async def get_prompt_info(self, message: str, sender: str, target: str):
assert global_config is not None
assert model_config is not None
related_info = ""
start_time = time.time()
from src.plugins.built_in.knowledge.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool
@@ -1843,6 +1857,7 @@ class DefaultReplyer:
return ""
async def build_relation_info(self, sender: str, target: str):
assert global_config is not None
# 获取用户ID
if sender == f"{global_config.bot.nickname}(你)":
return "你将要回复的是你自己发送的消息。"
@@ -1927,6 +1942,7 @@ class DefaultReplyer:
reply_to: 回复对象
reply_message: 回复的原始消息
"""
assert global_config is not None
return # 已禁用,保留函数签名以防其他地方有引用
# 以下代码已废弃,不再执行

View File

@@ -173,9 +173,10 @@ class SecurityManager:
pre_check_results = await asyncio.gather(*pre_check_tasks, return_exceptions=True)
# 筛选需要完整检查的检测器
checkers_to_run = [
c for c, need_check in zip(enabled_checkers, pre_check_results) if need_check is True
]
checkers_to_run = []
for c, need_check in zip(enabled_checkers, pre_check_results):
if need_check is True:
checkers_to_run.append(c)
if not checkers_to_run:
return SecurityCheckResult(
@@ -192,20 +193,22 @@ class SecurityManager:
results = await asyncio.gather(*check_tasks, return_exceptions=True)
# 过滤异常结果
valid_results = []
valid_results: list[SecurityCheckResult] = []
for checker, result in zip(checkers_to_run, results):
if isinstance(result, Exception):
if isinstance(result, BaseException):
logger.error(f"检测器 '{checker.name}' 执行失败: {result}")
continue
result.checker_name = checker.name
valid_results.append(result)
if isinstance(result, SecurityCheckResult):
result.checker_name = checker.name
valid_results.append(result)
# 合并结果
return self._merge_results(valid_results, time.time() - start_time)
async def _check_all(self, message: str, context: dict, start_time: float) -> SecurityCheckResult:
"""检测所有模式(顺序执行所有检测器)"""
results = []
results: list[SecurityCheckResult] = []
for checker in self._checkers:
if not checker.enabled:

View File

@@ -39,11 +39,13 @@ def replace_user_references_sync(
Returns:
str: 处理后的内容字符串
"""
assert global_config is not None
if not content:
return ""
if name_resolver is None:
def default_resolver(platform: str, user_id: str) -> str:
assert global_config is not None
# 检查是否是机器人自己
if replace_bot_name and (user_id == str(global_config.bot.qq_account)):
return f"{global_config.bot.nickname}(你)"
@@ -116,10 +118,12 @@ async def replace_user_references_async(
Returns:
str: 处理后的内容字符串
"""
assert global_config is not None
if name_resolver is None:
person_info_manager = get_person_info_manager()
async def default_resolver(platform: str, user_id: str) -> str:
assert global_config is not None
# 检查是否是机器人自己
if replace_bot_name and (user_id == str(global_config.bot.qq_account)):
return f"{global_config.bot.nickname}(你)"
@@ -392,7 +396,7 @@ async def get_actions_by_timestamp_with_chat_inclusive(
actions = list(result.scalars())
return [action.__dict__ for action in reversed(actions)]
else: # earliest
result = await session.execute(
query = await session.execute(
select(ActionRecords)
.where(
and_(
@@ -540,6 +544,7 @@ async def _build_readable_messages_internal(
Returns:
包含格式化消息的字符串、原始消息详情列表、图片映射字典和更新后的计数器的元组。
"""
assert global_config is not None
if not messages:
return "", [], pic_id_mapping or {}, pic_counter
@@ -694,6 +699,7 @@ async def _build_readable_messages_internal(
percentile = i / n_messages # 计算消息在列表中的位置百分比 (0 <= percentile < 1)
original_len = len(content)
limit = -1 # 默认不截断
replace_content = ""
if percentile < 0.2: # 60% 之前的消息 (即最旧的 60%)
limit = 50
@@ -973,6 +979,7 @@ async def build_readable_messages(
truncate: 是否截断长消息
show_actions: 是否显示动作记录
"""
assert global_config is not None
# 创建messages的深拷贝避免修改原始列表
if not messages:
return ""
@@ -1112,6 +1119,7 @@ async def build_anonymous_messages(messages: list[dict[str, Any]]) -> str:
构建匿名可读消息将不同人的名称转为唯一占位符A、B、C...bot自己用SELF。
处理 回复<aaa:bbb> 和 @<aaa:bbb> 字段将bbb映射为匿名占位符。
"""
assert global_config is not None
if not messages:
print("111111111111没有消息无法构建匿名消息")
return ""
@@ -1127,6 +1135,7 @@ async def build_anonymous_messages(messages: list[dict[str, Any]]) -> str:
def get_anon_name(platform, user_id):
# print(f"get_anon_name: platform:{platform}, user_id:{user_id}")
# print(f"global_config.bot.qq_account:{global_config.bot.qq_account}")
assert global_config is not None
if user_id == global_config.bot.qq_account:
# print("SELF11111111111111")
@@ -1204,6 +1213,7 @@ async def get_person_id_list(messages: list[dict[str, Any]]) -> list[str]:
Returns:
一个包含唯一 person_id 的列表。
"""
assert global_config is not None
person_ids_set = set() # 使用集合来自动去重
for msg in messages:

View File

@@ -649,6 +649,7 @@ class Prompt:
async def _build_expression_habits(self) -> dict[str, Any]:
"""构建表达习惯(如表情、口癖)的上下文块."""
assert global_config is not None
# 检查当前聊天是否启用了表达习惯功能
use_expression, _, _ = global_config.expression.get_expression_config_for_chat(
self.parameters.chat_id
@@ -728,6 +729,7 @@ class Prompt:
async def _build_tool_info(self) -> dict[str, Any]:
"""构建工具调用结果的上下文块."""
assert global_config is not None
if not global_config.tool.enable_tool:
return {"tool_info_block": ""}
@@ -779,6 +781,7 @@ class Prompt:
async def _build_knowledge_info(self) -> dict[str, Any]:
"""构建从知识库检索到的相关信息的上下文块."""
assert global_config is not None
if not global_config.lpmm_knowledge.enable:
return {"knowledge_prompt": ""}
@@ -873,6 +876,7 @@ class Prompt:
def _prepare_s4u_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
"""为S4UScene for You模式准备最终用于格式化的参数字典."""
assert global_config is not None
return {
**context_data,
"expression_habits_block": context_data.get("expression_habits_block", ""),
@@ -915,6 +919,7 @@ class Prompt:
def _prepare_normal_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
"""为Normal模式准备最终用于格式化的参数字典."""
assert global_config is not None
return {
**context_data,
"expression_habits_block": context_data.get("expression_habits_block", ""),
@@ -959,6 +964,7 @@ class Prompt:
def _prepare_default_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
"""为默认模式(或其他未指定模式)准备最终用于格式化的参数字典."""
assert global_config is not None
return {
"expression_habits_block": context_data.get("expression_habits_block", ""),
"relation_info_block": context_data.get("relation_info_block", ""),
@@ -1143,6 +1149,7 @@ class Prompt:
Returns:
str: 构建好的跨群聊上下文字符串。
"""
assert global_config is not None
if not global_config.cross_context.enable:
return ""

View File

@@ -338,6 +338,7 @@ class HTMLReportGenerator:
# 渲染模板
# 读取CSS和JS文件内容
assert isinstance(self.jinja_env.loader, FileSystemLoader)
async with aiofiles.open(os.path.join(self.jinja_env.loader.searchpath[0], "report.css"), encoding="utf-8") as f:
report_css = await f.read()
async with aiofiles.open(os.path.join(self.jinja_env.loader.searchpath[0], "report.js"), encoding="utf-8") as f:

View File

@@ -192,7 +192,7 @@ class StatisticOutputTask(AsyncTask):
self._statistic_console_output(stats, now)
# 使用新的 HTMLReportGenerator 生成报告
chart_data = await self._collect_chart_data(stats)
deploy_time = datetime.fromtimestamp(local_storage.get("deploy_time", now.timestamp()))
deploy_time = datetime.fromtimestamp(float(local_storage.get("deploy_time", now.timestamp()))) # type: ignore
report_generator = HTMLReportGenerator(
name_mapping=self.name_mapping,
stat_period=self.stat_period,
@@ -219,7 +219,7 @@ class StatisticOutputTask(AsyncTask):
# 使用新的 HTMLReportGenerator 生成报告
chart_data = await self._collect_chart_data(stats)
deploy_time = datetime.fromtimestamp(local_storage.get("deploy_time", now.timestamp()))
deploy_time = datetime.fromtimestamp(float(local_storage.get("deploy_time", now.timestamp()))) # type: ignore
report_generator = HTMLReportGenerator(
name_mapping=self.name_mapping,
stat_period=self.stat_period,

View File

@@ -49,6 +49,7 @@ def is_mentioned_bot_in_message(message) -> tuple[bool, float]:
tuple[bool, float]: (是否提及, 提及类型)
提及类型: 0=未提及, 1=弱提及(文本匹配), 2=强提及(@/回复/私聊)
"""
assert global_config is not None
nicknames = global_config.bot.alias_names
mention_type = 0 # 0=未提及, 1=弱提及, 2=强提及
@@ -132,6 +133,7 @@ def is_mentioned_bot_in_message(message) -> tuple[bool, float]:
async def get_embedding(text, request_type="embedding") -> list[float] | None:
"""获取文本的embedding向量"""
assert model_config is not None
# 每次都创建新的LLMRequest实例以避免事件循环冲突
llm = LLMRequest(model_set=model_config.model_task_config.embedding, request_type=request_type)
try:
@@ -139,11 +141,12 @@ async def get_embedding(text, request_type="embedding") -> list[float] | None:
except Exception as e:
logger.error(f"获取embedding失败: {e!s}")
embedding = None
return embedding
return embedding # type: ignore
async def get_recent_group_speaker(chat_stream_id: str, sender, limit: int = 12) -> list:
# 获取当前群聊记录内发言的人
assert global_config is not None
filter_query = {"chat_id": chat_stream_id}
sort_order = [("time", -1)]
recent_messages = await find_messages(message_filter=filter_query, sort=sort_order, limit=limit)
@@ -400,11 +403,12 @@ def recover_quoted_content(sentences: list[str], placeholder_map: dict[str, str]
recovered_sentences.append(sentence)
return recovered_sentences
def process_llm_response(text: str, enable_splitter: bool = True, enable_chinese_typo: bool = True) -> list[str]:
assert global_config is not None
if not global_config.response_post_process.enable_response_post_process:
return [text]
# --- 三层防护系统 ---
# --- 三层防护系统 ---
# 第一层:保护颜文字
protected_text, kaomoji_mapping = protect_kaomoji(text) if global_config.response_splitter.enable_kaomoji_protection else (text, {})

View File

@@ -64,8 +64,6 @@ class ImageManager:
# except Exception as e:
# logger.error(f"数据库连接失败: {e}")
self._initialized = True
def _ensure_image_dir(self):
    """Ensure the image storage directory exists, creating it if necessary."""
    # exist_ok=True makes this idempotent and safe to call on every startup;
    # makedirs also creates any missing intermediate directories.
    os.makedirs(self.IMAGE_DIR, exist_ok=True)
@@ -159,6 +157,7 @@ class ImageManager:
async def get_emoji_description(self, image_base64: str) -> str:
"""获取表情包描述统一使用EmojiManager中的逻辑进行处理和缓存"""
try:
assert global_config is not None
from src.chat.emoji_system.emoji_manager import get_emoji_manager
emoji_manager = get_emoji_manager()
@@ -190,7 +189,7 @@ class ImageManager:
return "[表情包(描述生成失败)]"
# 4. (可选) 如果启用了“偷表情包”,则将图片和完整描述存入待注册区
if global_config and global_config.emoji and global_config.emoji.steal_emoji:
if global_config.emoji and global_config.emoji.steal_emoji:
logger.debug(f"偷取表情包功能已开启,保存待注册表情包: {image_hash}")
try:
image_format = (Image.open(io.BytesIO(image_bytes)).format or "jpeg").lower()

View File

@@ -44,6 +44,8 @@ class VideoAnalyzer:
"""基于 inkfox 的视频关键帧 + LLM 描述分析器"""
def __init__(self) -> None:
assert global_config is not None
assert model_config is not None
cfg = getattr(global_config, "video_analysis", object())
self.max_frames: int = getattr(cfg, "max_frames", 20)
self.frame_quality: int = getattr(cfg, "frame_quality", 85)

View File

@@ -135,6 +135,8 @@ class LegacyVideoAnalyzer:
def __init__(self):
"""初始化视频分析器"""
assert global_config is not None
assert model_config is not None
# 使用专用的视频分析配置
try:
self.video_llm = LLMRequest(

View File

@@ -11,6 +11,8 @@ logger = get_logger("chat_voice")
async def get_voice_text(voice_base64: str) -> str:
"""获取音频文件转录文本"""
assert global_config is not None
assert model_config is not None
if not global_config.voice.enable_asr:
logger.warning("语音识别未启用,无法处理语音消息")
return "[语音]"