Still fixing pyright errors, meow~
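Almost every hunk below applies the same fix: the module-level `global_config` / `model_config` objects are typed as Optional (they stay None until the configuration is loaded), so pyright flags attribute access on a possibly-None value. Adding `assert ... is not None` at the top of a function narrows the type for the checker without changing behaviour on the normal path. A minimal, self-contained sketch of that narrowing pattern (the class and field names here are illustrative, not the project's real config module):

from dataclasses import dataclass

@dataclass
class BotInfo:
    qq_account: str
    nickname: str

@dataclass
class Config:
    bot: BotInfo

# Typed as Optional because the config is only populated after startup loading.
global_config: Config | None = None

def load_config() -> None:
    global global_config
    global_config = Config(bot=BotInfo(qq_account="123456", nickname="Bot"))

def describe_bot() -> str:
    # Without this assert, pyright reports the attribute access below as an
    # optional-member-access error, since global_config may still be None here.
    assert global_config is not None
    return f"{global_config.bot.nickname}({global_config.bot.qq_account})"

load_config()
print(describe_bot())  # -> Bot(123456)

The remaining changes are variations on the same theme: `assert isinstance(self.jinja_env.loader, FileSystemLoader)` narrows the loader type before `.searchpath` is accessed, and where a value cannot be narrowed cleanly (the `deploy_time` read from `local_storage`, the embedding return value) the commit falls back to a `float(...)` cast or a `# type: ignore`.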
@@ -39,11 +39,13 @@ def replace_user_references_sync(
     Returns:
         str: 处理后的内容字符串
     """
+    assert global_config is not None
     if not content:
         return ""
 
     if name_resolver is None:
         def default_resolver(platform: str, user_id: str) -> str:
+            assert global_config is not None
             # 检查是否是机器人自己
             if replace_bot_name and (user_id == str(global_config.bot.qq_account)):
                 return f"{global_config.bot.nickname}(你)"
@@ -116,10 +118,12 @@ async def replace_user_references_async(
     Returns:
         str: 处理后的内容字符串
     """
+    assert global_config is not None
     if name_resolver is None:
         person_info_manager = get_person_info_manager()
 
         async def default_resolver(platform: str, user_id: str) -> str:
+            assert global_config is not None
             # 检查是否是机器人自己
             if replace_bot_name and (user_id == str(global_config.bot.qq_account)):
                 return f"{global_config.bot.nickname}(你)"
@@ -392,7 +396,7 @@ async def get_actions_by_timestamp_with_chat_inclusive(
             actions = list(result.scalars())
             return [action.__dict__ for action in reversed(actions)]
         else: # earliest
-            result = await session.execute(
+            query = await session.execute(
                 select(ActionRecords)
                 .where(
                     and_(
@@ -540,6 +544,7 @@ async def _build_readable_messages_internal(
     Returns:
         包含格式化消息的字符串、原始消息详情列表、图片映射字典和更新后的计数器的元组。
     """
+    assert global_config is not None
     if not messages:
         return "", [], pic_id_mapping or {}, pic_counter
 
@@ -694,6 +699,7 @@ async def _build_readable_messages_internal(
         percentile = i / n_messages # 计算消息在列表中的位置百分比 (0 <= percentile < 1)
         original_len = len(content)
         limit = -1 # 默认不截断
+        replace_content = ""
 
         if percentile < 0.2: # 60% 之前的消息 (即最旧的 60%)
             limit = 50
@@ -973,6 +979,7 @@ async def build_readable_messages(
         truncate: 是否截断长消息
         show_actions: 是否显示动作记录
     """
+    assert global_config is not None
     # 创建messages的深拷贝,避免修改原始列表
     if not messages:
         return ""
@@ -1112,6 +1119,7 @@ async def build_anonymous_messages(messages: list[dict[str, Any]]) -> str:
     构建匿名可读消息,将不同人的名称转为唯一占位符(A、B、C...),bot自己用SELF。
     处理 回复<aaa:bbb> 和 @<aaa:bbb> 字段,将bbb映射为匿名占位符。
     """
+    assert global_config is not None
     if not messages:
         print("111111111111没有消息,无法构建匿名消息")
         return ""
@@ -1127,6 +1135,7 @@ async def build_anonymous_messages(messages: list[dict[str, Any]]) -> str:
     def get_anon_name(platform, user_id):
         # print(f"get_anon_name: platform:{platform}, user_id:{user_id}")
         # print(f"global_config.bot.qq_account:{global_config.bot.qq_account}")
+        assert global_config is not None
 
         if user_id == global_config.bot.qq_account:
             # print("SELF11111111111111")
@@ -1204,6 +1213,7 @@ async def get_person_id_list(messages: list[dict[str, Any]]) -> list[str]:
     Returns:
         一个包含唯一 person_id 的列表。
     """
+    assert global_config is not None
     person_ids_set = set() # 使用集合来自动去重
 
     for msg in messages:

@@ -649,6 +649,7 @@ class Prompt:
 
     async def _build_expression_habits(self) -> dict[str, Any]:
         """构建表达习惯(如表情、口癖)的上下文块."""
+        assert global_config is not None
         # 检查当前聊天是否启用了表达习惯功能
         use_expression, _, _ = global_config.expression.get_expression_config_for_chat(
             self.parameters.chat_id
@@ -728,6 +729,7 @@ class Prompt:
 
     async def _build_tool_info(self) -> dict[str, Any]:
         """构建工具调用结果的上下文块."""
+        assert global_config is not None
         if not global_config.tool.enable_tool:
             return {"tool_info_block": ""}
 
@@ -779,6 +781,7 @@ class Prompt:
 
     async def _build_knowledge_info(self) -> dict[str, Any]:
         """构建从知识库检索到的相关信息的上下文块."""
+        assert global_config is not None
         if not global_config.lpmm_knowledge.enable:
             return {"knowledge_prompt": ""}
 
@@ -873,6 +876,7 @@ class Prompt:
 
     def _prepare_s4u_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
         """为S4U(Scene for You)模式准备最终用于格式化的参数字典."""
+        assert global_config is not None
         return {
             **context_data,
             "expression_habits_block": context_data.get("expression_habits_block", ""),
@@ -915,6 +919,7 @@ class Prompt:
 
     def _prepare_normal_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
         """为Normal模式准备最终用于格式化的参数字典."""
+        assert global_config is not None
         return {
             **context_data,
             "expression_habits_block": context_data.get("expression_habits_block", ""),
@@ -959,6 +964,7 @@ class Prompt:
 
     def _prepare_default_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
         """为默认模式(或其他未指定模式)准备最终用于格式化的参数字典."""
+        assert global_config is not None
         return {
             "expression_habits_block": context_data.get("expression_habits_block", ""),
             "relation_info_block": context_data.get("relation_info_block", ""),
@@ -1143,6 +1149,7 @@ class Prompt:
         Returns:
             str: 构建好的跨群聊上下文字符串。
         """
+        assert global_config is not None
         if not global_config.cross_context.enable:
             return ""
 

@@ -338,6 +338,7 @@ class HTMLReportGenerator:
 
        # 渲染模板
        # 读取CSS和JS文件内容
+        assert isinstance(self.jinja_env.loader, FileSystemLoader)
        async with aiofiles.open(os.path.join(self.jinja_env.loader.searchpath[0], "report.css"), encoding="utf-8") as f:
            report_css = await f.read()
        async with aiofiles.open(os.path.join(self.jinja_env.loader.searchpath[0], "report.js"), encoding="utf-8") as f:

@@ -192,7 +192,7 @@ class StatisticOutputTask(AsyncTask):
        self._statistic_console_output(stats, now)
        # 使用新的 HTMLReportGenerator 生成报告
        chart_data = await self._collect_chart_data(stats)
-        deploy_time = datetime.fromtimestamp(local_storage.get("deploy_time", now.timestamp()))
+        deploy_time = datetime.fromtimestamp(float(local_storage.get("deploy_time", now.timestamp()))) # type: ignore
        report_generator = HTMLReportGenerator(
            name_mapping=self.name_mapping,
            stat_period=self.stat_period,
@@ -219,7 +219,7 @@ class StatisticOutputTask(AsyncTask):
 
        # 使用新的 HTMLReportGenerator 生成报告
        chart_data = await self._collect_chart_data(stats)
-        deploy_time = datetime.fromtimestamp(local_storage.get("deploy_time", now.timestamp()))
+        deploy_time = datetime.fromtimestamp(float(local_storage.get("deploy_time", now.timestamp()))) # type: ignore
        report_generator = HTMLReportGenerator(
            name_mapping=self.name_mapping,
            stat_period=self.stat_period,

@@ -49,6 +49,7 @@ def is_mentioned_bot_in_message(message) -> tuple[bool, float]:
        tuple[bool, float]: (是否提及, 提及类型)
        提及类型: 0=未提及, 1=弱提及(文本匹配), 2=强提及(@/回复/私聊)
    """
+    assert global_config is not None
    nicknames = global_config.bot.alias_names
    mention_type = 0 # 0=未提及, 1=弱提及, 2=强提及
 
@@ -132,6 +133,7 @@ def is_mentioned_bot_in_message(message) -> tuple[bool, float]:
 
 async def get_embedding(text, request_type="embedding") -> list[float] | None:
    """获取文本的embedding向量"""
+    assert model_config is not None
    # 每次都创建新的LLMRequest实例以避免事件循环冲突
    llm = LLMRequest(model_set=model_config.model_task_config.embedding, request_type=request_type)
    try:
@@ -139,11 +141,12 @@ async def get_embedding(text, request_type="embedding") -> list[float] | None:
    except Exception as e:
        logger.error(f"获取embedding失败: {e!s}")
        embedding = None
-    return embedding
+    return embedding # type: ignore
 
 
 async def get_recent_group_speaker(chat_stream_id: str, sender, limit: int = 12) -> list:
    # 获取当前群聊记录内发言的人
+    assert global_config is not None
    filter_query = {"chat_id": chat_stream_id}
    sort_order = [("time", -1)]
    recent_messages = await find_messages(message_filter=filter_query, sort=sort_order, limit=limit)
@@ -400,11 +403,12 @@ def recover_quoted_content(sentences: list[str], placeholder_map: dict[str, str]
        recovered_sentences.append(sentence)
    return recovered_sentences
 
 
 def process_llm_response(text: str, enable_splitter: bool = True, enable_chinese_typo: bool = True) -> list[str]:
+    assert global_config is not None
    if not global_config.response_post_process.enable_response_post_process:
        return [text]
 
    # --- 三层防护系统 ---
    # 第一层:保护颜文字
    protected_text, kaomoji_mapping = protect_kaomoji(text) if global_config.response_splitter.enable_kaomoji_protection else (text, {})
 
@@ -64,8 +64,6 @@ class ImageManager:
        # except Exception as e:
        # logger.error(f"数据库连接失败: {e}")
 
-        self._initialized = True
-
    def _ensure_image_dir(self):
        """确保图像存储目录存在"""
        os.makedirs(self.IMAGE_DIR, exist_ok=True)
@@ -159,6 +157,7 @@ class ImageManager:
    async def get_emoji_description(self, image_base64: str) -> str:
        """获取表情包描述,统一使用EmojiManager中的逻辑进行处理和缓存"""
        try:
+            assert global_config is not None
            from src.chat.emoji_system.emoji_manager import get_emoji_manager
 
            emoji_manager = get_emoji_manager()
@@ -190,7 +189,7 @@ class ImageManager:
                return "[表情包(描述生成失败)]"
 
            # 4. (可选) 如果启用了"偷表情包",则将图片和完整描述存入待注册区
-            if global_config and global_config.emoji and global_config.emoji.steal_emoji:
+            if global_config.emoji and global_config.emoji.steal_emoji:
                logger.debug(f"偷取表情包功能已开启,保存待注册表情包: {image_hash}")
                try:
                    image_format = (Image.open(io.BytesIO(image_bytes)).format or "jpeg").lower()

@@ -44,6 +44,8 @@ class VideoAnalyzer:
    """基于 inkfox 的视频关键帧 + LLM 描述分析器"""
 
    def __init__(self) -> None:
+        assert global_config is not None
+        assert model_config is not None
        cfg = getattr(global_config, "video_analysis", object())
        self.max_frames: int = getattr(cfg, "max_frames", 20)
        self.frame_quality: int = getattr(cfg, "frame_quality", 85)
 
@@ -135,6 +135,8 @@ class LegacyVideoAnalyzer:
 
    def __init__(self):
        """初始化视频分析器"""
+        assert global_config is not None
+        assert model_config is not None
        # 使用专用的视频分析配置
        try:
            self.video_llm = LLMRequest(

@@ -11,6 +11,8 @@ logger = get_logger("chat_voice")
 
 async def get_voice_text(voice_base64: str) -> str:
    """获取音频文件转录文本"""
+    assert global_config is not None
+    assert model_config is not None
    if not global_config.voice.enable_asr:
        logger.warning("语音识别未启用,无法处理语音消息")
        return "[语音]"