diff --git a/bot.py b/bot.py
index 4cdb54ede..aeec5d0d6 100644
--- a/bot.py
+++ b/bot.py
@@ -296,8 +296,10 @@ class DatabaseManager:
# 使用线程执行器运行潜在的阻塞操作
await initialize_sql_database()
elapsed_time = time.time() - start_time
+
+ db_type = global_config.database.database_type if global_config else "unknown"
logger.info(
- f"数据库连接初始化成功,使用 {global_config.database.database_type} 数据库,耗时: {elapsed_time:.2f}秒"
+ f"数据库连接初始化成功,使用 {db_type} 数据库,耗时: {elapsed_time:.2f}秒"
)
return self
@@ -321,6 +323,10 @@ class ConfigurationValidator:
try:
from src.config.config import global_config
+ if global_config is None:
+ logger.error("全局配置未初始化")
+ return False
+
# 检查必要的配置节
required_sections = ["database", "bot"]
for section in required_sections:
diff --git a/scripts/clean_permission_nodes.py b/scripts/clean_permission_nodes.py
index ece37c8d5..38dfa94e0 100644
--- a/scripts/clean_permission_nodes.py
+++ b/scripts/clean_permission_nodes.py
@@ -29,7 +29,7 @@ async def clean_permission_nodes():
result = await session.execute(stmt)
await session.commit()
- deleted_count = result.rowcount if hasattr(result, "rowcount") else 0
+ deleted_count = getattr(result, "rowcount", 0)
logger.info(f"✅ 已清理 {deleted_count} 个权限节点记录")
print(f"✅ 已清理 {deleted_count} 个权限节点记录")
print("请重启应用以重新注册权限节点")
diff --git a/scripts/log_viewer.py b/scripts/log_viewer.py
new file mode 100644
index 000000000..88fff24ac
--- /dev/null
+++ b/scripts/log_viewer.py
@@ -0,0 +1,1107 @@
+#!/usr/bin/env python3
+"""
+MoFox-Core 日志查看器
+一个基于 HTTP 的日志查看服务,支持实时查看、搜索和筛选日志。
+
+用法:
+ python scripts/log_viewer.py
+ python -m scripts.log_viewer [--port PORT] [--host HOST]
+"""
+
+import argparse
+import gzip
+import json
+import re
+import sys
+import tarfile
+import threading
+import webbrowser
+from collections import defaultdict
+from dataclasses import dataclass
+from datetime import datetime
+from http.server import HTTPServer, SimpleHTTPRequestHandler
+from pathlib import Path
+from typing import Any
+from urllib.parse import parse_qs, urlparse
+
+# 添加项目根目录到路径(支持直接运行和模块运行)
+PROJECT_ROOT = Path(__file__).resolve().parent.parent
+if str(PROJECT_ROOT) not in sys.path:
+ sys.path.insert(0, str(PROJECT_ROOT))
+
+# 切换工作目录到项目根目录
+import os
+os.chdir(PROJECT_ROOT)
+
+# 日志目录
+LOG_DIR = PROJECT_ROOT / "logs"
+
+# 从 logger.py 导入颜色和别名配置
+DEFAULT_MODULE_COLORS = {}
+DEFAULT_MODULE_ALIASES = {}
+try:
+ from src.common.logger import (
+ DEFAULT_MODULE_ALIASES,
+ DEFAULT_MODULE_COLORS,
+ )
+except ImportError:
+ pass # 使用空字典
+
+
+@dataclass
+class LogEntry:
+ """日志条目"""
+
+ timestamp: str
+ level: str
+ logger_name: str
+ event: str
+ color: str | None = None
+ alias: str | None = None
+ extra: dict | None = None
+ line_number: int = 0
+ file_name: str = ""
+
+
+class LogReader:
+ """日志文件读取器"""
+
+ def __init__(self, log_dir: Path):
+ self.log_dir = log_dir
+ self._cache: dict[str, list[LogEntry]] = {}
+ self._cache_mtime: dict[str, float] = {}
+ self._filter_cache: dict[str, tuple[list[LogEntry], str]] = {} # 筛选结果缓存
+ self._lock = threading.Lock()
+
+ def get_log_files(self) -> list[dict[str, Any]]:
+ """获取所有日志文件列表"""
+ files = []
+ if not self.log_dir.exists():
+ return files
+
+ for f in sorted(self.log_dir.glob("app_*.log.jsonl*"), reverse=True):
+ try:
+ stat = f.stat()
+ is_compressed = f.suffix == ".gz" or ".tar.gz" in f.name
+ files.append(
+ {
+ "name": f.name,
+ "size": stat.st_size,
+ "size_human": self._human_size(stat.st_size),
+ "mtime": stat.st_mtime,
+ "mtime_human": datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S"),
+ "compressed": is_compressed,
+ }
+ )
+ except OSError:
+ continue
+ return files
+
+ def _human_size(self, size: int) -> str:
+ """转换为人类可读的文件大小"""
+ for unit in ["B", "KB", "MB", "GB"]:
+ if size < 1024:
+ return f"{size:.1f} {unit}"
+ size /= 1024
+ return f"{size:.1f} TB"
+
+ def read_log_file(self, filename: str, use_cache: bool = True) -> list[LogEntry]:
+ """读取日志文件内容"""
+ filepath = self.log_dir / filename
+ if not filepath.exists():
+ return []
+
+ # 检查缓存
+ with self._lock:
+ if use_cache and filename in self._cache:
+ try:
+ current_mtime = filepath.stat().st_mtime
+ if self._cache_mtime.get(filename) == current_mtime:
+ return self._cache[filename]
+ except OSError:
+ pass
+
+ entries = []
+ try:
+ # 处理压缩文件
+ if ".tar.gz" in filename:
+ entries = self._read_tar_gz(filepath)
+ elif filename.endswith(".gz"):
+ entries = self._read_gzip(filepath)
+ else:
+ entries = self._read_plain(filepath)
+
+ # 更新缓存
+ with self._lock:
+ self._cache[filename] = entries
+ try:
+ self._cache_mtime[filename] = filepath.stat().st_mtime
+ except OSError:
+ pass
+
+ except Exception as e:
+            print(f"读取日志文件 {filename} 时出错: {e}")
+
+ return entries
+
+ def _read_plain(self, filepath: Path) -> list[LogEntry]:
+ """读取普通日志文件"""
+ entries = []
+ with open(filepath, encoding="utf-8", errors="replace") as f:
+ for line_num, line in enumerate(f, 1):
+ entry = self._parse_line(line, line_num, filepath.name)
+ if entry:
+ entries.append(entry)
+ return entries
+
+ def _read_gzip(self, filepath: Path) -> list[LogEntry]:
+ """读取 gzip 压缩的日志文件"""
+ entries = []
+ with gzip.open(filepath, "rt", encoding="utf-8", errors="replace") as f:
+ for line_num, line in enumerate(f, 1):
+ entry = self._parse_line(line, line_num, filepath.name)
+ if entry:
+ entries.append(entry)
+ return entries
+
+ def _read_tar_gz(self, filepath: Path) -> list[LogEntry]:
+ """读取 tar.gz 压缩的日志文件"""
+ entries = []
+ try:
+ with tarfile.open(filepath, "r:gz") as tar:
+ for member in tar.getmembers():
+ if member.isfile():
+ f = tar.extractfile(member)
+ if f:
+ content = f.read().decode("utf-8", errors="replace")
+ for line_num, line in enumerate(content.splitlines(), 1):
+ entry = self._parse_line(line, line_num, filepath.name)
+ if entry:
+ entries.append(entry)
+ except Exception as e:
+ print(f"读取 tar.gz 文件 {filepath} 时出错: {e}")
+ return entries
+
+ def _parse_line(self, line: str, line_num: int, filename: str) -> LogEntry | None:
+ """解析单行日志"""
+ line = line.strip()
+ if not line:
+ return None
+
+ try:
+ data = json.loads(line)
+ logger_name = data.get("logger_name", "unknown")
+
+ # 获取颜色和别名(优先使用日志中的,否则使用默认配置)
+ color = data.get("color") or DEFAULT_MODULE_COLORS.get(logger_name)
+ alias = data.get("alias") or DEFAULT_MODULE_ALIASES.get(logger_name)
+
+ # 提取额外字段
+ extra = {k: v for k, v in data.items() if k not in ("timestamp", "level", "logger_name", "event", "color", "alias")}
+
+ return LogEntry(
+ timestamp=data.get("timestamp", ""),
+ level=data.get("level", "info"),
+ logger_name=logger_name,
+ event=data.get("event", ""),
+ color=color,
+ alias=alias,
+ extra=extra if extra else None,
+ line_number=line_num,
+ file_name=filename,
+ )
+ except json.JSONDecodeError:
+ # 非 JSON 格式的行,尝试作为纯文本处理
+ return LogEntry(
+ timestamp="",
+ level="info",
+ logger_name="raw",
+ event=line,
+ line_number=line_num,
+ file_name=filename,
+ )
+
+ def search_logs(
+ self,
+ filename: str,
+ query: str = "",
+ level: str = "",
+ logger: str = "",
+ start_time: str = "",
+ end_time: str = "",
+ limit: int = 1000,
+ offset: int = 0,
+ regex: bool = False,
+ ) -> tuple[list[LogEntry], int]:
+ """搜索和筛选日志"""
+ entries = self.read_log_file(filename)
+
+ # 如果没有筛选条件,直接返回分页结果
+ if not query and not level and not logger and not start_time and not end_time:
+ total = len(entries)
+ return entries[offset : offset + limit], total
+
+ # 生成筛选条件的缓存 key
+        cache_key = f"{filename}:{query}:{level}:{logger}:{start_time}:{end_time}:{regex}"
+
+ # 检查筛选缓存
+ with self._lock:
+ cached = self._filter_cache.get(filename)
+ if cached and cached[1] == cache_key:
+ filtered = cached[0]
+ return filtered[offset : offset + limit], len(filtered)
+
+ # 编译正则表达式(如果需要)
+ query_pattern = None
+ query_lower = ""
+ if query:
+ if regex:
+ try:
+ query_pattern = re.compile(query, re.IGNORECASE)
+ except re.error:
+ query_pattern = None
+ else:
+ query_lower = query.lower()
+
+ filtered = []
+ for entry in entries:
+ # 日志级别筛选
+ if level and entry.level.lower() != level.lower():
+ continue
+
+ # Logger 名称筛选
+ if logger and entry.logger_name.lower() != logger.lower():
+ continue
+
+ # 时间范围筛选
+ if start_time and entry.timestamp < start_time:
+ continue
+ if end_time and entry.timestamp > end_time:
+ continue
+
+ # 关键词搜索
+ if query:
+ if query_pattern:
+ if not (query_pattern.search(entry.event) or query_pattern.search(entry.logger_name) or (entry.alias and query_pattern.search(entry.alias))):
+ continue
+ else:
+ search_text = f"{entry.event} {entry.logger_name} {entry.alias or ''}".lower()
+ if query_lower not in search_text:
+ continue
+
+ filtered.append(entry)
+
+ # 更新筛选缓存
+ with self._lock:
+ self._filter_cache[filename] = (filtered, cache_key)
+
+ total = len(filtered)
+ return filtered[offset : offset + limit], total
+
+ def get_loggers(self, filename: str) -> list[dict[str, str]]:
+ """获取日志文件中的所有 logger"""
+ entries = self.read_log_file(filename)
+ loggers = {}
+ for entry in entries:
+ if entry.logger_name not in loggers:
+ loggers[entry.logger_name] = {
+ "name": entry.logger_name,
+ "alias": entry.alias or DEFAULT_MODULE_ALIASES.get(entry.logger_name, ""),
+ "color": entry.color or DEFAULT_MODULE_COLORS.get(entry.logger_name, ""),
+ }
+ return sorted(loggers.values(), key=lambda x: x["name"])
+
+ def get_stats(self, filename: str) -> dict[str, Any]:
+ """获取日志统计信息"""
+ entries = self.read_log_file(filename)
+
+ level_counts = defaultdict(int)
+ logger_counts = defaultdict(int)
+
+ for entry in entries:
+ level_counts[entry.level] += 1
+ logger_counts[entry.logger_name] += 1
+
+ return {
+ "total": len(entries),
+ "by_level": dict(level_counts),
+ "by_logger": dict(sorted(logger_counts.items(), key=lambda x: -x[1])[:20]),
+ }
+
+
+# HTML 模板
+HTML_TEMPLATE = """
+
+
+
+
+ 日志查看器
+
+
+
+
+
+
+
+
+
+
+
+"""
+
+
+class LogViewerHandler(SimpleHTTPRequestHandler):
+ """HTTP 请求处理器"""
+
+ log_reader: LogReader = None # type: ignore
+
+ def log_message(self, format, *args):
+ """自定义日志格式"""
+ print(f"[{datetime.now().strftime('%H:%M:%S')}] {args[0]}")
+
+ def do_GET(self):
+ """处理 GET 请求"""
+ parsed = urlparse(self.path)
+ path = parsed.path
+ query = parse_qs(parsed.query)
+
+ # API 路由
+ if path == "/":
+ self.send_html(HTML_TEMPLATE)
+ elif path == "/api/files":
+ self.send_json(self.log_reader.get_log_files())
+ elif path == "/api/logs":
+ self.handle_logs_api(query)
+ elif path == "/api/loggers":
+ filename = query.get("file", [""])[0]
+ self.send_json(self.log_reader.get_loggers(filename))
+ elif path == "/api/stats":
+ filename = query.get("file", [""])[0]
+ self.send_json(self.log_reader.get_stats(filename))
+ else:
+ self.send_error(404, "Not Found")
+
+ def handle_logs_api(self, query: dict):
+ """处理日志搜索 API"""
+ filename = query.get("file", [""])[0]
+ search_query = query.get("query", [""])[0]
+ level = query.get("level", [""])[0]
+ logger = query.get("logger", [""])[0]
+ regex = query.get("regex", ["false"])[0].lower() == "true"
+ limit = int(query.get("limit", ["100"])[0])
+ offset = int(query.get("offset", ["0"])[0])
+
+ logs, total = self.log_reader.search_logs(
+ filename=filename,
+ query=search_query,
+ level=level,
+ logger=logger,
+ limit=limit,
+ offset=offset,
+ regex=regex,
+ )
+
+ # 转换为可序列化的格式
+ logs_data = [
+ {
+ "timestamp": log.timestamp,
+ "level": log.level,
+ "logger_name": log.logger_name,
+ "event": log.event,
+ "color": log.color,
+ "alias": log.alias,
+ "extra": log.extra,
+ "line_number": log.line_number,
+ }
+ for log in logs
+ ]
+
+ self.send_json({"logs": logs_data, "total": total})
+
+ def send_html(self, content: str):
+ """发送 HTML 响应"""
+ encoded = content.encode("utf-8")
+ self.send_response(200)
+ self.send_header("Content-Type", "text/html; charset=utf-8")
+ self.send_header("Content-Length", str(len(encoded)))
+ self.end_headers()
+ self.wfile.write(encoded)
+
+ def send_json(self, data: Any):
+ """发送 JSON 响应"""
+ content = json.dumps(data, ensure_ascii=False, default=str)
+ encoded = content.encode("utf-8")
+ self.send_response(200)
+ self.send_header("Content-Type", "application/json; charset=utf-8")
+ self.send_header("Content-Length", str(len(encoded)))
+ self.end_headers()
+ self.wfile.write(encoded)
+
+
+def run_server(host: str = "127.0.0.1", port: int = 8765, open_browser: bool = True):
+ """启动 HTTP 服务器"""
+ # 初始化日志读取器
+ LogViewerHandler.log_reader = LogReader(LOG_DIR)
+
+ server = HTTPServer((host, port), LogViewerHandler)
+ url = f"http://{host}:{port}"
+
+ print(f"\n 📋 日志查看器已启动: {url}\n")
+
+ # 自动打开浏览器
+ if open_browser:
+ webbrowser.open(url)
+
+ try:
+ server.serve_forever()
+ except KeyboardInterrupt:
+ print("\n 服务器已停止")
+ server.shutdown()
+
+
+def main():
+ parser = argparse.ArgumentParser(description="日志查看器")
+ parser.add_argument("--host", default="127.0.0.1", help="服务器地址 (默认: 127.0.0.1)")
+ parser.add_argument("--port", type=int, default=8765, help="服务器端口 (默认: 8765)")
+ parser.add_argument("--no-browser", action="store_true", help="不自动打开浏览器")
+ args = parser.parse_args()
+
+ run_server(args.host, args.port, open_browser=not args.no_browser)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/__init__.py b/src/__init__.py
index 907af17a3..0ac0c9576 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -8,7 +8,7 @@ from src.common.logger import get_logger
egg = get_logger("小彩蛋")
-def weighted_choice(data: Sequence[str], weights: list[float] | None = None) -> str:
+def weighted_choice(data: Sequence[str], weights: Sequence[float] | None = None) -> str:
"""
从 data 中按权重随机返回一条。
若 weights 为 None,则所有元素权重默认为 1。
diff --git a/src/api/memory_visualizer_router.py b/src/api/memory_visualizer_router.py
index 9a74d9596..2a197651e 100644
--- a/src/api/memory_visualizer_router.py
+++ b/src/api/memory_visualizer_router.py
@@ -63,7 +63,7 @@ def find_available_data_files() -> list[Path]:
return sorted(files, key=lambda f: f.stat().st_mtime, reverse=True)
-def load_graph_data_from_file(file_path: Path | None = None) -> dict[str, Any]:
+async def load_graph_data_from_file(file_path: Path | None = None) -> dict[str, Any]:
"""从磁盘加载图数据"""
global graph_data_cache, current_data_file
diff --git a/src/api/message_router.py b/src/api/message_router.py
index 5e707cc95..b01103864 100644
--- a/src/api/message_router.py
+++ b/src/api/message_router.py
@@ -31,6 +31,8 @@ async def get_message_stats(
sent_count = 0
received_count = 0
+ if global_config is None:
+ raise HTTPException(status_code=500, detail="Global config is not initialized")
bot_qq = str(global_config.bot.qq_account)
for msg in messages:
@@ -73,6 +75,8 @@ async def get_message_stats_by_chat(
start_time = end_time - (days * 24 * 3600)
# 从数据库获取指定时间范围内的所有消息
messages = await message_api.get_messages_by_time(start_time, end_time)
+ if global_config is None:
+ raise HTTPException(status_code=500, detail="Global config is not initialized")
bot_qq = str(global_config.bot.qq_account)
# --- 2. 消息筛选 ---
diff --git a/src/chat/chatter_manager.py b/src/chat/chatter_manager.py
index 8cdf3fe43..18d3b8f09 100644
--- a/src/chat/chatter_manager.py
+++ b/src/chat/chatter_manager.py
@@ -111,7 +111,7 @@ class ChatterManager:
inactive_streams = []
for stream_id, instance in self.instances.items():
if hasattr(instance, "get_activity_time"):
- activity_time = instance.get_activity_time()
+ activity_time = getattr(instance, "get_activity_time")()
if (current_time - activity_time) > max_inactive_seconds:
inactive_streams.append(stream_id)
diff --git a/src/chat/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py
index 6a9437757..125907a6d 100644
--- a/src/chat/emoji_system/emoji_manager.py
+++ b/src/chat/emoji_system/emoji_manager.py
@@ -9,7 +9,7 @@ import random
import re
import time
import traceback
-from typing import Any, Optional
+from typing import Any, Optional, cast
from PIL import Image
from rich.traceback import install
@@ -401,6 +401,11 @@ class EmojiManager:
self._scan_task = None
+ if model_config is None:
+ raise RuntimeError("Model config is not initialized")
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
self.vlm = LLMRequest(model_set=model_config.model_task_config.emoji_vlm, request_type="emoji")
self.llm_emotion_judge = LLMRequest(
model_set=model_config.model_task_config.utils, request_type="emoji"
@@ -480,6 +485,8 @@ class EmojiManager:
return None
# 2. 根据全局配置决定候选表情包的数量
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
max_candidates = global_config.emoji.max_context_emojis
# 如果配置为0或者大于等于总数,则选择所有表情包
@@ -622,6 +629,8 @@ class EmojiManager:
async def start_periodic_check_register(self) -> None:
"""定期检查表情包完整性和数量"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
await self.get_all_emoji_from_db()
while True:
# logger.info("[扫描] 开始检查表情包完整性...")
@@ -771,8 +780,9 @@ class EmojiManager:
try:
emoji_record = await self.get_emoji_from_db(emoji_hash)
if emoji_record and emoji_record[0].emotion:
- logger.info(f"[缓存命中] 从数据库获取表情包描述: {emoji_record.emotion[:50]}...") # type: ignore # type: ignore
- return emoji_record.emotion # type: ignore
+ emotion_str = ",".join(emoji_record[0].emotion)
+ logger.info(f"[缓存命中] 从数据库获取表情包描述: {emotion_str[:50]}...")
+ return emotion_str
except Exception as e:
logger.error(f"从数据库查询表情包描述时出错: {e}")
@@ -803,7 +813,7 @@ class EmojiManager:
try:
from src.common.database.api.query import QueryBuilder
- emoji_record = await QueryBuilder(Emoji).filter(emoji_hash=emoji_hash).first()
+ emoji_record = cast(Emoji | None, await QueryBuilder(Emoji).filter(emoji_hash=emoji_hash).first())
if emoji_record and emoji_record.description:
logger.info(f"[缓存命中] 从数据库获取表情包描述: {emoji_record.description[:50]}...")
return emoji_record.description
@@ -880,6 +890,9 @@ class EmojiManager:
# 将表情包信息转换为可读的字符串
emoji_info_list = _emoji_objects_to_readable_list(selected_emojis)
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 构建提示词
prompt = (
f"{global_config.bot.nickname}的表情包存储已满({self.emoji_num}/{self.emoji_num_max}),"
@@ -954,6 +967,8 @@ class EmojiManager:
Tuple[str, List[str]]: 返回一个元组,第一个元素是详细描述,第二个元素是情感关键词列表。
如果处理失败,则返回空的描述和列表。
"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
try:
# 1. 解码图片,计算哈希值,并获取格式
if isinstance(image_base64, str):
@@ -967,7 +982,7 @@ class EmojiManager:
try:
from src.common.database.api.query import QueryBuilder
- existing_image = await QueryBuilder(Images).filter(emoji_hash=image_hash, type="emoji").first()
+ existing_image = cast(Images | None, await QueryBuilder(Images).filter(emoji_hash=image_hash, type="emoji").first())
if existing_image and existing_image.description:
existing_description = existing_image.description
logger.info(f"[复用描述] 找到已有详细描述: {existing_description[:50]}...")
diff --git a/src/chat/energy_system/energy_manager.py b/src/chat/energy_system/energy_manager.py
index 21e7f6e31..8f2fbe268 100644
--- a/src/chat/energy_system/energy_manager.py
+++ b/src/chat/energy_system/energy_manager.py
@@ -7,7 +7,7 @@ import time
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
-from typing import Any, TypedDict
+from typing import Any, Awaitable, TypedDict, cast
from src.common.database.api.crud import CRUDBase
from src.common.logger import get_logger
@@ -70,7 +70,7 @@ class EnergyCalculator(ABC):
"""能量计算器抽象基类"""
@abstractmethod
- def calculate(self, context: dict[str, Any]) -> float:
+ def calculate(self, context: "EnergyContext") -> float | Awaitable[float]:
"""计算能量值"""
pass
@@ -83,7 +83,7 @@ class EnergyCalculator(ABC):
class InterestEnergyCalculator(EnergyCalculator):
"""兴趣度能量计算器"""
- def calculate(self, context: dict[str, Any]) -> float:
+ def calculate(self, context: "EnergyContext") -> float:
"""基于消息兴趣度计算能量"""
messages = context.get("messages", [])
if not messages:
@@ -117,7 +117,7 @@ class ActivityEnergyCalculator(EnergyCalculator):
def __init__(self):
self.action_weights = {"reply": 0.4, "react": 0.3, "mention": 0.2, "other": 0.1}
- def calculate(self, context: dict[str, Any]) -> float:
+ def calculate(self, context: "EnergyContext") -> float:
"""基于活跃度计算能量"""
messages = context.get("messages", [])
if not messages:
@@ -147,7 +147,7 @@ class ActivityEnergyCalculator(EnergyCalculator):
class RecencyEnergyCalculator(EnergyCalculator):
"""最近性能量计算器"""
- def calculate(self, context: dict[str, Any]) -> float:
+ def calculate(self, context: "EnergyContext") -> float:
"""基于最近性计算能量"""
messages = context.get("messages", [])
if not messages:
@@ -194,7 +194,7 @@ class RecencyEnergyCalculator(EnergyCalculator):
class RelationshipEnergyCalculator(EnergyCalculator):
"""关系能量计算器 - 基于聊天流兴趣度"""
- async def calculate(self, context: dict[str, Any]) -> float:
+ async def calculate(self, context: "EnergyContext") -> float:
"""基于聊天流兴趣度计算能量"""
stream_id = context.get("stream_id")
if not stream_id:
@@ -260,6 +260,8 @@ class EnergyManager:
def _load_thresholds_from_config(self) -> None:
"""从配置加载AFC阈值"""
try:
+ if global_config is None:
+ return
if hasattr(global_config, "affinity_flow") and global_config.affinity_flow is not None:
self.thresholds["high_match"] = getattr(
global_config.affinity_flow, "high_match_interest_threshold", 0.8
@@ -283,17 +285,17 @@ class EnergyManager:
start_time = time.time()
# 更新统计
- self.stats["total_calculations"] += 1
+ self.stats["total_calculations"] = cast(int, self.stats["total_calculations"]) + 1
# 检查缓存
if stream_id in self.energy_cache:
cached_energy, cached_time = self.energy_cache[stream_id]
if time.time() - cached_time < self.cache_ttl:
- self.stats["cache_hits"] += 1
+ self.stats["cache_hits"] = cast(int, self.stats["cache_hits"]) + 1
logger.debug(f"使用缓存能量: {stream_id} = {cached_energy:.3f}")
return cached_energy
else:
- self.stats["cache_misses"] += 1
+ self.stats["cache_misses"] = cast(int, self.stats["cache_misses"]) + 1
# 构建计算上下文
context: EnergyContext = {
@@ -358,9 +360,10 @@ class EnergyManager:
# 更新平均计算时间
calculation_time = time.time() - start_time
- total_calculations = self.stats["total_calculations"]
+ total_calculations = cast(int, self.stats["total_calculations"])
+ current_avg = cast(float, self.stats["average_calculation_time"])
self.stats["average_calculation_time"] = (
- self.stats["average_calculation_time"] * (total_calculations - 1) + calculation_time
+ current_avg * (total_calculations - 1) + calculation_time
) / total_calculations
logger.debug(
@@ -424,8 +427,11 @@ class EnergyManager:
final_interval = base_interval * jitter
# 确保在配置范围内
- min_interval = getattr(global_config.chat, "dynamic_distribution_min_interval", 1.0)
- max_interval = getattr(global_config.chat, "dynamic_distribution_max_interval", 60.0)
+ min_interval = 1.0
+ max_interval = 60.0
+ if global_config is not None and hasattr(global_config, "chat"):
+ min_interval = getattr(global_config.chat, "dynamic_distribution_min_interval", 1.0)
+ max_interval = getattr(global_config.chat, "dynamic_distribution_max_interval", 60.0)
return max(min_interval, min(max_interval, final_interval))
@@ -487,10 +493,12 @@ class EnergyManager:
def get_cache_hit_rate(self) -> float:
"""获取缓存命中率"""
- total_requests = self.stats.get("cache_hits", 0) + self.stats.get("cache_misses", 0)
+ hits = cast(int, self.stats.get("cache_hits", 0))
+ misses = cast(int, self.stats.get("cache_misses", 0))
+ total_requests = hits + misses
if total_requests == 0:
return 0.0
- return self.stats["cache_hits"] / total_requests
+ return hits / total_requests
# 全局能量管理器实例
diff --git a/src/chat/express/expression_learner.py b/src/chat/express/expression_learner.py
index b0da82652..8ddf296d9 100644
--- a/src/chat/express/expression_learner.py
+++ b/src/chat/express/expression_learner.py
@@ -110,6 +110,8 @@ def init_prompt() -> None:
class ExpressionLearner:
def __init__(self, chat_id: str) -> None:
+ if model_config is None:
+ raise RuntimeError("Model config is not initialized")
self.express_learn_model: LLMRequest = LLMRequest(
model_set=model_config.model_task_config.replyer, request_type="expressor.learner"
)
@@ -143,7 +145,10 @@ class ExpressionLearner:
"""
# 从配置读取过期天数
if expiration_days is None:
- expiration_days = global_config.expression.expiration_days
+ if global_config is None:
+ expiration_days = 30 # Default value if config is missing
+ else:
+ expiration_days = global_config.expression.expiration_days
current_time = time.time()
expiration_threshold = current_time - (expiration_days * 24 * 3600)
@@ -192,6 +197,8 @@ class ExpressionLearner:
bool: 是否允许学习
"""
try:
+ if global_config is None:
+ return False
use_expression, enable_learning, _ = global_config.expression.get_expression_config_for_chat(self.chat_id)
return enable_learning
except Exception as e:
@@ -212,6 +219,8 @@ class ExpressionLearner:
# 获取该聊天流的学习强度
try:
+ if global_config is None:
+ return False
use_expression, enable_learning, learning_intensity = (
global_config.expression.get_expression_config_for_chat(self.chat_id)
)
@@ -424,8 +433,10 @@ class ExpressionLearner:
group_name = f"聊天流 {chat_id}"
elif chat_stream.group_info:
group_name = chat_stream.group_info.group_name
- else:
+ elif chat_stream.user_info and chat_stream.user_info.user_nickname:
group_name = f"{chat_stream.user_info.user_nickname}的私聊"
+ else:
+ group_name = f"聊天流 {chat_id}"
learnt_expressions_str = ""
for _chat_id, situation, style in learnt_expressions:
learnt_expressions_str += f"{situation}->{style}\n"
diff --git a/src/chat/express/expression_selector.py b/src/chat/express/expression_selector.py
index fe8500194..83fd0c4b8 100644
--- a/src/chat/express/expression_selector.py
+++ b/src/chat/express/expression_selector.py
@@ -78,6 +78,8 @@ def weighted_sample(population: list[dict], weights: list[float], k: int) -> lis
class ExpressionSelector:
def __init__(self, chat_id: str = ""):
self.chat_id = chat_id
+ if model_config is None:
+ raise RuntimeError("Model config is not initialized")
self.llm_model = LLMRequest(
model_set=model_config.model_task_config.utils_small, request_type="expression.selector"
)
@@ -94,6 +96,8 @@ class ExpressionSelector:
bool: 是否允许使用表达
"""
try:
+ if global_config is None:
+ return False
use_expression, _, _ = global_config.expression.get_expression_config_for_chat(chat_id)
return use_expression
except Exception as e:
@@ -122,6 +126,8 @@ class ExpressionSelector:
def get_related_chat_ids(self, chat_id: str) -> list[str]:
"""根据expression.rules配置,获取与当前chat_id相关的所有chat_id(包括自身)"""
+ if global_config is None:
+ return [chat_id]
rules = global_config.expression.rules
current_group = None
@@ -280,6 +286,9 @@ class ExpressionSelector:
else:
chat_info = chat_history
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 根据配置选择模式
mode = global_config.expression.mode
logger.debug(f"使用表达选择模式: {mode}")
@@ -582,6 +591,9 @@ class ExpressionSelector:
target_message_str = ""
target_message_extra_block = ""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 3. 构建prompt(只包含情境,不包含完整的表达方式)
prompt = (await global_prompt_manager.get_prompt_async("expression_evaluation_prompt")).format(
bot_name=global_config.bot.nickname,
diff --git a/src/chat/express/situation_extractor.py b/src/chat/express/situation_extractor.py
index f9924090c..2fd6c9205 100644
--- a/src/chat/express/situation_extractor.py
+++ b/src/chat/express/situation_extractor.py
@@ -42,6 +42,8 @@ class SituationExtractor:
"""情境提取器,从聊天历史中提取当前情境"""
def __init__(self):
+ if model_config is None:
+ raise RuntimeError("Model config is not initialized")
self.llm_model = LLMRequest(
model_set=model_config.model_task_config.utils_small,
request_type="expression.situation_extractor"
@@ -81,6 +83,8 @@ class SituationExtractor:
# 构建 prompt
try:
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
prompt = (await global_prompt_manager.get_prompt_async("situation_extraction_prompt")).format(
bot_name=global_config.bot.nickname,
chat_history=chat_info,
diff --git a/src/chat/interest_system/bot_interest_manager.py b/src/chat/interest_system/bot_interest_manager.py
index 21b1e9a6a..7843588be 100644
--- a/src/chat/interest_system/bot_interest_manager.py
+++ b/src/chat/interest_system/bot_interest_manager.py
@@ -5,7 +5,7 @@
import traceback
from datetime import datetime
-from typing import Any
+from typing import Any, cast
import numpy as np
from sqlalchemy import select
@@ -77,6 +77,9 @@ class BotInterestManager:
from src.config.config import model_config
from src.llm_models.utils_model import LLMRequest
+ if model_config is None:
+ raise RuntimeError("Model config is not initialized")
+
# 检查embedding配置是否存在
if not hasattr(model_config.model_task_config, "embedding"):
raise RuntimeError("❌ 未找到embedding模型配置")
@@ -251,6 +254,9 @@ class BotInterestManager:
from src.config.config import model_config
from src.plugin_system.apis import llm_api
+ if model_config is None:
+ raise RuntimeError("Model config is not initialized")
+
# 构建完整的提示词,明确要求只返回纯JSON
full_prompt = f"""你是一个专业的机器人人设分析师,擅长根据人设描述生成合适的兴趣标签。
@@ -348,9 +354,15 @@ class BotInterestManager:
embedding, model_name = await self.embedding_request.get_embedding(text)
if embedding and len(embedding) > 0:
- self.embedding_cache[text] = embedding
+ if isinstance(embedding[0], list):
+ # If it's a list of lists, take the first one (though get_embedding(str) should return list[float])
+ embedding = embedding[0]
+
+ # Now we can safely cast to list[float] as we've handled the nested list case
+ embedding_float = cast(list[float], embedding)
+ self.embedding_cache[text] = embedding_float
- current_dim = len(embedding)
+ current_dim = len(embedding_float)
if self._detected_embedding_dimension is None:
self._detected_embedding_dimension = current_dim
if self.embedding_dimension and self.embedding_dimension != current_dim:
@@ -367,7 +379,7 @@ class BotInterestManager:
self.embedding_dimension,
current_dim,
)
- return embedding
+ return embedding_float
else:
raise RuntimeError(f"❌ 返回的embedding为空: {embedding}")
@@ -416,7 +428,10 @@ class BotInterestManager:
for idx_offset, message_id in enumerate(chunk_keys):
vector = normalized[idx_offset] if idx_offset < len(normalized) else []
- results[message_id] = vector
+ if isinstance(vector, list) and vector and isinstance(vector[0], float):
+ results[message_id] = cast(list[float], vector)
+ else:
+ results[message_id] = []
return results
@@ -493,6 +508,9 @@ class BotInterestManager:
medium_similarity_count = 0
low_similarity_count = 0
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 分级相似度阈值 - 优化后可以提高阈值,因为匹配更准确了
affinity_config = global_config.affinity_flow
high_threshold = affinity_config.high_match_interest_threshold
@@ -711,6 +729,9 @@ class BotInterestManager:
if not keywords or not matched_tags:
return {}
+ if global_config is None:
+ return {}
+
affinity_config = global_config.affinity_flow
bonus_dict = {}
@@ -1010,7 +1031,10 @@ class BotInterestManager:
# 验证缓存版本和embedding模型
cache_version = cache_data.get("version", 1)
cache_embedding_model = cache_data.get("embedding_model", "")
- current_embedding_model = self.embedding_config.model_list[0] if hasattr(self.embedding_config, "model_list") else ""
+
+ current_embedding_model = ""
+ if self.embedding_config and hasattr(self.embedding_config, "model_list") and self.embedding_config.model_list:
+ current_embedding_model = self.embedding_config.model_list[0]
if cache_embedding_model != current_embedding_model:
logger.warning(f"⚠️ Embedding模型已变更 ({cache_embedding_model} → {current_embedding_model}),忽略旧缓存")
@@ -1044,7 +1068,10 @@ class BotInterestManager:
cache_file = cache_dir / f"{personality_id}_embeddings.json"
# 准备缓存数据
- current_embedding_model = self.embedding_config.model_list[0] if hasattr(self.embedding_config, "model_list") and self.embedding_config.model_list else ""
+ current_embedding_model = ""
+ if self.embedding_config and hasattr(self.embedding_config, "model_list") and self.embedding_config.model_list:
+ current_embedding_model = self.embedding_config.model_list[0]
+
cache_data = {
"version": 1,
"personality_id": personality_id,
diff --git a/src/chat/interest_system/interest_manager.py b/src/chat/interest_system/interest_manager.py
index fee57e8d7..8c749af36 100644
--- a/src/chat/interest_system/interest_manager.py
+++ b/src/chat/interest_system/interest_manager.py
@@ -144,6 +144,15 @@ class InterestManager:
start_time = time.time()
self._total_calculations += 1
+ if not self._current_calculator:
+ return InterestCalculationResult(
+ success=False,
+ message_id=getattr(message, "message_id", ""),
+ interest_value=0.0,
+ error_message="没有可用的兴趣值计算组件",
+ calculation_time=time.time() - start_time,
+ )
+
try:
# 使用组件的安全执行方法
result = await self._current_calculator._safe_execute(message)
diff --git a/src/chat/knowledge/embedding_store.py b/src/chat/knowledge/embedding_store.py
index b286fa968..1751b198d 100644
--- a/src/chat/knowledge/embedding_store.py
+++ b/src/chat/knowledge/embedding_store.py
@@ -2,6 +2,7 @@ import asyncio
import math
import os
from dataclasses import dataclass
+from typing import Any
# import tqdm
import aiofiles
@@ -121,7 +122,7 @@ class EmbeddingStore:
self.store = {}
- self.faiss_index = None
+ self.faiss_index: Any = None
self.idx2hash = None
@staticmethod
@@ -158,6 +159,8 @@ class EmbeddingStore:
from src.config.config import model_config
from src.llm_models.utils_model import LLMRequest
+ assert model_config is not None
+
# 限制 chunk_size 和 max_workers 在合理范围内
chunk_size = max(MIN_CHUNK_SIZE, min(chunk_size, MAX_CHUNK_SIZE))
max_workers = max(MIN_WORKERS, min(max_workers, MAX_WORKERS))
@@ -402,6 +405,7 @@ class EmbeddingStore:
def build_faiss_index(self) -> None:
"""重新构建Faiss索引,以余弦相似度为度量"""
+ assert global_config is not None
# 获取所有的embedding
array = []
self.idx2hash = {}
diff --git a/src/chat/knowledge/kg_manager.py b/src/chat/knowledge/kg_manager.py
index 87be8a405..05e7fe98c 100644
--- a/src/chat/knowledge/kg_manager.py
+++ b/src/chat/knowledge/kg_manager.py
@@ -1,5 +1,6 @@
import os
import time
+from typing import cast
import numpy as np
import orjson
@@ -139,6 +140,9 @@ class KGManager:
embedding_manager: EmbeddingManager,
) -> int:
"""同义词连接"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
new_edge_cnt = 0
# 获取所有实体节点的hash值
ent_hash_list = set()
@@ -242,7 +246,8 @@ class KGManager:
else:
# 已存在的边
edge_item = self.graph[src_tgt[0], src_tgt[1]]
- edge_item["weight"] += weight
+ edge_item = cast(di_graph.DiEdge, edge_item)
+ edge_item["weight"] = cast(float, edge_item["weight"]) + weight
edge_item["update_time"] = now_time
self.graph.update_edge(edge_item)
@@ -258,6 +263,7 @@ class KGManager:
continue
assert isinstance(node, EmbeddingStoreItem)
node_item = self.graph[node_hash]
+ node_item = cast(di_graph.DiNode, node_item)
node_item["content"] = node.str
node_item["type"] = "ent"
node_item["create_time"] = now_time
@@ -271,6 +277,7 @@ class KGManager:
assert isinstance(node, EmbeddingStoreItem)
content = node.str.replace("\n", " ")
node_item = self.graph[node_hash]
+ node_item = cast(di_graph.DiNode, node_item)
node_item["content"] = content if len(content) < 8 else content[:8] + "..."
node_item["type"] = "pg"
node_item["create_time"] = now_time
@@ -326,6 +333,9 @@ class KGManager:
paragraph_search_result: ParagraphEmbedding的搜索结果(paragraph_hash, similarity)
embed_manager: EmbeddingManager对象
"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 图中存在的节点总集
existed_nodes = self.graph.get_node_list()
@@ -339,9 +349,12 @@ class KGManager:
# 针对每个关系,提取出其中的主宾短语作为两个实体,并记录对应的三元组的相似度作为权重依据
ent_sim_scores = {}
- for relation_hash, similarity, _ in relation_search_result:
+ for relation_hash, similarity in relation_search_result:
# 提取主宾短语
- relation = embed_manager.relation_embedding_store.store.get(relation_hash).str
+ relation_item = embed_manager.relation_embedding_store.store.get(relation_hash)
+ if relation_item is None:
+ continue
+ relation = relation_item.str
assert relation is not None # 断言:relation不为空
# 关系三元组
triple = relation[2:-2].split("', '")
diff --git a/src/chat/knowledge/knowledge_lib.py b/src/chat/knowledge/knowledge_lib.py
index a1f49f314..51fdb34b1 100644
--- a/src/chat/knowledge/knowledge_lib.py
+++ b/src/chat/knowledge/knowledge_lib.py
@@ -36,6 +36,9 @@ def initialize_lpmm_knowledge():
"""初始化LPMM知识库"""
global qa_manager, inspire_manager
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 检查LPMM知识库是否启用
if global_config.lpmm_knowledge.enable:
logger.info("正在初始化Mai-LPMM")
diff --git a/src/chat/knowledge/qa_manager.py b/src/chat/knowledge/qa_manager.py
index 56fa6275f..35e268c24 100644
--- a/src/chat/knowledge/qa_manager.py
+++ b/src/chat/knowledge/qa_manager.py
@@ -1,5 +1,5 @@
import time
-from typing import Any
+from typing import Any, cast
from src.chat.utils.utils import get_embedding
from src.config.config import global_config, model_config
@@ -21,6 +21,8 @@ class QAManager:
embed_manager: EmbeddingManager,
kg_manager: KGManager,
):
+ if model_config is None:
+ raise RuntimeError("Model config is not initialized")
self.embed_manager = embed_manager
self.kg_manager = kg_manager
self.qa_model = LLMRequest(model_set=model_config.model_task_config.lpmm_qa, request_type="lpmm.qa")
@@ -29,6 +31,8 @@ class QAManager:
self, question: str
) -> tuple[list[tuple[str, float, float]], dict[str, float] | None] | None:
"""处理查询"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
# 生成问题的Embedding
part_start_time = time.perf_counter()
@@ -61,7 +65,7 @@ class QAManager:
for res in relation_search_res:
if store_item := self.embed_manager.relation_embedding_store.store.get(res[0]):
rel_str = store_item.str
- print(f"找到相关关系,相似度:{(res[1] * 100):.2f}% - {rel_str}")
+ print(f"找到相关关系,相似度:{(res[1] * 100):.2f}% - {rel_str}")
# TODO: 使用LLM过滤三元组结果
# logger.info(f"LLM过滤三元组用时:{time.time() - part_start_time:.2f}s")
@@ -80,8 +84,15 @@
             logger.info("找到相关关系,将使用RAG进行检索")
             # 使用KG检索
             part_start_time = time.perf_counter()
+            # search_top_k returns (id, score, vector) triples, while
+            # kg_search unpacks two-element (relation_hash, similarity)
+            # pairs, so adapt the result shape before handing it over.
+            relation_search_result_for_kg = [(str(res[0]), float(res[1])) for res in relation_search_res]
+
             result, ppr_node_weights = self.kg_manager.kg_search(
-                relation_search_res, paragraph_search_res, self.embed_manager
+                cast(list[tuple[tuple[str, str, str], float]], relation_search_result_for_kg),
+                paragraph_search_res,
+                self.embed_manager
             )
             part_end_time = time.perf_counter()
             logger.info(f"RAG检索用时:{part_end_time - part_start_time:.5f}s")
diff --git a/src/chat/message_manager/batch_database_writer.py b/src/chat/message_manager/batch_database_writer.py
index 0d379c01a..f5b9e1c18 100644
--- a/src/chat/message_manager/batch_database_writer.py
+++ b/src/chat/message_manager/batch_database_writer.py
@@ -51,13 +51,13 @@ class BatchDatabaseWriter:
self.writer_task: asyncio.Task | None = None
# 统计信息
- self.stats = {
+ self.stats: dict[str, int | float] = {
"total_writes": 0,
"batch_writes": 0,
"failed_writes": 0,
"queue_size": 0,
- "avg_batch_size": 0,
- "last_flush_time": 0,
+ "avg_batch_size": 0.0,
+ "last_flush_time": 0.0,
}
# 按优先级分类的批次
@@ -220,6 +220,9 @@ class BatchDatabaseWriter:
async def _batch_write_to_database(self, payloads: list[StreamUpdatePayload]):
"""批量写入数据库"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
async with get_db_session() as session:
for payload in payloads:
stream_id = payload.stream_id
@@ -254,11 +257,11 @@ class BatchDatabaseWriter:
stmt = stmt.on_conflict_do_update(index_elements=["stream_id"], set_=update_data)
await session.execute(stmt)
-
- await session.commit()
-
async def _direct_write(self, stream_id: str, update_data: dict[str, Any]):
"""直接写入数据库(降级方案)"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
async with get_db_session() as session:
if global_config.database.database_type == "sqlite":
from sqlalchemy.dialects.sqlite import insert as sqlite_insert
diff --git a/src/chat/message_manager/distribution_manager.py b/src/chat/message_manager/distribution_manager.py
index 5af3bb7d4..fc43e0959 100644
--- a/src/chat/message_manager/distribution_manager.py
+++ b/src/chat/message_manager/distribution_manager.py
@@ -23,6 +23,9 @@ class StreamLoopManager:
"""流循环管理器 - 每个流一个独立的无限循环任务"""
def __init__(self, max_concurrent_streams: int | None = None):
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 统计信息
self.stats: dict[str, Any] = {
"active_streams": 0,
@@ -570,7 +573,6 @@ class StreamLoopManager:
except Exception as e:
logger.warning(f"刷新StreamContext缓存失败: stream={stream_id}, error={e}")
return []
-
async def _update_stream_energy(self, stream_id: str, context: Any) -> None:
"""更新流的能量值
@@ -578,6 +580,9 @@ class StreamLoopManager:
stream_id: 流ID
context: 流上下文 (StreamContext)
"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
try:
from src.chat.message_receive.chat_stream import get_chat_manager
@@ -635,6 +640,9 @@ class StreamLoopManager:
Returns:
float: 间隔时间(秒)
"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
# 基础间隔
base_interval = getattr(global_config.chat, "distribution_interval", 5.0)
diff --git a/src/chat/message_manager/global_notice_manager.py b/src/chat/message_manager/global_notice_manager.py
index 4215cb939..255fa6d8c 100644
--- a/src/chat/message_manager/global_notice_manager.py
+++ b/src/chat/message_manager/global_notice_manager.py
@@ -66,7 +66,7 @@ class GlobalNoticeManager:
self._last_cleanup_time = time.time()
# 统计信息
- self.stats = {
+ self.stats: dict[str, Any] = {
"total_notices": 0,
"public_notices": 0,
"stream_notices": 0,
diff --git a/src/chat/message_manager/message_manager.py b/src/chat/message_manager/message_manager.py
index 5366c57c2..211a2f8fc 100644
--- a/src/chat/message_manager/message_manager.py
+++ b/src/chat/message_manager/message_manager.py
@@ -277,6 +277,9 @@ class MessageManager:
async def _check_and_handle_interruption(self, chat_stream: "ChatStream | None" = None, message: DatabaseMessages | None = None):
"""检查并处理消息打断 - 通过取消 stream_loop_task 实现"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
if not global_config.chat.interruption_enabled or not chat_stream or not message:
return
diff --git a/src/chat/message_receive/chat_stream.py b/src/chat/message_receive/chat_stream.py
index de04fbc7e..0d647687b 100644
--- a/src/chat/message_receive/chat_stream.py
+++ b/src/chat/message_receive/chat_stream.py
@@ -240,6 +240,9 @@ class ChatStream:
async def calculate_focus_energy(self) -> float:
"""异步计算focus_energy"""
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
try:
# 使用单流上下文管理器获取消息
all_messages = self.context.get_messages(limit=global_config.chat.max_context_size)
@@ -629,6 +632,9 @@ class ChatManager:
# 回退到原始方法(最终方案)
async def _db_save_stream_async(s_data_dict: dict):
+ if global_config is None:
+ raise RuntimeError("Global config is not initialized")
+
async with get_db_session() as session:
user_info_d = s_data_dict.get("user_info")
group_info_d = s_data_dict.get("group_info")
diff --git a/src/chat/message_receive/message_handler.py b/src/chat/message_receive/message_handler.py
index 29d78e2b3..46c86bcee 100644
--- a/src/chat/message_receive/message_handler.py
+++ b/src/chat/message_receive/message_handler.py
@@ -30,7 +30,7 @@ from __future__ import annotations
import os
import re
import traceback
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, cast
from mofox_wire import MessageEnvelope, MessageRuntime
@@ -55,6 +55,8 @@ PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
def _check_ban_words(text: str, chat: "ChatStream", userinfo) -> bool:
"""检查消息是否包含过滤词"""
+ if global_config is None:
+ return False
for word in global_config.message_receive.ban_words:
if word in text:
chat_name = chat.group_info.group_name if chat.group_info else "私聊"
@@ -62,10 +64,10 @@ def _check_ban_words(text: str, chat: "ChatStream", userinfo) -> bool:
logger.info(f"[过滤词识别]消息中含有{word},filtered")
return True
return False
-
-
def _check_ban_regex(text: str, chat: "ChatStream", userinfo) -> bool:
"""检查消息是否匹配过滤正则表达式"""
+ if global_config is None:
+ return False
for pattern in global_config.message_receive.ban_msgs_regex:
if re.search(pattern, text):
chat_name = chat.group_info.group_name if chat.group_info else "私聊"
@@ -281,8 +283,8 @@ class MessageHandler:
from src.chat.message_receive.chat_stream import get_chat_manager
chat = await get_chat_manager().get_or_create_stream(
platform=platform,
- user_info=DatabaseUserInfo.from_dict(user_info) if user_info else None, # type: ignore
- group_info=DatabaseGroupInfo.from_dict(group_info) if group_info else None,
+ user_info=DatabaseUserInfo.from_dict(cast(dict[str, Any], user_info)) if user_info else None, # type: ignore
+ group_info=DatabaseGroupInfo.from_dict(cast(dict[str, Any], group_info)) if group_info else None,
)
# 将消息信封转换为 DatabaseMessages
@@ -431,8 +433,8 @@ class MessageHandler:
from src.chat.message_receive.chat_stream import get_chat_manager
chat = await get_chat_manager().get_or_create_stream(
platform=platform,
- user_info=DatabaseUserInfo.from_dict(user_info) if user_info else None, # type: ignore
- group_info=DatabaseGroupInfo.from_dict(group_info) if group_info else None,
+ user_info=DatabaseUserInfo.from_dict(cast(dict[str, Any], user_info)) if user_info else None, # type: ignore
+ group_info=DatabaseGroupInfo.from_dict(cast(dict[str, Any], group_info)) if group_info else None,
)
# 将消息信封转换为 DatabaseMessages
@@ -536,6 +538,8 @@ class MessageHandler:
text = message.processed_plain_text or ""
# 获取配置的命令前缀
+ if global_config is None:
+ return False, None, True
prefixes = global_config.command.command_prefixes
# 检查是否以任何前缀开头
@@ -704,6 +708,9 @@ class MessageHandler:
async def _preprocess_message(self, message: DatabaseMessages, chat: "ChatStream") -> None:
"""预处理消息:存储、情绪更新等"""
try:
+ if global_config is None:
+ return
+
group_info = chat.group_info
# 检查是否需要处理消息
diff --git a/src/chat/message_receive/message_processor.py b/src/chat/message_receive/message_processor.py
index 96aa56650..02f5597a4 100644
--- a/src/chat/message_receive/message_processor.py
+++ b/src/chat/message_receive/message_processor.py
@@ -256,7 +256,7 @@ async def _process_single_segment(
# 检查消息是否由机器人自己发送
user_info = message_info.get("user_info", {})
user_id_str = str(user_info.get("user_id", ""))
- if user_id_str == str(global_config.bot.qq_account):
+ if global_config and user_id_str == str(global_config.bot.qq_account):
logger.info(f"检测到机器人自身发送的语音消息 (User ID: {user_id_str}),尝试从缓存获取文本。")
if isinstance(seg_data, str):
cached_text = consume_self_voice_text(seg_data)
@@ -299,7 +299,7 @@ async def _process_single_segment(
logger.warning("⚠️ Rust视频处理模块不可用,跳过视频分析")
return "[视频]"
- if global_config.video_analysis.enable:
+ if global_config and global_config.video_analysis.enable:
logger.info("已启用视频识别,开始识别")
if isinstance(seg_data, dict):
try:
diff --git a/src/chat/message_receive/storage.py b/src/chat/message_receive/storage.py
index 7836c8423..9aafef73c 100644
--- a/src/chat/message_receive/storage.py
+++ b/src/chat/message_receive/storage.py
@@ -3,10 +3,11 @@ import re
import time
import traceback
from collections import deque
-from typing import Optional, TYPE_CHECKING
+from typing import Optional, TYPE_CHECKING, cast
import orjson
from sqlalchemy import desc, select, update
+from sqlalchemy.engine import CursorResult
from src.common.data_models.database_data_model import DatabaseMessages
from src.common.database.core import get_db_session
@@ -343,7 +344,7 @@ class MessageUpdateBatcher:
.where(Messages.message_id == mmc_id)
.values(message_id=qq_id)
)
- if result.rowcount > 0:
+ if cast(CursorResult, result).rowcount > 0:
updated_count += 1
await session.commit()
@@ -571,7 +572,7 @@ class MessageStorage:
result = await session.execute(stmt)
await session.commit()
- if result.rowcount > 0:
+ if cast(CursorResult, result).rowcount > 0:
logger.debug(f"成功更新消息 {message_id} 的interest_value为 {interest_value}")
else:
logger.warning(f"未找到消息 {message_id},无法更新interest_value")
@@ -667,7 +668,7 @@ class MessageStorage:
)
result = await session.execute(update_stmt)
- if result.rowcount > 0:
+ if cast(CursorResult, result).rowcount > 0:
fixed_count += 1
logger.debug(f"修复消息 {msg.message_id} 的interest_value为 {default_interest}")
diff --git a/src/chat/message_receive/uni_message_sender.py b/src/chat/message_receive/uni_message_sender.py
index 7f3e1076f..f9da33f15 100644
--- a/src/chat/message_receive/uni_message_sender.py
+++ b/src/chat/message_receive/uni_message_sender.py
@@ -133,7 +133,7 @@ class HeartFCSender:
# 将发送的消息写入上下文历史
try:
- if chat_stream and chat_stream.context and global_config.chat:
+ if chat_stream and chat_stream.context and global_config and global_config.chat:
context = chat_stream.context
chat_config = global_config.chat
if chat_config:
diff --git a/src/chat/planner_actions/action_manager.py b/src/chat/planner_actions/action_manager.py
index 84100f6b7..c4e36abb4 100644
--- a/src/chat/planner_actions/action_manager.py
+++ b/src/chat/planner_actions/action_manager.py
@@ -94,7 +94,7 @@ class ChatterActionManager:
log_prefix=log_prefix,
shutting_down=shutting_down,
plugin_config=plugin_config,
- action_message=action_message,
+ action_message=action_message, # type: ignore
)
logger.debug(f"创建Action实例成功: {action_name}")
@@ -154,6 +154,8 @@ class ChatterActionManager:
Returns:
执行结果字典
"""
+ assert global_config is not None
+
chat_stream = None
try:
# 获取 chat_stream
diff --git a/src/chat/planner_actions/action_modifier.py b/src/chat/planner_actions/action_modifier.py
index bfd92a72e..63423fc43 100644
--- a/src/chat/planner_actions/action_modifier.py
+++ b/src/chat/planner_actions/action_modifier.py
@@ -30,6 +30,7 @@ class ActionModifier:
def __init__(self, action_manager: ChatterActionManager, chat_id: str):
"""初始化动作处理器"""
+ assert model_config is not None
self.chat_id = chat_id
# chat_stream 和 log_prefix 将在异步方法中初始化
self.chat_stream: "ChatStream | None" = None
@@ -72,6 +73,7 @@ class ActionModifier:
message_content: 消息内容
chatter_name: 当前使用的 Chatter 名称,用于过滤只允许特定 Chatter 使用的动作
"""
+ assert global_config is not None
# 初始化log_prefix
await self._initialize_log_prefix()
# 根据 stream_id 加载当前可用的动作
diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py
index fe9be0494..560d518ef 100644
--- a/src/chat/replyer/default_generator.py
+++ b/src/chat/replyer/default_generator.py
@@ -240,6 +240,8 @@ class DefaultReplyer:
chat_stream: "ChatStream",
request_type: str = "replyer",
):
+ assert global_config is not None
+ assert model_config is not None
self.express_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type=request_type)
self.chat_stream = chat_stream
# 这些将在异步初始化中设置
@@ -267,6 +269,7 @@ class DefaultReplyer:
async def _build_auth_role_prompt(self) -> str:
"""根据主人配置生成额外提示词"""
+ assert global_config is not None
master_config = global_config.permission.master_prompt
if not master_config or not master_config.enable:
return ""
@@ -515,6 +518,7 @@ class DefaultReplyer:
Returns:
str: 表达习惯信息字符串
"""
+ assert global_config is not None
# 检查是否允许在此聊天流中使用表达
use_expression, _, _ = global_config.expression.get_expression_config_for_chat(self.chat_stream.stream_id)
if not use_expression:
@@ -583,6 +587,7 @@ class DefaultReplyer:
Returns:
str: 记忆信息字符串
"""
+ assert global_config is not None
# 检查是否启用三层记忆系统
if not (global_config.memory and global_config.memory.enable):
return ""
@@ -776,6 +781,7 @@ class DefaultReplyer:
Returns:
str: 关键词反应提示字符串,如果没有触发任何反应则为空字符串
"""
+ assert global_config is not None
if target is None:
return ""
@@ -834,6 +840,7 @@ class DefaultReplyer:
Returns:
str: 格式化的notice信息文本,如果没有notice或未启用则返回空字符串
"""
+ assert global_config is not None
try:
logger.debug(f"开始构建notice块,chat_id={chat_id}")
@@ -902,6 +909,7 @@ class DefaultReplyer:
Returns:
Tuple[str, str]: (已读历史消息prompt, 未读历史消息prompt)
"""
+ assert global_config is not None
try:
# 从message_manager获取真实的已读/未读消息
@@ -1002,6 +1010,7 @@ class DefaultReplyer:
"""
回退的已读/未读历史消息构建方法
"""
+ assert global_config is not None
# 通过is_read字段分离已读和未读消息
read_messages = []
unread_messages = []
@@ -1115,6 +1124,7 @@ class DefaultReplyer:
Returns:
str: 构建好的上下文
"""
+ assert global_config is not None
if available_actions is None:
available_actions = {}
chat_stream = self.chat_stream
@@ -1607,6 +1617,7 @@ class DefaultReplyer:
reply_to: str,
reply_message: dict[str, Any] | DatabaseMessages | None = None,
) -> str: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
+ assert global_config is not None
chat_stream = self.chat_stream
chat_id = chat_stream.stream_id
is_group_chat = bool(chat_stream.group_info)
@@ -1767,6 +1778,7 @@ class DefaultReplyer:
return prompt_text
async def llm_generate_content(self, prompt: str):
+ assert global_config is not None
with Timer("LLM生成", {}): # 内部计时器,可选保留
# 直接使用已初始化的模型实例
logger.info(f"使用模型集生成回复: {self.express_model.model_for_task}")
@@ -1792,6 +1804,8 @@ class DefaultReplyer:
return content, reasoning_content, model_name, tool_calls
async def get_prompt_info(self, message: str, sender: str, target: str):
+ assert global_config is not None
+ assert model_config is not None
related_info = ""
start_time = time.time()
from src.plugins.built_in.knowledge.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool
@@ -1843,6 +1857,7 @@ class DefaultReplyer:
return ""
async def build_relation_info(self, sender: str, target: str):
+ assert global_config is not None
# 获取用户ID
if sender == f"{global_config.bot.nickname}(你)":
return "你将要回复的是你自己发送的消息。"
@@ -1927,6 +1942,7 @@ class DefaultReplyer:
reply_to: 回复对象
reply_message: 回复的原始消息
"""
+ assert global_config is not None
return # 已禁用,保留函数签名以防其他地方有引用
# 以下代码已废弃,不再执行
diff --git a/src/chat/security/manager.py b/src/chat/security/manager.py
index a8c3a5716..1c8c2ecfa 100644
--- a/src/chat/security/manager.py
+++ b/src/chat/security/manager.py
@@ -173,9 +173,10 @@ class SecurityManager:
pre_check_results = await asyncio.gather(*pre_check_tasks, return_exceptions=True)
# 筛选需要完整检查的检测器
- checkers_to_run = [
- c for c, need_check in zip(enabled_checkers, pre_check_results) if need_check is True
- ]
+ checkers_to_run = []
+ for c, need_check in zip(enabled_checkers, pre_check_results):
+ if need_check is True:
+ checkers_to_run.append(c)
if not checkers_to_run:
return SecurityCheckResult(
@@ -192,20 +193,22 @@ class SecurityManager:
results = await asyncio.gather(*check_tasks, return_exceptions=True)
# 过滤异常结果
- valid_results = []
+ valid_results: list[SecurityCheckResult] = []
for checker, result in zip(checkers_to_run, results):
- if isinstance(result, Exception):
+ if isinstance(result, BaseException):
logger.error(f"检测器 '{checker.name}' 执行失败: {result}")
continue
- result.checker_name = checker.name
- valid_results.append(result)
+
+ if isinstance(result, SecurityCheckResult):
+ result.checker_name = checker.name
+ valid_results.append(result)
# 合并结果
return self._merge_results(valid_results, time.time() - start_time)
async def _check_all(self, message: str, context: dict, start_time: float) -> SecurityCheckResult:
"""检测所有模式(顺序执行所有检测器)"""
- results = []
+ results: list[SecurityCheckResult] = []
for checker in self._checkers:
if not checker.enabled:
diff --git a/src/chat/utils/chat_message_builder.py b/src/chat/utils/chat_message_builder.py
index b0991d53d..7cd4e0596 100644
--- a/src/chat/utils/chat_message_builder.py
+++ b/src/chat/utils/chat_message_builder.py
@@ -39,11 +39,13 @@ def replace_user_references_sync(
Returns:
str: 处理后的内容字符串
"""
+ assert global_config is not None
if not content:
return ""
if name_resolver is None:
def default_resolver(platform: str, user_id: str) -> str:
+ assert global_config is not None
# 检查是否是机器人自己
if replace_bot_name and (user_id == str(global_config.bot.qq_account)):
return f"{global_config.bot.nickname}(你)"
@@ -116,10 +118,12 @@ async def replace_user_references_async(
Returns:
str: 处理后的内容字符串
"""
+ assert global_config is not None
if name_resolver is None:
person_info_manager = get_person_info_manager()
async def default_resolver(platform: str, user_id: str) -> str:
+ assert global_config is not None
# 检查是否是机器人自己
if replace_bot_name and (user_id == str(global_config.bot.qq_account)):
return f"{global_config.bot.nickname}(你)"
@@ -392,7 +396,7 @@ async def get_actions_by_timestamp_with_chat_inclusive(
actions = list(result.scalars())
return [action.__dict__ for action in reversed(actions)]
else: # earliest
- result = await session.execute(
+ query = await session.execute(
select(ActionRecords)
.where(
and_(
@@ -540,6 +544,7 @@ async def _build_readable_messages_internal(
Returns:
包含格式化消息的字符串、原始消息详情列表、图片映射字典和更新后的计数器的元组。
"""
+ assert global_config is not None
if not messages:
return "", [], pic_id_mapping or {}, pic_counter
@@ -694,6 +699,7 @@ async def _build_readable_messages_internal(
percentile = i / n_messages # 计算消息在列表中的位置百分比 (0 <= percentile < 1)
original_len = len(content)
limit = -1 # 默认不截断
+ replace_content = ""
if percentile < 0.2: # 60% 之前的消息 (即最旧的 60%)
limit = 50
@@ -973,6 +979,7 @@ async def build_readable_messages(
truncate: 是否截断长消息
show_actions: 是否显示动作记录
"""
+ assert global_config is not None
# 创建messages的深拷贝,避免修改原始列表
if not messages:
return ""
@@ -1112,6 +1119,7 @@ async def build_anonymous_messages(messages: list[dict[str, Any]]) -> str:
构建匿名可读消息,将不同人的名称转为唯一占位符(A、B、C...),bot自己用SELF。
处理 回复 和 @ 字段,将bbb映射为匿名占位符。
"""
+ assert global_config is not None
if not messages:
print("111111111111没有消息,无法构建匿名消息")
return ""
@@ -1127,6 +1135,7 @@ async def build_anonymous_messages(messages: list[dict[str, Any]]) -> str:
def get_anon_name(platform, user_id):
# print(f"get_anon_name: platform:{platform}, user_id:{user_id}")
# print(f"global_config.bot.qq_account:{global_config.bot.qq_account}")
+ assert global_config is not None
if user_id == global_config.bot.qq_account:
# print("SELF11111111111111")
@@ -1204,6 +1213,7 @@ async def get_person_id_list(messages: list[dict[str, Any]]) -> list[str]:
Returns:
一个包含唯一 person_id 的列表。
"""
+ assert global_config is not None
person_ids_set = set() # 使用集合来自动去重
for msg in messages:
diff --git a/src/chat/utils/prompt.py b/src/chat/utils/prompt.py
index 35234c352..ae1149b05 100644
--- a/src/chat/utils/prompt.py
+++ b/src/chat/utils/prompt.py
@@ -649,6 +649,7 @@ class Prompt:
async def _build_expression_habits(self) -> dict[str, Any]:
"""构建表达习惯(如表情、口癖)的上下文块."""
+ assert global_config is not None
# 检查当前聊天是否启用了表达习惯功能
use_expression, _, _ = global_config.expression.get_expression_config_for_chat(
self.parameters.chat_id
@@ -728,6 +729,7 @@ class Prompt:
async def _build_tool_info(self) -> dict[str, Any]:
"""构建工具调用结果的上下文块."""
+ assert global_config is not None
if not global_config.tool.enable_tool:
return {"tool_info_block": ""}
@@ -779,6 +781,7 @@ class Prompt:
async def _build_knowledge_info(self) -> dict[str, Any]:
"""构建从知识库检索到的相关信息的上下文块."""
+ assert global_config is not None
if not global_config.lpmm_knowledge.enable:
return {"knowledge_prompt": ""}
@@ -873,6 +876,7 @@ class Prompt:
def _prepare_s4u_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
"""为S4U(Scene for You)模式准备最终用于格式化的参数字典."""
+ assert global_config is not None
return {
**context_data,
"expression_habits_block": context_data.get("expression_habits_block", ""),
@@ -915,6 +919,7 @@ class Prompt:
def _prepare_normal_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
"""为Normal模式准备最终用于格式化的参数字典."""
+ assert global_config is not None
return {
**context_data,
"expression_habits_block": context_data.get("expression_habits_block", ""),
@@ -959,6 +964,7 @@ class Prompt:
def _prepare_default_params(self, context_data: dict[str, Any]) -> dict[str, Any]:
"""为默认模式(或其他未指定模式)准备最终用于格式化的参数字典."""
+ assert global_config is not None
return {
"expression_habits_block": context_data.get("expression_habits_block", ""),
"relation_info_block": context_data.get("relation_info_block", ""),
@@ -1143,6 +1149,7 @@ class Prompt:
Returns:
str: 构建好的跨群聊上下文字符串。
"""
+ assert global_config is not None
if not global_config.cross_context.enable:
return ""
diff --git a/src/chat/utils/report_generator.py b/src/chat/utils/report_generator.py
index 8c8756070..874451efc 100644
--- a/src/chat/utils/report_generator.py
+++ b/src/chat/utils/report_generator.py
@@ -92,6 +92,7 @@ class HTMLReportGenerator:
f"{stat_data[TPS_BY_PROVIDER].get(provider_name, 0):.2f} | "
f"{stat_data[COST_PER_KTOK_BY_PROVIDER].get(provider_name, 0):.4f} ¥ | "
f"{stat_data[COST_BY_PROVIDER].get(provider_name, 0):.4f} ¥ | "
+ f"{stat_data.get(AVG_TIME_COST_BY_PROVIDER, {}).get(provider_name, 0):.3f} 秒 | "
f""
for provider_name, count in sorted(stat_data[REQ_CNT_BY_PROVIDER].items())
]
@@ -135,27 +136,123 @@ class HTMLReportGenerator:
for chat_id, count in sorted(stat_data[MSG_CNT_BY_CHAT].items())
]
)
+
+ # 先计算基础数据
+ total_tokens = sum(stat_data.get(TOTAL_TOK_BY_MODEL, {}).values())
+ total_requests = stat_data.get(TOTAL_REQ_CNT, 0)
+ total_cost = stat_data.get(TOTAL_COST, 0)
+ total_messages = stat_data.get(TOTAL_MSG_CNT, 0)
+ online_seconds = stat_data.get(ONLINE_TIME, 0)
+ online_hours = online_seconds / 3600 if online_seconds > 0 else 0
+
+ # 大模型相关效率指标
+ avg_cost_per_req = (total_cost / total_requests) if total_requests > 0 else 0
+ avg_cost_per_msg = (total_cost / total_messages) if total_messages > 0 else 0
+ avg_tokens_per_msg = (total_tokens / total_messages) if total_messages > 0 else 0
+ avg_tokens_per_req = (total_tokens / total_requests) if total_requests > 0 else 0
+ msg_to_req_ratio = (total_messages / total_requests) if total_requests > 0 else 0
+ cost_per_hour = (total_cost / online_hours) if online_hours > 0 else 0
+ req_per_hour = (total_requests / online_hours) if online_hours > 0 else 0
+
+ # Token效率 (输出/输入比率)
+ total_in_tokens = sum(stat_data.get(IN_TOK_BY_MODEL, {}).values())
+ total_out_tokens = sum(stat_data.get(OUT_TOK_BY_MODEL, {}).values())
+ token_efficiency = (total_out_tokens / total_in_tokens) if total_in_tokens > 0 else 0
+
+ # 生成效率指标表格数据
+ efficiency_data = [
+ ("💸 平均每条消息成本", f"{avg_cost_per_msg:.6f} ¥", "处理每条用户消息的平均AI成本"),
+ ("🎯 平均每条消息Token", f"{avg_tokens_per_msg:.0f}", "每条消息平均消耗的Token数量"),
+ ("📊 平均每次请求Token", f"{avg_tokens_per_req:.0f}", "每次AI请求平均消耗的Token数"),
+ ("🔄 消息/请求比率", f"{msg_to_req_ratio:.2f}", "平均每个AI请求处理的消息数"),
+ ("⚡ Token效率(输出/输入)", f"{token_efficiency:.3f}x", "输出Token与输入Token的比率"),
+ ("💵 每小时运行成本", f"{cost_per_hour:.4f} ¥/h", "在线每小时的AI成本"),
+ ("🚀 每小时请求数", f"{req_per_hour:.1f} 次/h", "在线每小时的AI请求次数"),
+ ("💰 每千Token成本", f"{(total_cost / total_tokens * 1000) if total_tokens > 0 else 0:.4f} ¥", "平均每1000个Token的成本"),
+ ("📈 Token/在线小时", f"{(total_tokens / online_hours) if online_hours > 0 else 0:.0f}", "每在线小时处理的Token数"),
+ ("💬 消息/在线小时", f"{(total_messages / online_hours) if online_hours > 0 else 0:.1f}", "每在线小时处理的消息数"),
+ ]
+
+ efficiency_rows = "\n".join(
+ [
+            f"| {metric} | {value} | {desc} |"
+ for metric, value, desc in efficiency_data
+ ]
+ )
+
+ # 计算活跃聊天数和最活跃聊天
+ msg_by_chat = stat_data.get(MSG_CNT_BY_CHAT, {})
+ active_chats = len(msg_by_chat)
+ most_active_chat = ""
+ if msg_by_chat:
+ most_active_id = max(msg_by_chat, key=msg_by_chat.get)
+ most_active_chat = self.name_mapping.get(most_active_id, (most_active_id, 0))[0]
+ most_active_count = msg_by_chat[most_active_id]
+ most_active_chat = f"{most_active_chat} ({most_active_count}条)"
+
+ avg_msg_per_chat = (total_messages / active_chats) if active_chats > 0 else 0
+
summary_cards = f"""
-
总花费
-
{stat_data.get(TOTAL_COST, 0):.4f} ¥
+
💰 总花费
+
{total_cost:.4f} ¥
-
总请求数
-
{stat_data.get(TOTAL_REQ_CNT, 0)}
+
📞 AI请求数
+
{total_requests:,}
-
总Token数
-
{sum(stat_data.get(TOTAL_TOK_BY_MODEL, {}).values())}
-
-
-
总消息数
-
{stat_data.get(TOTAL_MSG_CNT, 0)}
+
🎯 总Token数
+
{total_tokens:,}
-
总在线时间
-
{format_online_time(int(stat_data.get(ONLINE_TIME, 0)))}
+
💬 总消息数
+
{total_messages:,}
+
+
+
⏱️ 在线时间
+
{format_online_time(int(online_seconds))}
+
+
+
💸 每条消息成本
+
{avg_cost_per_msg:.4f} ¥
+
+
+
📊 每请求Token
+
{avg_tokens_per_req:.0f}
+
+
+
� 消息/请求比
+
{msg_to_req_ratio:.2f}
+
+
+
⚡ Token效率
+
{token_efficiency:.2f}x
+
+
+
💵 每小时成本
+
{cost_per_hour:.4f} ¥
+
+
+
🚀 每小时请求
+
{req_per_hour:.1f}
+
+
+
👥 活跃聊天数
+
{active_chats}
+
+
+
🔥 最活跃聊天
+
{most_active_chat if most_active_chat else "无"}
+
+
+
📈 平均消息/聊天
+
{avg_msg_per_chat:.1f}
+
+
+
🎯 每消息Token
+
{avg_tokens_per_msg:.0f}
"""
@@ -173,6 +270,7 @@ class HTMLReportGenerator:
module_rows=module_rows,
type_rows=type_rows,
chat_rows=chat_rows,
+ efficiency_rows=efficiency_rows,
)
def _generate_chart_tab(self, chart_data: dict) -> str:
@@ -219,28 +317,41 @@ class HTMLReportGenerator:
period_id = period[0]
static_chart_data[period_id] = {
"provider_cost_data": stat[period_id].get(PIE_CHART_COST_BY_PROVIDER, {}),
+ "module_cost_data": stat[period_id].get(PIE_CHART_COST_BY_MODULE, {}),
"model_cost_data": stat[period_id].get(BAR_CHART_COST_BY_MODEL, {}),
+ "token_comparison_data": stat[period_id].get(BAR_CHART_TOKEN_COMPARISON, {}),
+ "response_time_scatter_data": stat[period_id].get(SCATTER_CHART_RESPONSE_TIME, []),
+ "model_efficiency_radar_data": stat[period_id].get(RADAR_CHART_MODEL_EFFICIENCY, {}),
+ "provider_requests_data": stat[period_id].get(DOUGHNUT_CHART_PROVIDER_REQUESTS, {}),
+ "avg_response_time_data": stat[period_id].get(BAR_CHART_AVG_RESPONSE_TIME, {}),
}
static_chart_data["all_time"] = {
"provider_cost_data": stat["all_time"].get(PIE_CHART_COST_BY_PROVIDER, {}),
+ "module_cost_data": stat["all_time"].get(PIE_CHART_COST_BY_MODULE, {}),
"model_cost_data": stat["all_time"].get(BAR_CHART_COST_BY_MODEL, {}),
+ "token_comparison_data": stat["all_time"].get(BAR_CHART_TOKEN_COMPARISON, {}),
+ "response_time_scatter_data": stat["all_time"].get(SCATTER_CHART_RESPONSE_TIME, []),
+ "model_efficiency_radar_data": stat["all_time"].get(RADAR_CHART_MODEL_EFFICIENCY, {}),
+ "provider_requests_data": stat["all_time"].get(DOUGHNUT_CHART_PROVIDER_REQUESTS, {}),
+ "avg_response_time_data": stat["all_time"].get(BAR_CHART_AVG_RESPONSE_TIME, {}),
}
# 渲染模板
# 读取CSS和JS文件内容
+ assert isinstance(self.jinja_env.loader, FileSystemLoader)
async with aiofiles.open(os.path.join(self.jinja_env.loader.searchpath[0], "report.css"), encoding="utf-8") as f:
report_css = await f.read()
async with aiofiles.open(os.path.join(self.jinja_env.loader.searchpath[0], "report.js"), encoding="utf-8") as f:
report_js = await f.read()
- # 渲染模板
+ # 渲染模板(使用紧凑的JSON格式减少文件大小)
template = self.jinja_env.get_template("report.html")
rendered_html = template.render(
report_title="MoFox-Bot运行统计报告",
generation_time=now.strftime("%Y-%m-%d %H:%M:%S"),
tab_list="\n".join(tab_list_html),
tab_content="\n".join(tab_content_html_list),
- all_chart_data=json.dumps(chart_data),
- static_chart_data=json.dumps(static_chart_data),
+ all_chart_data=json.dumps(chart_data, separators=(',', ':'), ensure_ascii=False),
+ static_chart_data=json.dumps(static_chart_data, separators=(',', ':'), ensure_ascii=False),
report_css=report_css,
report_js=report_js,
)
diff --git a/src/chat/utils/statistic.py b/src/chat/utils/statistic.py
index a94278b04..c6fdcec44 100644
--- a/src/chat/utils/statistic.py
+++ b/src/chat/utils/statistic.py
@@ -192,7 +192,7 @@ class StatisticOutputTask(AsyncTask):
self._statistic_console_output(stats, now)
# 使用新的 HTMLReportGenerator 生成报告
chart_data = await self._collect_chart_data(stats)
- deploy_time = datetime.fromtimestamp(local_storage.get("deploy_time", now.timestamp()))
+ deploy_time = datetime.fromtimestamp(float(local_storage.get("deploy_time", now.timestamp()))) # type: ignore
report_generator = HTMLReportGenerator(
name_mapping=self.name_mapping,
stat_period=self.stat_period,
@@ -219,7 +219,7 @@ class StatisticOutputTask(AsyncTask):
# 使用新的 HTMLReportGenerator 生成报告
chart_data = await self._collect_chart_data(stats)
- deploy_time = datetime.fromtimestamp(local_storage.get("deploy_time", now.timestamp()))
+ deploy_time = datetime.fromtimestamp(float(local_storage.get("deploy_time", now.timestamp()))) # type: ignore
report_generator = HTMLReportGenerator(
name_mapping=self.name_mapping,
stat_period=self.stat_period,
@@ -299,8 +299,16 @@ class StatisticOutputTask(AsyncTask):
# Chart data
PIE_CHART_COST_BY_PROVIDER: {},
PIE_CHART_REQ_BY_PROVIDER: {},
+ PIE_CHART_COST_BY_MODULE: {},
BAR_CHART_COST_BY_MODEL: {},
BAR_CHART_REQ_BY_MODEL: {},
+ BAR_CHART_TOKEN_COMPARISON: {},
+ SCATTER_CHART_RESPONSE_TIME: {},
+ RADAR_CHART_MODEL_EFFICIENCY: {},
+ HEATMAP_CHAT_ACTIVITY: {},
+ DOUGHNUT_CHART_PROVIDER_REQUESTS: {},
+ LINE_CHART_COST_TREND: {},
+ BAR_CHART_AVG_RESPONSE_TIME: {},
}
for period_key, _ in collect_period
}
@@ -457,6 +465,15 @@ class StatisticOutputTask(AsyncTask):
"data": [round(item[1], 4) for item in sorted_providers],
}
+ # 按模块花费饼图
+ module_costs = period_stats[COST_BY_MODULE]
+ if module_costs:
+ sorted_modules = sorted(module_costs.items(), key=lambda item: item[1], reverse=True)
+ period_stats[PIE_CHART_COST_BY_MODULE] = {
+ "labels": [item[0] for item in sorted_modules],
+ "data": [round(item[1], 4) for item in sorted_modules],
+ }
+
# 按模型花费条形图
model_costs = period_stats[COST_BY_MODEL]
if model_costs:
@@ -465,6 +482,91 @@ class StatisticOutputTask(AsyncTask):
"labels": [item[0] for item in sorted_models],
"data": [round(item[1], 4) for item in sorted_models],
}
+
+ # 1. Token输入输出对比条形图
+ model_names = list(period_stats[REQ_CNT_BY_MODEL].keys())
+ if model_names:
+ period_stats[BAR_CHART_TOKEN_COMPARISON] = {
+ "labels": model_names,
+ "input_tokens": [period_stats[IN_TOK_BY_MODEL].get(m, 0) for m in model_names],
+ "output_tokens": [period_stats[OUT_TOK_BY_MODEL].get(m, 0) for m in model_names],
+ }
+
+ # 2. 响应时间分布散点图数据(限制数据点以提高加载速度)
+ scatter_data = []
+ max_points_per_model = 50 # 每个模型最多50个点
+ for model_name, time_costs in period_stats[TIME_COST_BY_MODEL].items():
+ # 如果数据点太多,进行采样
+ if len(time_costs) > max_points_per_model:
+ step = len(time_costs) // max_points_per_model
+ sampled_costs = time_costs[::step][:max_points_per_model]
+ else:
+ sampled_costs = time_costs
+
+ for idx, time_cost in enumerate(sampled_costs):
+ scatter_data.append({
+ "model": model_name,
+ "x": idx,
+ "y": round(time_cost, 3),
+ "tokens": period_stats[TOTAL_TOK_BY_MODEL].get(model_name, 0) // len(time_costs) if time_costs else 0
+ })
+ period_stats[SCATTER_CHART_RESPONSE_TIME] = scatter_data
+
+ # 3. 模型效率雷达图
+ if model_names:
+ # 取前5个最常用的模型
+ top_models = sorted(period_stats[REQ_CNT_BY_MODEL].items(), key=lambda x: x[1], reverse=True)[:5]
+ radar_data = []
+ for model_name, _ in top_models:
+ # 归一化各项指标到0-100
+ req_count = period_stats[REQ_CNT_BY_MODEL].get(model_name, 0)
+ tps = period_stats[TPS_BY_MODEL].get(model_name, 0)
+ avg_time = period_stats[AVG_TIME_COST_BY_MODEL].get(model_name, 0)
+ cost_per_ktok = period_stats[COST_PER_KTOK_BY_MODEL].get(model_name, 0)
+ avg_tokens = period_stats[AVG_TOK_BY_MODEL].get(model_name, 0)
+
+ # 简单的归一化(反向归一化时间和成本,值越小越好)
+ max_req = max([period_stats[REQ_CNT_BY_MODEL].get(m[0], 1) for m in top_models])
+ max_tps = max([period_stats[TPS_BY_MODEL].get(m[0], 1) for m in top_models])
+ max_time = max([period_stats[AVG_TIME_COST_BY_MODEL].get(m[0], 0.1) for m in top_models])
+ max_cost = max([period_stats[COST_PER_KTOK_BY_MODEL].get(m[0], 0.001) for m in top_models])
+ max_tokens = max([period_stats[AVG_TOK_BY_MODEL].get(m[0], 1) for m in top_models])
+
+ radar_data.append({
+ "model": model_name,
+ "metrics": [
+ round((req_count / max_req) * 100, 2) if max_req > 0 else 0, # 请求量
+ round((tps / max_tps) * 100, 2) if max_tps > 0 else 0, # TPS
+ round((1 - avg_time / max_time) * 100, 2) if max_time > 0 else 100, # 速度(反向)
+ round((1 - cost_per_ktok / max_cost) * 100, 2) if max_cost > 0 else 100, # 成本效益(反向)
+ round((avg_tokens / max_tokens) * 100, 2) if max_tokens > 0 else 0, # Token容量
+ ]
+ })
+ period_stats[RADAR_CHART_MODEL_EFFICIENCY] = {
+ "labels": ["请求量", "TPS", "响应速度", "成本效益", "Token容量"],
+ "datasets": radar_data
+ }
+
+ # 4. 供应商请求占比环形图
+ provider_requests = period_stats[REQ_CNT_BY_PROVIDER]
+ if provider_requests:
+ sorted_provider_reqs = sorted(provider_requests.items(), key=lambda item: item[1], reverse=True)
+ period_stats[DOUGHNUT_CHART_PROVIDER_REQUESTS] = {
+ "labels": [item[0] for item in sorted_provider_reqs],
+ "data": [item[1] for item in sorted_provider_reqs],
+ }
+
+ # 5. 平均响应时间条形图
+ if model_names:
+ sorted_by_time = sorted(
+ [(m, period_stats[AVG_TIME_COST_BY_MODEL].get(m, 0)) for m in model_names],
+ key=lambda x: x[1],
+ reverse=True
+ )
+ period_stats[BAR_CHART_AVG_RESPONSE_TIME] = {
+ "labels": [item[0] for item in sorted_by_time],
+ "data": [round(item[1], 3) for item in sorted_by_time],
+ }
return stats
@staticmethod
diff --git a/src/chat/utils/statistic_keys.py b/src/chat/utils/statistic_keys.py
index 2a552ac1a..3233ae8c8 100644
--- a/src/chat/utils/statistic_keys.py
+++ b/src/chat/utils/statistic_keys.py
@@ -59,5 +59,30 @@ STD_TIME_COST_BY_PROVIDER = "std_time_costs_by_provider"
# 新增饼图和条形图数据
PIE_CHART_COST_BY_PROVIDER = "pie_chart_cost_by_provider"
PIE_CHART_REQ_BY_PROVIDER = "pie_chart_req_by_provider"
+PIE_CHART_COST_BY_MODULE = "pie_chart_cost_by_module"
BAR_CHART_COST_BY_MODEL = "bar_chart_cost_by_model"
BAR_CHART_REQ_BY_MODEL = "bar_chart_req_by_model"
+
+# 新增更多图表数据
+BAR_CHART_TOKEN_COMPARISON = "bar_chart_token_comparison" # Token输入输出对比图
+SCATTER_CHART_RESPONSE_TIME = "scatter_chart_response_time" # 响应时间分布散点图
+RADAR_CHART_MODEL_EFFICIENCY = "radar_chart_model_efficiency" # 模型效率雷达图
+HEATMAP_CHAT_ACTIVITY = "heatmap_chat_activity" # 聊天活跃度热力图
+DOUGHNUT_CHART_PROVIDER_REQUESTS = "doughnut_chart_provider_requests" # 供应商请求占比环形图
+LINE_CHART_COST_TREND = "line_chart_cost_trend" # 成本趋势折线图
+BAR_CHART_AVG_RESPONSE_TIME = "bar_chart_avg_response_time" # 平均响应时间条形图
+
+# 新增消息分析指标
+MSG_CNT_BY_USER = "messages_by_user" # 按用户的消息数
+ACTIVE_CHATS_CNT = "active_chats_count" # 活跃聊天数
+MOST_ACTIVE_CHAT = "most_active_chat" # 最活跃的聊天
+AVG_MSG_PER_CHAT = "avg_messages_per_chat" # 平均每个聊天的消息数
+
+# 新增大模型效率指标
+AVG_COST_PER_MSG = "avg_cost_per_message" # 平均每条消息成本
+AVG_TOKENS_PER_MSG = "avg_tokens_per_message" # 平均每条消息Token数
+AVG_TOKENS_PER_REQ = "avg_tokens_per_request" # 平均每次请求Token数
+MSG_TO_REQ_RATIO = "message_to_request_ratio" # 消息/请求比率
+COST_PER_ONLINE_HOUR = "cost_per_online_hour" # 每小时在线成本
+REQ_PER_ONLINE_HOUR = "requests_per_online_hour" # 每小时请求数
+TOKEN_EFFICIENCY = "token_efficiency" # Token效率 (输出/输入比率)
diff --git a/src/chat/utils/templates/charts_tab.html b/src/chat/utils/templates/charts_tab.html
index 52e6c3024..e078f37a2 100644
--- a/src/chat/utils/templates/charts_tab.html
+++ b/src/chat/utils/templates/charts_tab.html
@@ -1,16 +1,33 @@
-
数据图表
-
-
+
📈 数据图表
+
+ show_chart
+ 动态图表: 选择不同的时间范围查看数据趋势变化
+
+
+
+
+ schedule
+ 时间范围:
+
-
\ No newline at end of file
diff --git a/src/chat/utils/templates/report.css b/src/chat/utils/templates/report.css
index 2229be785..b9fd0220f 100644
--- a/src/chat/utils/templates/report.css
+++ b/src/chat/utils/templates/report.css
@@ -1,199 +1,385 @@
-/* General Body Styles */
+/* Modern Dashboard Theme - 2025 Edition */
+:root {
+ /* Core Colors */
+ --primary-color: #2563eb;
+ --primary-light: #eff6ff;
+ --primary-dark: #1e40af;
+ --secondary-color: #64748b;
+ --success-color: #10b981;
+ --warning-color: #f59e0b;
+ --danger-color: #ef4444;
+
+ /* Backgrounds */
+ --bg-body: #f8fafc;
+ --bg-card: #ffffff;
+ --bg-sidebar: #ffffff;
+
+ /* Text */
+ --text-primary: #0f172a;
+ --text-secondary: #475569;
+ --text-muted: #94a3b8;
+
+ /* Borders & Shadows */
+ --border-color: #e2e8f0;
+ --shadow-sm: 0 1px 2px 0 rgb(0 0 0 / 0.05);
+ --shadow-md: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
+ --shadow-lg: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1);
+
+ /* Layout */
+ --radius-lg: 1rem;
+ --radius-md: 0.75rem;
+ --radius-sm: 0.5rem;
+}
+
+/* Reset & Base Styles */
+* {
+ box-sizing: border-box;
+}
+
body {
- font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
+ font-family: 'Inter', 'Roboto', -apple-system, BlinkMacSystemFont, sans-serif;
margin: 0;
- padding: 20px;
- background-color: #f0f4f8; /* Light blue-gray background */
- color: #333; /* Darker text for better contrast */
- line-height: 1.6;
+ padding: 0;
+ background-color: var(--bg-body);
+ color: var(--text-primary);
+ line-height: 1.5;
+ -webkit-font-smoothing: antialiased;
}
-/* Main Container */
+/* Layout Container */
.container {
- max-width: 95%; /* Make container almost full-width */
- margin: 20px auto;
- background-color: #FFFFFF; /* Pure white background */
- padding: 30px;
- border-radius: 12px; /* Slightly more rounded corners */
- box-shadow: 0 8px 25px rgba(0, 0, 0, 0.07); /* Softer, deeper shadow */
-}
-/* Dashboard Layout */
-.dashboard-layout {
- display: flex;
- gap: 30px;
-}
-
-.main-content {
- flex: 65%;
- min-width: 0;
-}
-
-.sidebar-content {
- flex: 35%;
- min-width: 0;
-}
-
-/* Responsive Design for Mobile */
-@media (max-width: 992px) {
- .dashboard-layout {
- flex-direction: column;
- }
- .main-content, .sidebar-content {
- flex: 1;
- }
-}
-
-/* Typography */
-h1, h2 {
- color: #212529;
- padding-bottom: 10px;
- margin-top: 0;
+ max-width: 1600px;
+ margin: 0 auto;
+ padding: 2rem;
}
+/* Header Section */
h1 {
- text-align: center;
- font-size: 2.2em;
- margin-bottom: 20px;
- color: #2A6CB5; /* A deeper, more professional blue */
+ font-size: 2rem;
+ font-weight: 700;
+ color: var(--text-primary);
+ margin-bottom: 0.5rem;
+ text-align: left;
+ letter-spacing: -0.025em;
}
-h2 {
- font-size: 1.5em;
- margin-top: 40px;
- margin-bottom: 15px;
- border-bottom: 2px solid #DDE6ED; /* Lighter border color */
-}
-
-/* Info Banners */
-.info-item {
- background-color: #E9F2FA; /* Light blue background */
- padding: 12px 18px;
- border-radius: 8px;
- margin-bottom: 20px;
- font-size: 0.95em;
- border: 1px solid #D1E3F4; /* Light blue border */
-}
-
-.info-item strong {
- color: #2A6CB5; /* Deeper blue for emphasis */
-}
-
-/* Tabs */
-.tabs {
- border-bottom: 2px solid #DEE2E6;
+.header-meta {
display: flex;
- margin-bottom: 20px;
+ align-items: center;
+ gap: 1rem;
+ margin-bottom: 2rem;
+ color: var(--text-secondary);
+ font-size: 0.875rem;
+}
+
+.info-item {
+ background: var(--primary-light);
+ color: var(--primary-dark);
+ padding: 0.75rem 1.25rem;
+ border-radius: var(--radius-md);
+ font-weight: 500;
+ display: inline-flex;
+ align-items: center;
+ gap: 0.5rem;
+ border: 1px solid rgba(37, 99, 235, 0.1);
+}
+
+/* Navigation Tabs */
+.tabs {
+ display: flex;
+ gap: 0.5rem;
+ margin-bottom: 2rem;
+ border-bottom: 1px solid var(--border-color);
+ padding-bottom: 1px;
+ overflow-x: auto;
+ scrollbar-width: none; /* Firefox */
+}
+
+.tabs::-webkit-scrollbar {
+ display: none; /* Chrome/Safari */
}
.tabs button {
- background: none;
+ background: transparent;
border: none;
- outline: none;
- padding: 14px 20px;
+ padding: 0.75rem 1.25rem;
+ font-size: 0.95rem;
+ font-weight: 500;
+ color: var(--text-secondary);
cursor: pointer;
- transition: all 0.3s ease;
- font-size: 16px;
- color: #6C757D;
- border-bottom: 3px solid transparent;
- margin-bottom: -2px; /* Align with container border */
+ border-radius: var(--radius-md) var(--radius-md) 0 0;
+ transition: all 0.2s ease;
+ position: relative;
+ white-space: nowrap;
}
.tabs button:hover {
- color: #2A6CB5;
- background-color: #f0f4f8; /* Subtle hover background */
+ color: var(--primary-color);
+ background-color: rgba(37, 99, 235, 0.05);
}
.tabs button.active {
- color: #2A6CB5; /* Active tab color */
- border-bottom-color: #2A6CB5; /* Active tab border color */
+ color: var(--primary-color);
+ font-weight: 600;
}
-.tab-content {
- display: none;
- padding-top: 10px;
+.tabs button.active::after {
+ content: '';
+ position: absolute;
+ bottom: -1px;
+ left: 0;
+ right: 0;
+ height: 2px;
+ background-color: var(--primary-color);
}
-.tab-content.active {
- display: block;
-}
-
-/* Summary Cards */
+/* Summary Cards Grid */
.summary-cards {
display: grid;
- grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
- gap: 20px;
- margin: 20px 0;
+ grid-template-columns: repeat(auto-fit, minmax(240px, 1fr));
+ gap: 1.5rem;
+ margin-bottom: 2rem;
}
.card {
- background-color: #FFFFFF;
- padding: 20px;
- border-radius: 8px;
- text-align: center;
- border: 1px solid #DDE6ED; /* Lighter border */
- transition: all 0.3s ease;
+ background: var(--bg-card);
+ padding: 1.5rem;
+ border-radius: var(--radius-lg);
+ box-shadow: var(--shadow-sm);
+ border: 1px solid var(--border-color);
+ transition: transform 0.2s ease, box-shadow 0.2s ease;
+ display: flex;
+ flex-direction: column;
+ justify-content: space-between;
}
.card:hover {
- transform: translateY(-5px);
- box-shadow: 0 6px 15px rgba(0,0,0,0.08);
+ transform: translateY(-2px);
+ box-shadow: var(--shadow-md);
}
.card h3 {
- margin: 0 0 10px;
- font-size: 1em;
- color: #6C757D;
+ font-size: 0.875rem;
+ font-weight: 500;
+ color: var(--text-secondary);
+ margin: 0 0 0.5rem 0;
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
}
.card p {
+ font-size: 1.75rem;
+ font-weight: 700;
+ color: var(--text-primary);
margin: 0;
- font-size: 1.8em;
- font-weight: bold;
- color: #212529;
+ line-height: 1.2;
+}
+
+/* Dashboard Layout */
+.dashboard-layout {
+ display: grid;
+ grid-template-columns: 1fr 350px;
+ gap: 2rem;
+ align-items: start;
+}
+
+.main-content {
+ min-width: 0; /* Prevent grid blowout */
+}
+
+.sidebar-content {
+ background: var(--bg-sidebar);
+ padding: 1.5rem;
+ border-radius: var(--radius-lg);
+ border: 1px solid var(--border-color);
+ box-shadow: var(--shadow-sm);
+ position: sticky;
+ top: 2rem;
+ max-height: calc(100vh - 4rem);
+ overflow-y: auto;
+}
+
+/* Charts */
+.chart-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(450px, 1fr));
+ gap: 1.5rem;
+ margin-bottom: 2rem;
+}
+
+.chart-container, .chart-wrapper {
+ background: var(--bg-card);
+ padding: 1.5rem;
+ border-radius: var(--radius-lg);
+ border: 1px solid var(--border-color);
+ box-shadow: var(--shadow-sm);
+ position: relative;
+ height: auto;
+ min-height: 350px;
+ max-height: 600px;
+ overflow: hidden;
+}
+
+.chart-container > div, .chart-wrapper > div {
+ max-width: 100%;
+ max-height: 100%;
+}
+
+.chart-container h3, .chart-wrapper h3 {
+ margin-top: 0;
+ font-size: 1.1rem;
+ color: var(--text-primary);
+ margin-bottom: 1rem;
}
/* Tables */
+.table-container {
+ background: var(--bg-card);
+ border-radius: var(--radius-lg);
+ border: 1px solid var(--border-color);
+ box-shadow: var(--shadow-sm);
+ overflow: hidden;
+ margin-bottom: 2rem;
+}
+
table {
width: 100%;
border-collapse: collapse;
- margin-top: 15px;
- font-size: 0.9em;
+ font-size: 0.875rem;
}
-th, td {
- padding: 12px 15px;
- text-align: left;
- border-bottom: 1px solid #EAEAEA;
-}
th {
- background-color: #4A90E2; /* Main theme blue */
+ background: var(--bg-body);
+ padding: 1rem;
+ text-align: left;
+ font-weight: 600;
+ color: var(--text-secondary);
+ border-bottom: 1px solid var(--border-color);
+ white-space: nowrap;
+}
+
+td {
+ padding: 1rem;
+ border-bottom: 1px solid var(--border-color);
+ color: var(--text-primary);
+}
+
+tr:last-child td {
+ border-bottom: none;
+}
+
+tr:hover td {
+ background-color: var(--primary-light);
+}
+
+/* Section Headers */
+h2 {
+ font-size: 1.5rem;
+ font-weight: 600;
+ color: var(--text-primary);
+ margin: 2.5rem 0 1.5rem;
+ display: flex;
+ align-items: center;
+ gap: 0.75rem;
+}
+
+h2::before {
+ content: '';
+ display: block;
+ width: 4px;
+ height: 24px;
+ background: var(--primary-color);
+ border-radius: 2px;
+}
+
+/* Time Range Buttons */
+.time-range-controls {
+ display: flex;
+ justify-content: center;
+ gap: 0.5rem;
+ margin-bottom: 1.5rem;
+}
+
+.time-range-btn {
+ background: var(--bg-card);
+ border: 1px solid var(--border-color);
+ color: var(--text-secondary);
+ padding: 0.5rem 1rem;
+ border-radius: 2rem;
+ font-size: 0.875rem;
+ font-weight: 500;
+ cursor: pointer;
+ transition: all 0.2s;
+}
+
+.time-range-btn:hover {
+ border-color: var(--primary-color);
+ color: var(--primary-color);
+}
+
+.time-range-btn.active {
+ background: var(--primary-color);
color: white;
- font-weight: bold;
- font-size: 0.95em;
- text-transform: uppercase;
- letter-spacing: 0.5px;
-}
-
-tr:nth-child(even) {
- background-color: #F7FAFC; /* Very light blue for alternate rows */
-}
-
-tr:hover {
- background-color: #E9F2FA; /* Light blue for hover */
-}
-
-/* Chart Container in Sidebar */
-.chart-container {
- position: relative;
- height: 300px; /* Adjust height as needed */
- width: 100%;
- margin-bottom: 20px;
+ border-color: var(--primary-color);
+ box-shadow: 0 2px 4px rgba(37, 99, 235, 0.2);
}
/* Footer */
.footer {
text-align: center;
- margin-top: 40px;
- font-size: 0.85em;
- color: #6C757D;
+ margin-top: 4rem;
+ padding-top: 2rem;
+ border-top: 1px solid var(--border-color);
+ color: var(--text-muted);
+ font-size: 0.875rem;
+}
+
+/* Animations */
+@keyframes fadeIn {
+ from { opacity: 0; transform: translateY(10px); }
+ to { opacity: 1; transform: translateY(0); }
+}
+
+.tab-content {
+ display: none;
+}
+
+.tab-content.active {
+ display: block;
+ animation: fadeIn 0.4s ease-out;
+}
+
+/* Responsive */
+@media (max-width: 1200px) {
+ .dashboard-layout {
+ grid-template-columns: 1fr;
+ }
+
+ .sidebar-content {
+ position: static;
+ max-height: none;
+ margin-top: 2rem;
+ }
+}
+
+@media (max-width: 768px) {
+ .container {
+ padding: 1rem;
+ }
+
+ .chart-grid {
+ grid-template-columns: 1fr;
+ }
+
+ .summary-cards {
+ grid-template-columns: 1fr 1fr;
+ }
+
+ h1 {
+ font-size: 1.5rem;
+ }
+
+ .table-container {
+ overflow-x: auto;
+ }
}
diff --git a/src/chat/utils/templates/report.html b/src/chat/utils/templates/report.html
index b70146063..6fa4cacda 100644
--- a/src/chat/utils/templates/report.html
+++ b/src/chat/utils/templates/report.html
@@ -4,20 +4,31 @@
{{ report_title }}
-
+
+
+
+
+
{{ report_title }}
-
统计截止时间: {{ generation_time }}
+
{{ tab_list }}
{{ tab_content }}
+
-
+
+