style: unify code style and modernize the codebase

A full-codebase pass to unify style and modernize the code. The main changes:

- Standardized string arguments to built-ins such as `hasattr` on double quotes (`"`) instead of single quotes (`'`).
- Adopted modern type annotations, e.g. `Optional[T]` → `T | None` (PEP 604) and `List[T]` → `list[T]` (PEP 585).
- Removed the no-longer-needed `# -*- coding: utf-8 -*-` declarations (a Python 2 holdover; Python 3 source defaults to UTF-8).
- Cleaned up redundant blank lines, stale comments, and unused or duplicate imports.
- Normalized end-of-file newlines.
- Tidied some log output and string formatting (e.g. `f"{e!s}"` in place of `f"{str(e)}"`).

These changes aim to improve readability, consistency, and maintainability, and to bring the code in line with modern Python conventions. A minimal before/after sketch of these conventions is shown below.
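For illustration only, a minimal before/after sketch of the conventions above. The `read_lines` helper is hypothetical (it does not appear in this diff), and the `T | None` union syntax assumes Python 3.10+ at runtime (earlier versions need `from __future__ import annotations`):

    # Before: typing-module generics, single-quoted strings, str(e)
    from pathlib import Path
    from typing import List, Optional

    def read_lines(path: str, defaults: Optional[List[str]] = None) -> List[str]:
        if not hasattr(Path, 'read_text'):
            return defaults or []
        try:
            return Path(path).read_text().splitlines()
        except OSError as e:
            raise RuntimeError(f"read failed: {str(e)}") from e

    # After: builtin generics (PEP 585), unions (PEP 604), double quotes, !s conversion
    def read_lines(path: str, defaults: list[str] | None = None) -> list[str]:
        if not hasattr(Path, "read_text"):
            return defaults or []
        try:
            return Path(path).read_text().splitlines()
        except OSError as e:
            raise RuntimeError(f"read failed: {e!s}") from e

Note that `f"{e!s}"` forces the `str()` conversion inside the f-string, so it is equivalent to `f"{str(e)}"` without the extra call.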
minecraft1024a
2025-10-05 13:21:27 +08:00
parent ad613a180b
commit 2c74b472ab
47 changed files with 274 additions and 287 deletions

bot.py (8 changes)
View File

@@ -103,7 +103,7 @@ async def graceful_shutdown(main_system_instance):
logger.info("正在优雅关闭麦麦...")
# 停止MainSystem中的组件它会处理服务器等
if main_system_instance and hasattr(main_system_instance, 'shutdown'):
if main_system_instance and hasattr(main_system_instance, "shutdown"):
logger.info("正在关闭MainSystem...")
await main_system_instance.shutdown()
@@ -111,7 +111,7 @@ async def graceful_shutdown(main_system_instance):
try:
from src.chat.message_receive.chat_stream import get_chat_manager
chat_manager = get_chat_manager()
if hasattr(chat_manager, '_stop_auto_save'):
if hasattr(chat_manager, "_stop_auto_save"):
logger.info("正在停止聊天管理器...")
chat_manager._stop_auto_save()
except Exception as e:
@@ -120,7 +120,7 @@ async def graceful_shutdown(main_system_instance):
# 停止情绪管理器
try:
from src.mood.mood_manager import mood_manager
if hasattr(mood_manager, 'stop'):
if hasattr(mood_manager, "stop"):
logger.info("正在停止情绪管理器...")
await mood_manager.stop()
except Exception as e:
@@ -129,7 +129,7 @@ async def graceful_shutdown(main_system_instance):
# 停止记忆系统
try:
from src.chat.memory_system.memory_manager import memory_manager
if hasattr(memory_manager, 'shutdown'):
if hasattr(memory_manager, "shutdown"):
logger.info("正在停止记忆系统...")
await memory_manager.shutdown()
except Exception as e:

View File

@@ -3,10 +3,10 @@ from typing import Literal
from fastapi import APIRouter, HTTPException, Query
from src.config.config import global_config
from src.plugin_system.apis import message_api, chat_api, person_api
from src.chat.message_receive.chat_stream import get_chat_manager
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system.apis import message_api, person_api
logger = get_logger("HTTP消息API")

View File

@@ -112,7 +112,7 @@ class InterestManager:
# 返回默认结果
return InterestCalculationResult(
success=False,
message_id=getattr(message, 'message_id', ''),
message_id=getattr(message, "message_id", ""),
interest_value=0.3,
error_message="没有可用的兴趣值计算组件"
)
@@ -129,7 +129,7 @@ class InterestManager:
logger.warning(f"兴趣值计算超时 ({timeout}s),消息 {getattr(message, 'message_id', '')} 使用默认兴趣值 0.5")
return InterestCalculationResult(
success=True,
message_id=getattr(message, 'message_id', ''),
message_id=getattr(message, "message_id", ""),
interest_value=0.5, # 固定默认兴趣值
should_reply=False,
should_act=False,
@@ -140,9 +140,9 @@ class InterestManager:
logger.error(f"兴趣值计算异常: {e}")
return InterestCalculationResult(
success=False,
message_id=getattr(message, 'message_id', ''),
message_id=getattr(message, "message_id", ""),
interest_value=0.3,
error_message=f"计算异常: {str(e)}"
error_message=f"计算异常: {e!s}"
)
async def _async_calculate(self, message: "DatabaseMessages") -> InterestCalculationResult:
@@ -168,9 +168,9 @@ class InterestManager:
logger.error(f"兴趣值计算异常: {e}", exc_info=True)
return InterestCalculationResult(
success=False,
message_id=getattr(message, 'message_id', ''),
message_id=getattr(message, "message_id", ""),
interest_value=0.0,
error_message=f"计算异常: {str(e)}",
error_message=f"计算异常: {e!s}",
calculation_time=time.time() - start_time
)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
海马体双峰分布采样器
基于旧版海马体的采样策略,适配新版记忆系统
@@ -8,16 +7,15 @@
import asyncio
import random
import time
from datetime import datetime, timedelta
from typing import List, Optional, Tuple, Dict, Any
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Any
import numpy as np
import orjson
from src.chat.utils.chat_message_builder import (
get_raw_msg_by_timestamp,
build_readable_messages,
get_raw_msg_by_timestamp,
get_raw_msg_by_timestamp_with_chat,
)
from src.chat.utils.utils import translate_timestamp_to_human_readable
@@ -47,7 +45,7 @@ class HippocampusSampleConfig:
batch_size: int = 5 # 批处理大小
@classmethod
def from_global_config(cls) -> 'HippocampusSampleConfig':
def from_global_config(cls) -> "HippocampusSampleConfig":
"""从全局配置创建海马体采样配置"""
config = global_config.memory.hippocampus_distribution_config
return cls(
@@ -74,12 +72,12 @@ class HippocampusSampler:
self.is_running = False
# 记忆构建模型
self.memory_builder_model: Optional[LLMRequest] = None
self.memory_builder_model: LLMRequest | None = None
# 统计信息
self.sample_count = 0
self.success_count = 0
self.last_sample_results: List[Dict[str, Any]] = []
self.last_sample_results: list[dict[str, Any]] = []
async def initialize(self):
"""初始化采样器"""
@@ -101,7 +99,7 @@ class HippocampusSampler:
logger.error(f"❌ 海马体采样器初始化失败: {e}")
raise
def generate_time_samples(self) -> List[datetime]:
def generate_time_samples(self) -> list[datetime]:
"""生成双峰分布的时间采样点"""
# 计算每个分布的样本数
recent_samples = max(1, int(self.config.total_samples * self.config.recent_weight))
@@ -132,7 +130,7 @@ class HippocampusSampler:
# 按时间排序(从最早到最近)
return sorted(timestamps)
async def collect_message_samples(self, target_timestamp: float) -> Optional[List[Dict[str, Any]]]:
async def collect_message_samples(self, target_timestamp: float) -> list[dict[str, Any]] | None:
"""收集指定时间戳附近的消息样本"""
try:
# 随机时间窗口5-30分钟
@@ -190,7 +188,7 @@ class HippocampusSampler:
logger.error(f"收集消息样本失败: {e}")
return None
async def build_memory_from_samples(self, messages: List[Dict[str, Any]], target_timestamp: float) -> Optional[str]:
async def build_memory_from_samples(self, messages: list[dict[str, Any]], target_timestamp: float) -> str | None:
"""从消息样本构建记忆"""
if not messages or not self.memory_system or not self.memory_builder_model:
return None
@@ -262,7 +260,7 @@ class HippocampusSampler:
logger.error(f"海马体采样构建记忆失败: {e}")
return None
async def perform_sampling_cycle(self) -> Dict[str, Any]:
async def perform_sampling_cycle(self) -> dict[str, Any]:
"""执行一次完整的采样周期(优化版:批量融合构建)"""
if not self.should_sample():
return {"status": "skipped", "reason": "interval_not_met"}
@@ -363,7 +361,7 @@ class HippocampusSampler:
"duration": time.time() - start_time,
}
async def _collect_all_message_samples(self, time_samples: List[datetime]) -> List[List[Dict[str, Any]]]:
async def _collect_all_message_samples(self, time_samples: list[datetime]) -> list[list[dict[str, Any]]]:
"""批量收集所有时间点的消息样本"""
collected_messages = []
max_concurrent = min(5, len(time_samples)) # 提高并发数到5
@@ -394,7 +392,7 @@ class HippocampusSampler:
return collected_messages
async def _fuse_and_deduplicate_messages(self, collected_messages: List[List[Dict[str, Any]]]) -> List[List[Dict[str, Any]]]:
async def _fuse_and_deduplicate_messages(self, collected_messages: list[list[dict[str, Any]]]) -> list[list[dict[str, Any]]]:
"""融合和去重消息样本"""
if not collected_messages:
return []
@@ -450,7 +448,7 @@ class HippocampusSampler:
# 返回原始消息组作为备选
return collected_messages[:5] # 限制返回数量
def _merge_adjacent_messages(self, messages: List[Dict[str, Any]], time_gap: int = 1800) -> List[List[Dict[str, Any]]]:
def _merge_adjacent_messages(self, messages: list[dict[str, Any]], time_gap: int = 1800) -> list[list[dict[str, Any]]]:
"""合并时间间隔内的消息"""
if not messages:
return []
@@ -481,7 +479,7 @@ class HippocampusSampler:
return result_groups
async def _build_batch_memory(self, fused_messages: List[List[Dict[str, Any]]], time_samples: List[datetime]) -> Dict[str, Any]:
async def _build_batch_memory(self, fused_messages: list[list[dict[str, Any]]], time_samples: list[datetime]) -> dict[str, Any]:
"""批量构建记忆"""
if not fused_messages:
return {"memory_count": 0, "memories": []}
@@ -557,7 +555,7 @@ class HippocampusSampler:
logger.error(f"批量构建记忆失败: {e}")
return {"memory_count": 0, "error": str(e)}
async def _build_fused_conversation_text(self, fused_messages: List[List[Dict[str, Any]]]) -> str:
async def _build_fused_conversation_text(self, fused_messages: list[list[dict[str, Any]]]) -> str:
"""构建融合后的对话文本"""
try:
# 添加批次标识
@@ -589,7 +587,7 @@ class HippocampusSampler:
logger.error(f"构建融合文本失败: {e}")
return ""
async def _fallback_individual_build(self, fused_messages: List[List[Dict[str, Any]]]) -> Dict[str, Any]:
async def _fallback_individual_build(self, fused_messages: list[list[dict[str, Any]]]) -> dict[str, Any]:
"""备选方案:单独构建每个消息组"""
total_memories = []
total_count = 0
@@ -609,7 +607,7 @@ class HippocampusSampler:
"fallback_mode": True
}
async def process_sample_timestamp(self, target_timestamp: float) -> Optional[str]:
async def process_sample_timestamp(self, target_timestamp: float) -> str | None:
"""处理单个时间戳采样(保留作为备选方法)"""
try:
# 收集消息样本
@@ -676,7 +674,7 @@ class HippocampusSampler:
self.is_running = False
logger.info("🛑 停止海马体后台采样任务")
def get_sampling_stats(self) -> Dict[str, Any]:
def get_sampling_stats(self) -> dict[str, Any]:
"""获取采样统计信息"""
success_rate = (self.success_count / self.sample_count * 100) if self.sample_count > 0 else 0
@@ -713,7 +711,7 @@ class HippocampusSampler:
# 全局海马体采样器实例
_hippocampus_sampler: Optional[HippocampusSampler] = None
_hippocampus_sampler: HippocampusSampler | None = None
def get_hippocampus_sampler(memory_system=None) -> HippocampusSampler:

View File

@@ -32,7 +32,7 @@ import time
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Type, TypeVar
from typing import Any, TypeVar
E = TypeVar("E", bound=Enum)
@@ -503,7 +503,7 @@ class MemoryBuilder:
logger.warning(f"无法解析未知的记忆类型 '{type_str}',回退到上下文类型")
return MemoryType.CONTEXTUAL
def _parse_enum_value(self, enum_cls: Type[E], raw_value: Any, default: E, field_name: str) -> E:
def _parse_enum_value(self, enum_cls: type[E], raw_value: Any, default: E, field_name: str) -> E:
"""解析枚举值,兼容数字/字符串表示"""
if isinstance(raw_value, enum_cls):
return raw_value

View File

@@ -556,11 +556,11 @@ class MemorySystem:
context = dict(context or {})
# 获取配置的采样模式
sampling_mode = getattr(global_config.memory, 'memory_sampling_mode', 'precision')
sampling_mode = getattr(global_config.memory, "memory_sampling_mode", "precision")
current_mode = MemorySamplingMode(sampling_mode)
context['__sampling_mode'] = current_mode.value
context["__sampling_mode"] = current_mode.value
logger.debug(f"使用记忆采样模式: {current_mode.value}")
# 根据采样模式处理记忆
@@ -636,7 +636,7 @@ class MemorySystem:
# 检查信息价值阈值
value_score = await self._assess_information_value(conversation_text, normalized_context)
threshold = getattr(global_config.memory, 'precision_memory_reply_threshold', 0.5)
threshold = getattr(global_config.memory, "precision_memory_reply_threshold", 0.5)
if value_score < threshold:
logger.debug(f"信息价值评分 {value_score:.2f} 低于阈值 {threshold},跳过记忆构建")
@@ -1614,8 +1614,8 @@ async def initialize_memory_system(llm_model: LLMRequest | None = None):
await memory_system.initialize()
# 根据配置启动海马体采样
sampling_mode = getattr(global_config.memory, 'memory_sampling_mode', 'immediate')
if sampling_mode in ['hippocampus', 'all']:
sampling_mode = getattr(global_config.memory, "memory_sampling_mode", "immediate")
if sampling_mode in ["hippocampus", "all"]:
memory_system.start_hippocampus_sampling()
return memory_system

View File

@@ -4,14 +4,13 @@
"""
import asyncio
import psutil
import time
from typing import Dict, List, Optional, Set, Tuple
from dataclasses import dataclass, field
from enum import Enum
import psutil
from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import ChatStream
logger = get_logger("adaptive_stream_manager")
@@ -71,16 +70,16 @@ class AdaptiveStreamManager:
# 当前状态
self.current_limit = base_concurrent_limit
self.active_streams: Set[str] = set()
self.pending_streams: Set[str] = set()
self.stream_metrics: Dict[str, StreamMetrics] = {}
self.active_streams: set[str] = set()
self.pending_streams: set[str] = set()
self.stream_metrics: dict[str, StreamMetrics] = {}
# 异步信号量
self.semaphore = asyncio.Semaphore(base_concurrent_limit)
self.priority_semaphore = asyncio.Semaphore(5) # 高优先级专用信号量
# 系统监控
self.system_metrics: List[SystemMetrics] = []
self.system_metrics: list[SystemMetrics] = []
self.last_adjustment_time = 0.0
# 统计信息
@@ -95,8 +94,8 @@ class AdaptiveStreamManager:
}
# 监控任务
self.monitor_task: Optional[asyncio.Task] = None
self.adjustment_task: Optional[asyncio.Task] = None
self.monitor_task: asyncio.Task | None = None
self.adjustment_task: asyncio.Task | None = None
self.is_running = False
logger.info(f"自适应流管理器初始化完成 (base_limit={base_concurrent_limit}, max_limit={max_concurrent_limit})")
@@ -443,7 +442,7 @@ class AdaptiveStreamManager:
if hasattr(metrics, key):
setattr(metrics, key, value)
def get_stats(self) -> Dict:
def get_stats(self) -> dict:
"""获取统计信息"""
stats = self.stats.copy()
stats.update({
@@ -465,7 +464,7 @@ class AdaptiveStreamManager:
# 全局自适应管理器实例
_adaptive_manager: Optional[AdaptiveStreamManager] = None
_adaptive_manager: AdaptiveStreamManager | None = None
def get_adaptive_stream_manager() -> AdaptiveStreamManager:

View File

@@ -5,9 +5,9 @@
import asyncio
import time
from typing import Any, Dict, List, Optional
from dataclasses import dataclass, field
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Any
from src.common.database.sqlalchemy_database_api import get_db_session
from src.common.database.sqlalchemy_models import ChatStreams
@@ -21,7 +21,7 @@ logger = get_logger("batch_database_writer")
class StreamUpdatePayload:
"""流更新数据结构"""
stream_id: str
update_data: Dict[str, Any]
update_data: dict[str, Any]
priority: int = 0 # 优先级,数字越大优先级越高
timestamp: float = field(default_factory=time.time)
@@ -47,7 +47,7 @@ class BatchDatabaseWriter:
# 运行状态
self.is_running = False
self.writer_task: Optional[asyncio.Task] = None
self.writer_task: asyncio.Task | None = None
# 统计信息
self.stats = {
@@ -60,7 +60,7 @@ class BatchDatabaseWriter:
}
# 按优先级分类的批次
self.priority_batches: Dict[int, List[StreamUpdatePayload]] = defaultdict(list)
self.priority_batches: dict[int, list[StreamUpdatePayload]] = defaultdict(list)
logger.info(f"批量数据库写入器初始化完成 (batch_size={batch_size}, interval={flush_interval}s)")
@@ -98,7 +98,7 @@ class BatchDatabaseWriter:
async def schedule_stream_update(
self,
stream_id: str,
update_data: Dict[str, Any],
update_data: dict[str, Any],
priority: int = 0
) -> bool:
"""
@@ -166,7 +166,7 @@ class BatchDatabaseWriter:
await self._flush_all_batches()
logger.info("批量写入循环结束")
async def _collect_batch(self) -> List[StreamUpdatePayload]:
async def _collect_batch(self) -> list[StreamUpdatePayload]:
"""收集一个批次的数据"""
batch = []
deadline = time.time() + self.flush_interval
@@ -189,7 +189,7 @@ class BatchDatabaseWriter:
return batch
async def _write_batch(self, batch: List[StreamUpdatePayload]):
async def _write_batch(self, batch: list[StreamUpdatePayload]):
"""批量写入数据库"""
if not batch:
return
@@ -228,7 +228,7 @@ class BatchDatabaseWriter:
except Exception as single_e:
logger.error(f"单个写入也失败: {single_e}")
async def _batch_write_to_database(self, payloads: List[StreamUpdatePayload]):
async def _batch_write_to_database(self, payloads: list[StreamUpdatePayload]):
"""批量写入数据库"""
async with get_db_session() as session:
for payload in payloads:
@@ -268,7 +268,7 @@ class BatchDatabaseWriter:
await session.commit()
async def _direct_write(self, stream_id: str, update_data: Dict[str, Any]):
async def _direct_write(self, stream_id: str, update_data: dict[str, Any]):
"""直接写入数据库(降级方案)"""
async with get_db_session() as session:
if global_config.database.database_type == "sqlite":
@@ -315,7 +315,7 @@ class BatchDatabaseWriter:
if remaining_batch:
await self._write_batch(remaining_batch)
def get_stats(self) -> Dict[str, Any]:
def get_stats(self) -> dict[str, Any]:
"""获取统计信息"""
stats = self.stats.copy()
stats["is_running"] = self.is_running
@@ -324,7 +324,7 @@ class BatchDatabaseWriter:
# 全局批量写入器实例
_batch_writer: Optional[BatchDatabaseWriter] = None
_batch_writer: BatchDatabaseWriter | None = None
def get_batch_writer() -> BatchDatabaseWriter:

View File

@@ -117,7 +117,7 @@ class StreamLoopManager:
# 使用自适应流管理器获取槽位
use_adaptive = False
try:
from src.chat.message_manager.adaptive_stream_manager import get_adaptive_stream_manager, StreamPriority
from src.chat.message_manager.adaptive_stream_manager import get_adaptive_stream_manager
adaptive_manager = get_adaptive_stream_manager()
if adaptive_manager.is_running:
@@ -137,7 +137,7 @@ class StreamLoopManager:
else:
logger.debug(f"自适应管理器拒绝槽位请求: {stream_id},尝试回退方案")
else:
logger.debug(f"自适应管理器未运行,使用原始方法")
logger.debug("自适应管理器未运行,使用原始方法")
except Exception as e:
logger.debug(f"自适应管理器获取槽位失败,使用原始方法: {e}")

View File

@@ -5,13 +5,13 @@
import asyncio
import time
from typing import Dict, List, Optional, Set
from dataclasses import dataclass
from collections import OrderedDict
from dataclasses import dataclass
from maim_message import GroupInfo, UserInfo
from src.common.logger import get_logger
from src.chat.message_receive.optimized_chat_stream import OptimizedChatStream, create_optimized_chat_stream
from src.common.logger import get_logger
logger = get_logger("stream_cache_manager")
@@ -52,14 +52,14 @@ class TieredStreamCache:
# 三层缓存存储
self.hot_cache: OrderedDict[str, OptimizedChatStream] = OrderedDict() # 热数据LRU
self.warm_storage: Dict[str, tuple[OptimizedChatStream, float]] = {} # 温数据(最后访问时间)
self.cold_storage: Dict[str, tuple[OptimizedChatStream, float]] = {} # 冷数据(最后访问时间)
self.warm_storage: dict[str, tuple[OptimizedChatStream, float]] = {} # 温数据(最后访问时间)
self.cold_storage: dict[str, tuple[OptimizedChatStream, float]] = {} # 冷数据(最后访问时间)
# 统计信息
self.stats = StreamCacheStats()
# 清理任务
self.cleanup_task: Optional[asyncio.Task] = None
self.cleanup_task: asyncio.Task | None = None
self.is_running = False
logger.info(f"分层流缓存管理器初始化完成 (hot:{max_hot_size}, warm:{max_warm_size}, cold:{max_cold_size})")
@@ -96,8 +96,8 @@ class TieredStreamCache:
stream_id: str,
platform: str,
user_info: UserInfo,
group_info: Optional[GroupInfo] = None,
data: Optional[Dict] = None,
group_info: GroupInfo | None = None,
data: dict | None = None,
) -> OptimizedChatStream:
"""获取或创建流 - 优化版本"""
current_time = time.time()
@@ -255,7 +255,7 @@ class TieredStreamCache:
hot_to_demote = []
for stream_id, stream in self.hot_cache.items():
# 获取最后访问时间(简化:使用创建时间作为近似)
last_access = getattr(stream, 'last_active_time', stream.create_time)
last_access = getattr(stream, "last_active_time", stream.create_time)
if current_time - last_access > self.hot_timeout:
hot_to_demote.append(stream_id)
@@ -341,7 +341,7 @@ class TieredStreamCache:
logger.info("所有缓存已清空")
async def get_stream_snapshot(self, stream_id: str) -> Optional[OptimizedChatStream]:
async def get_stream_snapshot(self, stream_id: str) -> OptimizedChatStream | None:
"""获取流的快照(不修改缓存状态)"""
if stream_id in self.hot_cache:
return self.hot_cache[stream_id].create_snapshot()
@@ -351,13 +351,13 @@ class TieredStreamCache:
return self.cold_storage[stream_id][0].create_snapshot()
return None
def get_cached_stream_ids(self) -> Set[str]:
def get_cached_stream_ids(self) -> set[str]:
"""获取所有缓存的流ID"""
return set(self.hot_cache.keys()) | set(self.warm_storage.keys()) | set(self.cold_storage.keys())
# 全局缓存管理器实例
_cache_manager: Optional[TieredStreamCache] = None
_cache_manager: TieredStreamCache | None = None
def get_stream_cache_manager() -> TieredStreamCache:

View File

@@ -313,11 +313,11 @@ class ChatStream:
except Exception as e:
logger.error(f"计算消息兴趣值失败: {e}", exc_info=True)
# 异常情况下使用默认值
if hasattr(db_message, 'interest_value'):
if hasattr(db_message, "interest_value"):
db_message.interest_value = 0.3
if hasattr(db_message, 'should_reply'):
if hasattr(db_message, "should_reply"):
db_message.should_reply = False
if hasattr(db_message, 'should_act'):
if hasattr(db_message, "should_act"):
db_message.should_act = False
def _extract_reply_from_segment(self, segment) -> str | None:
@@ -894,10 +894,10 @@ def _convert_to_original_stream(self, optimized_stream) -> "ChatStream":
original_stream.saved = optimized_stream.saved
# 复制上下文信息(如果存在)
if hasattr(optimized_stream, '_stream_context') and optimized_stream._stream_context:
if hasattr(optimized_stream, "_stream_context") and optimized_stream._stream_context:
original_stream.stream_context = optimized_stream._stream_context
if hasattr(optimized_stream, '_context_manager') and optimized_stream._context_manager:
if hasattr(optimized_stream, "_context_manager") and optimized_stream._context_manager:
original_stream.context_manager = optimized_stream._context_manager
return original_stream

View File

@@ -3,17 +3,12 @@
避免不必要的深拷贝开销,提升多流并发性能
"""
import asyncio
import copy
import hashlib
import time
from typing import TYPE_CHECKING, Any, Dict, Optional
from typing import TYPE_CHECKING, Any
from maim_message import GroupInfo, UserInfo
from rich.traceback import install
from src.common.database.sqlalchemy_database_api import get_db_session
from src.common.database.sqlalchemy_models import ChatStreams
from src.common.logger import get_logger
from src.config.config import global_config
@@ -28,7 +23,7 @@ logger = get_logger("optimized_chat_stream")
class SharedContext:
"""共享上下文数据 - 只读数据结构"""
def __init__(self, stream_id: str, platform: str, user_info: UserInfo, group_info: Optional[GroupInfo] = None):
def __init__(self, stream_id: str, platform: str, user_info: UserInfo, group_info: GroupInfo | None = None):
self.stream_id = stream_id
self.platform = platform
self.user_info = user_info
@@ -37,7 +32,7 @@ class SharedContext:
self._frozen = True
def __setattr__(self, name, value):
if hasattr(self, '_frozen') and self._frozen and name not in ['_frozen']:
if hasattr(self, "_frozen") and self._frozen and name not in ["_frozen"]:
raise AttributeError(f"SharedContext is frozen, cannot modify {name}")
super().__setattr__(name, value)
@@ -46,7 +41,7 @@ class LocalChanges:
"""本地修改跟踪器"""
def __init__(self):
self._changes: Dict[str, Any] = {}
self._changes: dict[str, Any] = {}
self._dirty = False
def set_change(self, key: str, value: Any):
@@ -62,7 +57,7 @@ class LocalChanges:
"""是否有修改"""
return self._dirty
def get_changes(self) -> Dict[str, Any]:
def get_changes(self) -> dict[str, Any]:
"""获取所有修改"""
return self._changes.copy()
@@ -80,8 +75,8 @@ class OptimizedChatStream:
stream_id: str,
platform: str,
user_info: UserInfo,
group_info: Optional[GroupInfo] = None,
data: Optional[Dict] = None,
group_info: GroupInfo | None = None,
data: dict | None = None,
):
# 共享的只读数据
self._shared_context = SharedContext(
@@ -129,42 +124,42 @@ class OptimizedChatStream:
"""修改用户信息时触发写时复制"""
self._ensure_copy_on_write()
# 由于SharedContext是frozen的我们需要在本地修改中记录
self._local_changes.set_change('user_info', value)
self._local_changes.set_change("user_info", value)
@property
def group_info(self) -> Optional[GroupInfo]:
if self._local_changes.has_changes() and 'group_info' in self._local_changes._changes:
return self._local_changes.get_change('group_info')
def group_info(self) -> GroupInfo | None:
if self._local_changes.has_changes() and "group_info" in self._local_changes._changes:
return self._local_changes.get_change("group_info")
return self._shared_context.group_info
@group_info.setter
def group_info(self, value: Optional[GroupInfo]):
def group_info(self, value: GroupInfo | None):
"""修改群组信息时触发写时复制"""
self._ensure_copy_on_write()
self._local_changes.set_change('group_info', value)
self._local_changes.set_change("group_info", value)
@property
def create_time(self) -> float:
if self._local_changes.has_changes() and 'create_time' in self._local_changes._changes:
return self._local_changes.get_change('create_time')
if self._local_changes.has_changes() and "create_time" in self._local_changes._changes:
return self._local_changes.get_change("create_time")
return self._shared_context.create_time
@property
def last_active_time(self) -> float:
return self._local_changes.get_change('last_active_time', self.create_time)
return self._local_changes.get_change("last_active_time", self.create_time)
@last_active_time.setter
def last_active_time(self, value: float):
self._local_changes.set_change('last_active_time', value)
self._local_changes.set_change("last_active_time", value)
self.saved = False
@property
def sleep_pressure(self) -> float:
return self._local_changes.get_change('sleep_pressure', 0.0)
return self._local_changes.get_change("sleep_pressure", 0.0)
@sleep_pressure.setter
def sleep_pressure(self, value: float):
self._local_changes.set_change('sleep_pressure', value)
self._local_changes.set_change("sleep_pressure", value)
self.saved = False
def _ensure_copy_on_write(self):
@@ -176,14 +171,14 @@ class OptimizedChatStream:
def _get_effective_user_info(self) -> UserInfo:
"""获取有效的用户信息"""
if self._local_changes.has_changes() and 'user_info' in self._local_changes._changes:
return self._local_changes.get_change('user_info')
if self._local_changes.has_changes() and "user_info" in self._local_changes._changes:
return self._local_changes.get_change("user_info")
return self._shared_context.user_info
def _get_effective_group_info(self) -> Optional[GroupInfo]:
def _get_effective_group_info(self) -> GroupInfo | None:
"""获取有效的群组信息"""
if self._local_changes.has_changes() and 'group_info' in self._local_changes._changes:
return self._local_changes.get_change('group_info')
if self._local_changes.has_changes() and "group_info" in self._local_changes._changes:
return self._local_changes.get_change("group_info")
return self._shared_context.group_info
def update_active_time(self):
@@ -199,6 +194,7 @@ class OptimizedChatStream:
# 将MessageRecv转换为DatabaseMessages并设置到stream_context
import json
from src.common.data_models.database_data_model import DatabaseMessages
message_info = getattr(message, "message_info", {})
@@ -298,7 +294,7 @@ class OptimizedChatStream:
self._create_stream_context()
return self._context_manager
def to_dict(self) -> Dict[str, Any]:
def to_dict(self) -> dict[str, Any]:
"""转换为字典格式 - 考虑本地修改"""
user_info = self._get_effective_user_info()
group_info = self._get_effective_group_info()
@@ -319,7 +315,7 @@ class OptimizedChatStream:
}
@classmethod
def from_dict(cls, data: Dict) -> "OptimizedChatStream":
def from_dict(cls, data: dict) -> "OptimizedChatStream":
"""从字典创建实例"""
user_info = UserInfo.from_dict(data.get("user_info", {})) if data.get("user_info") else None
group_info = GroupInfo.from_dict(data.get("group_info", {})) if data.get("group_info") else None
@@ -481,8 +477,8 @@ def create_optimized_chat_stream(
stream_id: str,
platform: str,
user_info: UserInfo,
group_info: Optional[GroupInfo] = None,
data: Optional[Dict] = None,
group_info: GroupInfo | None = None,
data: dict | None = None,
) -> OptimizedChatStream:
"""创建优化版聊天流实例"""
return OptimizedChatStream(

View File

@@ -15,7 +15,7 @@ from src.plugin_system.base.component_types import ActionActivationType, ActionI
from src.plugin_system.core.global_announcement_manager import global_announcement_manager
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
pass
logger = get_logger("action_manager")

View File

@@ -9,8 +9,8 @@ import time
from collections import defaultdict
from pathlib import Path
import rjieba
import orjson
import rjieba
from pypinyin import Style, pinyin
from src.common.logger import get_logger

View File

@@ -6,8 +6,8 @@ import time
from collections import Counter
from typing import Any
import rjieba
import numpy as np
import rjieba
from maim_message import UserInfo
from src.chat.message_receive.chat_stream import get_chat_manager

View File

@@ -5,9 +5,8 @@
import asyncio
import time
import weakref
from contextlib import asynccontextmanager
from typing import Any, Dict, Optional, Set
from typing import Any
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
@@ -69,7 +68,7 @@ class ConnectionPoolManager:
self.max_idle = max_idle
# 连接池
self._connections: Set[ConnectionInfo] = set()
self._connections: set[ConnectionInfo] = set()
self._lock = asyncio.Lock()
# 统计信息
@@ -83,7 +82,7 @@ class ConnectionPoolManager:
}
# 后台清理任务
self._cleanup_task: Optional[asyncio.Task] = None
self._cleanup_task: asyncio.Task | None = None
self._should_cleanup = False
logger.info(f"连接池管理器初始化完成 (最大池大小: {max_pool_size})")
@@ -144,7 +143,7 @@ class ConnectionPoolManager:
yield connection_info.session
except Exception as e:
except Exception:
# 发生错误时回滚连接
if connection_info and connection_info.session:
try:
@@ -157,7 +156,7 @@ class ConnectionPoolManager:
if connection_info:
connection_info.mark_released()
async def _get_reusable_connection(self, session_factory: async_sessionmaker[AsyncSession]) -> Optional[ConnectionInfo]:
async def _get_reusable_connection(self, session_factory: async_sessionmaker[AsyncSession]) -> ConnectionInfo | None:
"""获取可复用的连接"""
async with self._lock:
# 清理过期连接
@@ -231,7 +230,7 @@ class ConnectionPoolManager:
self._connections.clear()
logger.info("所有连接已关闭")
def get_stats(self) -> Dict[str, Any]:
def get_stats(self) -> dict[str, Any]:
"""获取连接池统计信息"""
return {
**self._stats,
@@ -244,7 +243,7 @@ class ConnectionPoolManager:
# 全局连接池管理器实例
_connection_pool_manager: Optional[ConnectionPoolManager] = None
_connection_pool_manager: ConnectionPoolManager | None = None
def get_connection_pool_manager() -> ConnectionPoolManager:

View File

@@ -2,15 +2,16 @@ import os
from rich.traceback import install
from src.common.database.connection_pool_manager import start_connection_pool, stop_connection_pool
# 数据库批量调度器和连接池
from src.common.database.db_batch_scheduler import get_db_batch_scheduler
# SQLAlchemy相关导入
from src.common.database.sqlalchemy_init import initialize_database_compat
from src.common.database.sqlalchemy_models import get_db_session, get_engine
from src.common.logger import get_logger
# 数据库批量调度器和连接池
from src.common.database.db_batch_scheduler import get_db_batch_scheduler
from src.common.database.connection_pool_manager import start_connection_pool, stop_connection_pool
install(extra_lines=3)
_sql_engine = None

View File

@@ -6,19 +6,19 @@
import asyncio
import time
from collections import defaultdict, deque
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TypeVar
from collections.abc import Callable
from contextlib import asynccontextmanager
from dataclasses import dataclass
from typing import Any, TypeVar
from sqlalchemy import select, delete, insert, update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import delete, insert, select, update
from src.common.database.sqlalchemy_database_api import get_db_session
from src.common.logger import get_logger
logger = get_logger("db_batch_scheduler")
T = TypeVar('T')
T = TypeVar("T")
@dataclass
@@ -26,10 +26,10 @@ class BatchOperation:
"""批量操作基础类"""
operation_type: str # 'select', 'insert', 'update', 'delete'
model_class: Any
conditions: Dict[str, Any]
data: Optional[Dict[str, Any]] = None
callback: Optional[Callable] = None
future: Optional[asyncio.Future] = None
conditions: dict[str, Any]
data: dict[str, Any] | None = None
callback: Callable | None = None
future: asyncio.Future | None = None
timestamp: float = 0.0
def __post_init__(self):
@@ -42,7 +42,7 @@ class BatchResult:
"""批量操作结果"""
success: bool
data: Any = None
error: Optional[str] = None
error: str | None = None
class DatabaseBatchScheduler:
@@ -57,23 +57,23 @@ class DatabaseBatchScheduler:
self.max_queue_size = max_queue_size
# 操作队列,按操作类型和模型分类
self.operation_queues: Dict[str, deque] = defaultdict(deque)
self.operation_queues: dict[str, deque] = defaultdict(deque)
# 调度控制
self._scheduler_task: Optional[asyncio.Task] = None
self._scheduler_task: asyncio.Task | None = None
self._is_running = bool = False
self._lock = asyncio.Lock()
# 统计信息
self.stats = {
'total_operations': 0,
'batched_operations': 0,
'cache_hits': 0,
'execution_time': 0.0
"total_operations": 0,
"batched_operations": 0,
"cache_hits": 0,
"execution_time": 0.0
}
# 简单的结果缓存(用于频繁的查询)
self._result_cache: Dict[str, Tuple[Any, float]] = {}
self._result_cache: dict[str, tuple[Any, float]] = {}
self._cache_ttl = 5.0 # 5秒缓存
async def start(self):
@@ -102,7 +102,7 @@ class DatabaseBatchScheduler:
await self._flush_all_queues()
logger.info("数据库批量调度器已停止")
def _generate_cache_key(self, operation_type: str, model_class: Any, conditions: Dict[str, Any]) -> str:
def _generate_cache_key(self, operation_type: str, model_class: Any, conditions: dict[str, Any]) -> str:
"""生成缓存键"""
# 简单的缓存键生成,实际可以根据需要优化
key_parts = [
@@ -112,12 +112,12 @@ class DatabaseBatchScheduler:
]
return "|".join(key_parts)
def _get_from_cache(self, cache_key: str) -> Optional[Any]:
def _get_from_cache(self, cache_key: str) -> Any | None:
"""从缓存获取结果"""
if cache_key in self._result_cache:
result, timestamp = self._result_cache[cache_key]
if time.time() - timestamp < self._cache_ttl:
self.stats['cache_hits'] += 1
self.stats["cache_hits"] += 1
return result
else:
# 清理过期缓存
@@ -131,7 +131,7 @@ class DatabaseBatchScheduler:
async def add_operation(self, operation: BatchOperation) -> asyncio.Future:
"""添加操作到队列"""
# 检查是否可以立即返回缓存结果
if operation.operation_type == 'select':
if operation.operation_type == "select":
cache_key = self._generate_cache_key(
operation.operation_type,
operation.model_class,
@@ -158,7 +158,7 @@ class DatabaseBatchScheduler:
await self._execute_operations([operation])
else:
self.operation_queues[queue_key].append(operation)
self.stats['total_operations'] += 1
self.stats["total_operations"] += 1
return future
@@ -193,7 +193,7 @@ class DatabaseBatchScheduler:
if operations:
await self._execute_operations(list(operations))
async def _execute_operations(self, operations: List[BatchOperation]):
async def _execute_operations(self, operations: list[BatchOperation]):
"""执行批量操作"""
if not operations:
return
@@ -209,13 +209,13 @@ class DatabaseBatchScheduler:
# 为每种操作类型创建批量执行任务
tasks = []
for op_type, ops in op_groups.items():
if op_type == 'select':
if op_type == "select":
tasks.append(self._execute_select_batch(ops))
elif op_type == 'insert':
elif op_type == "insert":
tasks.append(self._execute_insert_batch(ops))
elif op_type == 'update':
elif op_type == "update":
tasks.append(self._execute_update_batch(ops))
elif op_type == 'delete':
elif op_type == "delete":
tasks.append(self._execute_delete_batch(ops))
# 并发执行所有操作
@@ -238,7 +238,7 @@ class DatabaseBatchScheduler:
operation.future.set_result(result)
# 缓存查询结果
if operation.operation_type == 'select':
if operation.operation_type == "select":
cache_key = self._generate_cache_key(
operation.operation_type,
operation.model_class,
@@ -246,7 +246,7 @@ class DatabaseBatchScheduler:
)
self._set_cache(cache_key, result)
self.stats['batched_operations'] += len(operations)
self.stats["batched_operations"] += len(operations)
except Exception as e:
logger.error(f"批量操作执行失败: {e}", exc_info="")
@@ -255,9 +255,9 @@ class DatabaseBatchScheduler:
if operation.future and not operation.future.done():
operation.future.set_exception(e)
finally:
self.stats['execution_time'] += time.time() - start_time
self.stats["execution_time"] += time.time() - start_time
async def _execute_select_batch(self, operations: List[BatchOperation]):
async def _execute_select_batch(self, operations: list[BatchOperation]):
"""批量执行查询操作"""
# 合并相似的查询条件
merged_conditions = self._merge_select_conditions(operations)
@@ -302,7 +302,7 @@ class DatabaseBatchScheduler:
return results if len(results) > 1 else results[0] if results else []
async def _execute_insert_batch(self, operations: List[BatchOperation]):
async def _execute_insert_batch(self, operations: list[BatchOperation]):
"""批量执行插入操作"""
async with get_db_session() as session:
try:
@@ -323,7 +323,7 @@ class DatabaseBatchScheduler:
logger.error(f"批量插入失败: {e}", exc_info=True)
return [0] * len(operations)
async def _execute_update_batch(self, operations: List[BatchOperation]):
async def _execute_update_batch(self, operations: list[BatchOperation]):
"""批量执行更新操作"""
async with get_db_session() as session:
try:
@@ -353,7 +353,7 @@ class DatabaseBatchScheduler:
logger.error(f"批量更新失败: {e}", exc_info=True)
return [0] * len(operations)
async def _execute_delete_batch(self, operations: List[BatchOperation]):
async def _execute_delete_batch(self, operations: list[BatchOperation]):
"""批量执行删除操作"""
async with get_db_session() as session:
try:
@@ -382,7 +382,7 @@ class DatabaseBatchScheduler:
logger.error(f"批量删除失败: {e}", exc_info=True)
return [0] * len(operations)
def _merge_select_conditions(self, operations: List[BatchOperation]) -> Dict[Tuple, List[BatchOperation]]:
def _merge_select_conditions(self, operations: list[BatchOperation]) -> dict[tuple, list[BatchOperation]]:
"""合并相似的查询条件"""
merged = {}
@@ -405,15 +405,15 @@ class DatabaseBatchScheduler:
# 记录操作
if condition_key not in merged:
merged[condition_key] = {'_operations': []}
if '_operations' not in merged[condition_key]:
merged[condition_key]['_operations'] = []
merged[condition_key]['_operations'].append(op)
merged[condition_key] = {"_operations": []}
if "_operations" not in merged[condition_key]:
merged[condition_key]["_operations"] = []
merged[condition_key]["_operations"].append(op)
# 去重并构建最终条件
final_merged = {}
for condition_key, conditions in merged.items():
operations = conditions.pop('_operations')
operations = conditions.pop("_operations")
# 去重
for field_name, values in conditions.items():
@@ -423,13 +423,13 @@ class DatabaseBatchScheduler:
return final_merged
def get_stats(self) -> Dict[str, Any]:
def get_stats(self) -> dict[str, Any]:
"""获取统计信息"""
return {
**self.stats,
'cache_size': len(self._result_cache),
'queue_sizes': {k: len(v) for k, v in self.operation_queues.items()},
'is_running': self._is_running
"cache_size": len(self._result_cache),
"queue_sizes": {k: len(v) for k, v in self.operation_queues.items()},
"is_running": self._is_running
}
@@ -450,20 +450,20 @@ async def get_batch_session():
# 便捷函数
async def batch_select(model_class: Any, conditions: Dict[str, Any]) -> Any:
async def batch_select(model_class: Any, conditions: dict[str, Any]) -> Any:
"""批量查询"""
operation = BatchOperation(
operation_type='select',
operation_type="select",
model_class=model_class,
conditions=conditions
)
return await db_batch_scheduler.add_operation(operation)
async def batch_insert(model_class: Any, data: Dict[str, Any]) -> int:
async def batch_insert(model_class: Any, data: dict[str, Any]) -> int:
"""批量插入"""
operation = BatchOperation(
operation_type='insert',
operation_type="insert",
model_class=model_class,
conditions={},
data=data
@@ -471,10 +471,10 @@ async def batch_insert(model_class: Any, data: Dict[str, Any]) -> int:
return await db_batch_scheduler.add_operation(operation)
async def batch_update(model_class: Any, conditions: Dict[str, Any], data: Dict[str, Any]) -> int:
async def batch_update(model_class: Any, conditions: dict[str, Any], data: dict[str, Any]) -> int:
"""批量更新"""
operation = BatchOperation(
operation_type='update',
operation_type="update",
model_class=model_class,
conditions=conditions,
data=data
@@ -482,10 +482,10 @@ async def batch_update(model_class: Any, conditions: Dict[str, Any], data: Dict[
return await db_batch_scheduler.add_operation(operation)
async def batch_delete(model_class: Any, conditions: Dict[str, Any]) -> int:
async def batch_delete(model_class: Any, conditions: dict[str, Any]) -> int:
"""批量删除"""
operation = BatchOperation(
operation_type='delete',
operation_type="delete",
model_class=model_class,
conditions=conditions
)

View File

@@ -15,8 +15,8 @@ from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_asyn
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Mapped, mapped_column
from src.common.logger import get_logger
from src.common.database.connection_pool_manager import get_connection_pool_manager
from src.common.logger import get_logger
logger = get_logger("sqlalchemy_models")

View File

@@ -1,13 +1,13 @@
# 使用基于时间戳的文件处理器,简单的轮转份数限制
import logging
import tarfile
import threading
import time
import tarfile
from collections.abc import Callable
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Optional, Dict
from typing import Any
import orjson
import structlog
@@ -18,15 +18,15 @@ LOG_DIR = Path("logs")
LOG_DIR.mkdir(exist_ok=True)
# 全局handler实例避免重复创建可能为None表示禁用文件日志
_file_handler: Optional[logging.Handler] = None
_console_handler: Optional[logging.Handler] = None
_file_handler: logging.Handler | None = None
_console_handler: logging.Handler | None = None
# 动态 logger 元数据注册表 (name -> {alias:str|None, color:str|None})
_LOGGER_META_LOCK = threading.Lock()
_LOGGER_META: Dict[str, Dict[str, Optional[str]]] = {}
_LOGGER_META: dict[str, dict[str, str | None]] = {}
def _normalize_color(color: Optional[str]) -> Optional[str]:
def _normalize_color(color: str | None) -> str | None:
"""接受 ANSI 码 / #RRGGBB / rgb(r,g,b) / 颜色名(直接返回) -> ANSI 码.
不做复杂解析,只支持 #RRGGBB 转 24bit ANSI。
"""
@@ -49,13 +49,13 @@ def _normalize_color(color: Optional[str]) -> Optional[str]:
nums = color[color.find("(") + 1 : -1].split(",")
r, g, b = (int(x) for x in nums[:3])
return f"\033[38;2;{r};{g};{b}m"
except Exception: # noqa: BLE001
except Exception:
return None
# 其他情况直接返回假设是短ANSI或名称控制台渲染器不做翻译仅输出
return color
def _register_logger_meta(name: str, *, alias: Optional[str] = None, color: Optional[str] = None):
def _register_logger_meta(name: str, *, alias: str | None = None, color: str | None = None):
"""注册/更新 logger 元数据。"""
if not name:
return
@@ -67,7 +67,7 @@ def _register_logger_meta(name: str, *, alias: Optional[str] = None, color: Opti
meta["color"] = _normalize_color(color)
def get_logger_meta(name: str) -> Dict[str, Optional[str]]:
def get_logger_meta(name: str) -> dict[str, str | None]:
with _LOGGER_META_LOCK:
return _LOGGER_META.get(name, {"alias": None, "color": None}).copy()
@@ -170,7 +170,7 @@ class TimestampedFileHandler(logging.Handler):
try:
self._compress_stale_logs()
self._cleanup_old_files()
except Exception as e: # noqa: BLE001
except Exception as e:
print(f"[日志轮转] 轮转过程出错: {e}")
def _compress_stale_logs(self): # sourcery skip: extract-method
@@ -184,12 +184,12 @@ class TimestampedFileHandler(logging.Handler):
continue
# 压缩
try:
with tarfile.open(tar_path, "w:gz") as tf: # noqa: SIM117
with tarfile.open(tar_path, "w:gz") as tf:
tf.add(f, arcname=f.name)
f.unlink(missing_ok=True)
except Exception as e: # noqa: BLE001
except Exception as e:
print(f"[日志压缩] 压缩 {f.name} 失败: {e}")
except Exception as e: # noqa: BLE001
except Exception as e:
print(f"[日志压缩] 过程出错: {e}")
def _cleanup_old_files(self):
@@ -206,9 +206,9 @@ class TimestampedFileHandler(logging.Handler):
mtime = datetime.fromtimestamp(f.stat().st_mtime)
if mtime < cutoff:
f.unlink(missing_ok=True)
except Exception as e: # noqa: BLE001
except Exception as e:
print(f"[日志清理] 删除 {f} 失败: {e}")
except Exception as e: # noqa: BLE001
except Exception as e:
print(f"[日志清理] 清理过程出错: {e}")
def emit(self, record):
@@ -850,7 +850,7 @@ class ModuleColoredConsoleRenderer:
if logger_name:
# 获取别名,如果没有别名则使用原名称
# 若上面条件不成立需要再次获取 meta
if 'meta' not in locals():
if "meta" not in locals():
meta = get_logger_meta(logger_name)
display_name = meta.get("alias") or DEFAULT_MODULE_ALIASES.get(logger_name, logger_name)
@@ -1066,7 +1066,7 @@ raw_logger: structlog.stdlib.BoundLogger = structlog.get_logger()
binds: dict[str, Callable] = {}
def get_logger(name: str | None, *, color: Optional[str] = None, alias: Optional[str] = None) -> structlog.stdlib.BoundLogger:
def get_logger(name: str | None, *, color: str | None = None, alias: str | None = None) -> structlog.stdlib.BoundLogger:
"""获取/创建 structlog logger。
新增:
@@ -1132,10 +1132,10 @@ def cleanup_old_logs():
tar_path = f.with_suffix(f.suffix + ".tar.gz")
if tar_path.exists():
continue
with tarfile.open(tar_path, "w:gz") as tf: # noqa: SIM117
with tarfile.open(tar_path, "w:gz") as tf:
tf.add(f, arcname=f.name)
f.unlink(missing_ok=True)
except Exception as e: # noqa: BLE001
except Exception as e:
logger = get_logger("logger")
logger.warning(f"周期压缩日志时出错: {e}")
@@ -1152,7 +1152,7 @@ def cleanup_old_logs():
log_file.unlink(missing_ok=True)
deleted_count += 1
deleted_size += size
except Exception as e: # noqa: BLE001
except Exception as e:
logger = get_logger("logger")
logger.warning(f"清理日志文件 {log_file} 时出错: {e}")
if deleted_count:
@@ -1160,7 +1160,7 @@ def cleanup_old_logs():
logger.info(
f"清理 {deleted_count} 个过期日志 (≈{deleted_size / 1024 / 1024:.2f}MB), 保留策略={retention_days}"
)
except Exception as e: # noqa: BLE001
except Exception as e:
logger = get_logger("logger")
logger.error(f"清理旧日志文件时出错: {e}")
@@ -1183,7 +1183,7 @@ def start_log_cleanup_task():
while True:
try:
cleanup_old_logs()
except Exception as e: # noqa: BLE001
except Exception as e:
print(f"[日志任务] 执行清理出错: {e}")
# 再次等待到下一个午夜
time.sleep(max(1, seconds_until_next_midnight()))

View File

@@ -120,10 +120,10 @@ class MainSystem:
logger.warning("未发现任何兴趣计算器组件")
return
logger.info(f"发现的兴趣计算器组件:")
logger.info("发现的兴趣计算器组件:")
for calc_name, calc_info in interest_calculators.items():
enabled = getattr(calc_info, 'enabled', True)
default_enabled = getattr(calc_info, 'enabled_by_default', True)
enabled = getattr(calc_info, "enabled", True)
default_enabled = getattr(calc_info, "enabled_by_default", True)
logger.info(f" - {calc_name}: 启用: {enabled}, 默认启用: {default_enabled}")
# 初始化兴趣度管理器
@@ -136,8 +136,8 @@ class MainSystem:
# 使用组件注册表获取组件类并注册
for calc_name, calc_info in interest_calculators.items():
enabled = getattr(calc_info, 'enabled', True)
default_enabled = getattr(calc_info, 'enabled_by_default', True)
enabled = getattr(calc_info, "enabled", True)
default_enabled = getattr(calc_info, "enabled_by_default", True)
if not enabled or not default_enabled:
logger.info(f"兴趣计算器 {calc_name} 未启用,跳过")
@@ -344,7 +344,7 @@ MoFox_Bot(第三方修改版)
get_emoji_manager().initialize()
logger.info("表情包管理器初始化成功")
'''
"""
# 初始化回复后关系追踪系统
try:
from src.plugins.built_in.affinity_flow_chatter.interest_scoring import chatter_interest_scoring_system
@@ -356,7 +356,7 @@ MoFox_Bot(第三方修改版)
except Exception as e:
logger.error(f"回复后关系追踪系统初始化失败: {e}")
relationship_tracker = None
'''
"""
# 启动情绪管理器
await mood_manager.start()
@@ -487,10 +487,10 @@ MoFox_Bot(第三方修改版)
# 关闭应用 (MessageServer可能没有shutdown方法)
try:
if self.app:
if hasattr(self.app, 'shutdown'):
if hasattr(self.app, "shutdown"):
await self.app.shutdown()
logger.info("应用已关闭")
elif hasattr(self.app, 'stop'):
elif hasattr(self.app, "stop"):
await self.app.stop()
logger.info("应用已停止")
else:

View File

@@ -2,7 +2,6 @@ import math
import random
import time
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.message_receive.message import MessageRecv
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
from src.chat.utils.prompt import Prompt, global_prompt_manager

View File

@@ -5,7 +5,6 @@ import time
import traceback
from typing import Any
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.utils.chat_message_builder import (
get_raw_msg_before_timestamp_with_chat,
get_raw_msg_by_timestamp_with_chat,

View File

@@ -5,7 +5,6 @@ from typing import Any
import orjson
from json_repair import repair_json
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.utils.prompt import Prompt, global_prompt_manager
from src.common.logger import get_logger
from src.config.config import global_config, model_config

View File

@@ -4,8 +4,8 @@ from datetime import datetime
from difflib import SequenceMatcher
from typing import Any
import rjieba
import orjson
import rjieba
from json_repair import repair_json
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

View File

@@ -49,7 +49,6 @@ from .base import (
ToolParamType,
create_plus_command_adapter,
)
from .utils.dependency_config import configure_dependency_settings, get_dependency_config
# 导入依赖管理模块

View File

@@ -113,7 +113,7 @@ class BaseInterestCalculator(ABC):
try:
self._enabled = True
return True
except Exception as e:
except Exception:
self._enabled = False
return False
@@ -170,7 +170,7 @@ class BaseInterestCalculator(ABC):
if not self._enabled:
return InterestCalculationResult(
success=False,
message_id=getattr(message, 'message_id', ''),
message_id=getattr(message, "message_id", ""),
interest_value=0.0,
error_message="组件未启用"
)
@@ -184,9 +184,9 @@ class BaseInterestCalculator(ABC):
except Exception as e:
result = InterestCalculationResult(
success=False,
message_id=getattr(message, 'message_id', ''),
message_id=getattr(message, "message_id", ""),
interest_value=0.0,
error_message=f"计算执行失败: {str(e)}",
error_message=f"计算执行失败: {e!s}",
calculation_time=time.time() - start_time
)
self._update_statistics(result)
@@ -201,7 +201,7 @@ class BaseInterestCalculator(ABC):
Returns:
InterestCalculatorInfo: 生成的兴趣计算器信息对象
"""
name = getattr(cls, 'component_name', cls.__name__.lower().replace('calculator', ''))
name = getattr(cls, "component_name", cls.__name__.lower().replace("calculator", ""))
if "." in name:
logger.error(f"InterestCalculator名称 '{name}' 包含非法字符 '.',请使用下划线替代")
raise ValueError(f"InterestCalculator名称 '{name}' 包含非法字符 '.',请使用下划线替代")
@@ -209,8 +209,8 @@ class BaseInterestCalculator(ABC):
return InterestCalculatorInfo(
name=name,
component_type=ComponentType.INTEREST_CALCULATOR,
description=getattr(cls, 'component_description', cls.__doc__ or "兴趣度计算器"),
enabled_by_default=getattr(cls, 'enabled_by_default', True),
description=getattr(cls, "component_description", cls.__doc__ or "兴趣度计算器"),
enabled_by_default=getattr(cls, "enabled_by_default", True),
)
def __repr__(self) -> str:

View File

@@ -43,21 +43,21 @@ class BasePlugin(PluginBase):
对应类型的ComponentInfo对象
"""
if component_type == ComponentType.COMMAND:
if hasattr(component_class, 'get_command_info'):
if hasattr(component_class, "get_command_info"):
return component_class.get_command_info()
else:
logger.warning(f"Command类 {component_class.__name__} 缺少 get_command_info 方法")
return None
elif component_type == ComponentType.ACTION:
if hasattr(component_class, 'get_action_info'):
if hasattr(component_class, "get_action_info"):
return component_class.get_action_info()
else:
logger.warning(f"Action类 {component_class.__name__} 缺少 get_action_info 方法")
return None
elif component_type == ComponentType.INTEREST_CALCULATOR:
if hasattr(component_class, 'get_interest_calculator_info'):
if hasattr(component_class, "get_interest_calculator_info"):
return component_class.get_interest_calculator_info()
else:
logger.warning(f"InterestCalculator类 {component_class.__name__} 缺少 get_interest_calculator_info 方法")

View File

@@ -1,5 +1,6 @@
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Set
from typing import Any
@dataclass
class PluginMetadata:
@@ -11,15 +12,15 @@ class PluginMetadata:
usage: str # 插件使用方法
# 以下为可选字段,参考自 _manifest.json 和 NoneBot 设计
type: Optional[str] = None # 插件类别: "library", "application"
type: str | None = None # 插件类别: "library", "application"
# 从原 _manifest.json 迁移的字段
version: str = "1.0.0" # 插件版本
author: str = "" # 作者名称
license: Optional[str] = None # 开源协议
repository_url: Optional[str] = None # 仓库地址
keywords: List[str] = field(default_factory=list) # 关键词
categories: List[str] = field(default_factory=list) # 分类
license: str | None = None # 开源协议
repository_url: str | None = None # 仓库地址
keywords: list[str] = field(default_factory=list) # 关键词
categories: list[str] = field(default_factory=list) # 分类
# 扩展字段
extra: Dict[str, Any] = field(default_factory=dict) # 其他任意信息
extra: dict[str, Any] = field(default_factory=dict) # 其他任意信息

View File

@@ -1,7 +1,6 @@
import asyncio
import importlib
import os
import traceback
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from typing import Any, Optional
@@ -288,7 +287,7 @@ class PluginManager:
return loaded_count, failed_count
def _load_plugin_module_file(self, plugin_file: str) -> Optional[Any]:
def _load_plugin_module_file(self, plugin_file: str) -> Any | None:
# sourcery skip: extract-method
"""加载单个插件模块文件

View File

@@ -2,7 +2,6 @@ import inspect
import time
from typing import Any
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.utils.prompt import Prompt, global_prompt_manager
from src.common.cache_manager import tool_cache
from src.common.logger import get_logger

View File

@@ -52,7 +52,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
# 用户关系数据缓存
self.user_relationships: dict[str, float] = {} # user_id -> relationship_score
logger.info(f"[Affinity兴趣计算器] 初始化完成:")
logger.info("[Affinity兴趣计算器] 初始化完成:")
logger.info(f" - 权重配置: {self.score_weights}")
logger.info(f" - 回复阈值: {self.reply_threshold}")
logger.info(f" - 智能匹配: {self.use_smart_matching}")
@@ -69,9 +69,9 @@ class AffinityInterestCalculator(BaseInterestCalculator):
"""执行AffinityFlow风格的兴趣值计算"""
try:
start_time = time.time()
message_id = getattr(message, 'message_id', '')
content = getattr(message, 'processed_plain_text', '')
user_id = getattr(message, 'user_info', {}).user_id if hasattr(message, 'user_info') and hasattr(message.user_info, 'user_id') else ''
message_id = getattr(message, "message_id", "")
content = getattr(message, "processed_plain_text", "")
user_id = getattr(message, "user_info", {}).user_id if hasattr(message, "user_info") and hasattr(message.user_info, "user_id") else ""
logger.debug(f"[Affinity兴趣计算] 开始处理消息 {message_id}")
logger.debug(f"[Affinity兴趣计算] 消息内容: {content[:50]}...")
@@ -135,7 +135,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
logger.error(f"Affinity兴趣值计算失败: {e}", exc_info=True)
return InterestCalculationResult(
success=False,
message_id=getattr(message, 'message_id', ''),
message_id=getattr(message, "message_id", ""),
interest_value=0.0,
error_message=str(e)
)
@@ -206,9 +206,9 @@ class AffinityInterestCalculator(BaseInterestCalculator):
def _calculate_mentioned_score(self, message: "DatabaseMessages", bot_nickname: str) -> float:
"""计算提及分"""
is_mentioned = getattr(message, 'is_mentioned', False)
is_at = getattr(message, 'is_at', False)
processed_plain_text = getattr(message, 'processed_plain_text', '')
is_mentioned = getattr(message, "is_mentioned", False)
is_at = getattr(message, "is_at", False)
processed_plain_text = getattr(message, "processed_plain_text", "")
if is_mentioned:
if is_at:
@@ -238,7 +238,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
keywords = []
# 尝试从 key_words 字段提取存储的是JSON字符串
key_words = getattr(message, 'key_words', '')
key_words = getattr(message, "key_words", "")
if key_words:
try:
import orjson
@@ -250,7 +250,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
# 如果没有 keywords尝试从 key_words_lite 提取
if not keywords:
key_words_lite = getattr(message, 'key_words_lite', '')
key_words_lite = getattr(message, "key_words_lite", "")
if key_words_lite:
try:
import orjson
@@ -262,7 +262,7 @@ class AffinityInterestCalculator(BaseInterestCalculator):
# 如果还是没有,从消息内容中提取(降级方案)
if not keywords:
content = getattr(message, 'processed_plain_text', '') or ''
content = getattr(message, "processed_plain_text", "") or ""
keywords = self._extract_keywords_from_content(content)
return keywords[:15] # 返回前15个关键词

View File

@@ -107,9 +107,9 @@ class ChatterActionPlanner:
# 直接使用消息中已计算的标志,无需重复计算兴趣值
for message in unread_messages:
try:
message_interest = getattr(message, 'interest_value', 0.3)
message_should_reply = getattr(message, 'should_reply', False)
message_should_act = getattr(message, 'should_act', False)
message_interest = getattr(message, "interest_value", 0.3)
message_should_reply = getattr(message, "should_reply", False)
message_should_act = getattr(message, "should_act", False)
# 确保interest_value不是None
if message_interest is None:

View File

@@ -5,7 +5,7 @@
from src.common.logger import get_logger
from src.plugin_system.apis.plugin_register_api import register_plugin
from src.plugin_system.base.base_plugin import BasePlugin
from src.plugin_system.base.component_types import ComponentInfo, ComponentType, InterestCalculatorInfo
from src.plugin_system.base.component_types import ComponentInfo
logger = get_logger("affinity_chatter_plugin")
@@ -52,4 +52,3 @@ class AffinityChatterPlugin(BasePlugin):
return components