diff --git a/plugins/hello_world_plugin/plugin.py b/plugins/hello_world_plugin/plugin.py
index 57a226acd..efb98a939 100644
--- a/plugins/hello_world_plugin/plugin.py
+++ b/plugins/hello_world_plugin/plugin.py
@@ -1,4 +1,4 @@
-from typing import Dict, List, Optional, Tuple, Type, Any
+from typing import List, Tuple, Type, Any
 from src.plugin_system import (
     BasePlugin,
     register_plugin,
@@ -14,7 +14,6 @@ from src.plugin_system import (
 from src.plugin_system.base.base_command import BaseCommand
 from src.plugin_system.apis import send_api

-from typing import Tuple
 from src.common.logger import get_logger

 logger = get_logger(__name__)
diff --git a/pyproject.toml b/pyproject.toml
index e1cd9e9d5..5b2d4d688 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,28 +7,38 @@ dependencies = [
     "aiohttp>=3.12.14",
     "aiohttp-cors>=0.8.1",
     "apscheduler>=3.11.0",
+    "asyncddgs>=0.1.0a1",
+    "asyncio>=4.0.0",
+    "beautifulsoup4>=4.13.4",
     "colorama>=0.4.6",
     "cryptography>=45.0.5",
     "customtkinter>=5.2.2",
     "dotenv>=0.9.9",
+    "exa-py>=1.14.20",
     "faiss-cpu>=1.11.0",
     "fastapi>=0.116.0",
     "google>=3.0.0",
     "google-genai>=1.29.0",
+    "httpx>=0.28.1",
     "jieba>=0.42.1",
     "json-repair>=0.47.6",
+    "json5>=0.12.1",
     "jsonlines>=4.0.0",
+    "lxml>=6.0.0",
     "maim-message>=0.3.8",
     "matplotlib>=3.10.3",
     "networkx>=3.4.2",
     "numpy>=2.2.6",
     "openai>=1.95.0",
+    "opencv-python>=4.11.0.86",
     "packaging>=25.0",
     "pandas>=2.3.1",
+    "peewee>=3.18.2",
     "pillow>=11.3.0",
     "psutil>=7.0.0",
     "pyarrow>=20.0.0",
     "pydantic>=2.11.7",
+    "pygments>=2.19.2",
     "pymongo>=4.13.2",
     "pymysql>=1.1.1",
     "pypinyin>=0.54.0",
diff --git a/requirements.txt b/requirements.txt
index 96fa7db93..e1fbc704c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -48,12 +48,12 @@ scikit-learn
 seaborn
 structlog
 watchdog
-httpx>=0.24.0
-requests>=2.28.0
-beautifulsoup4>=4.11.0
-lxml>=4.9.0
-json5>=0.9.0
-toml>=0.10.0
+httpx
+requests
+beautifulsoup4
+lxml
+json5
+toml
 beautifulsoup4
 exa_py
 asyncddgs
diff --git a/src/chat/memory_system/Hippocampus.py b/src/chat/memory_system/Hippocampus.py
index f5c8c36e6..6e21d6e2b 100644
--- a/src/chat/memory_system/Hippocampus.py
+++ b/src/chat/memory_system/Hippocampus.py
@@ -16,7 +16,7 @@ from rich.traceback import install

 from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config, model_config
-from sqlalchemy import select,insert,update,text,delete
+from sqlalchemy import select,insert,update,delete
 from src.common.database.sqlalchemy_models import Messages, GraphNodes, GraphEdges  # SQLAlchemy Models导入
 from src.common.logger import get_logger
 from src.common.database.sqlalchemy_database_api import get_session
diff --git a/src/chat/message_receive/message.py b/src/chat/message_receive/message.py
index 50f88aad6..4919decd4 100644
--- a/src/chat/message_receive/message.py
+++ b/src/chat/message_receive/message.py
@@ -11,7 +11,7 @@ from maim_message import Seg, UserInfo, BaseMessageInfo, MessageBase
 from src.common.logger import get_logger
 from src.chat.utils.utils_image import get_image_manager
 from src.chat.utils.utils_voice import get_voice_text
-from src.multimodal.video_analyzer import get_video_analyzer
+from src.chat.utils.utils_video import get_video
 from .chat_stream import ChatStream

 install(extra_lines=3)
@@ -370,7 +370,7 @@ class MessageRecvS4U(MessageRecv):
                 logger.info(f"解码后视频大小: {len(video_bytes)} 字节")

                 # 使用video analyzer分析视频
-                video_analyzer = get_video_analyzer()
+                video_analyzer = get_video()
                 result = await video_analyzer.analyze_video_from_bytes(
                     video_bytes,
                     filename,
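Note: a minimal usage sketch of the relocated video helper, based on the message.py hunk above. get_video() and analyze_video_from_bytes() come from the hunks in this diff; the wrapper function and its signature are illustrative assumptions, not code from this PR.

    # Illustrative only: calling the renamed helper after this change.
    from src.chat.utils.utils_video import get_video  # formerly src.multimodal.video_analyzer.get_video_analyzer

    async def describe_video(video_bytes: bytes, filename: str):
        analyzer = get_video()  # lazily-created module-level instance, per utils_video.py below
        # analyze_video_from_bytes takes further arguments in the real call site (elided in the hunk above)
        return await analyzer.analyze_video_from_bytes(video_bytes, filename)
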
diff --git a/src/chat/utils/statistic.py b/src/chat/utils/statistic.py
index 3f656fc30..15449e427 100644
--- a/src/chat/utils/statistic.py
+++ b/src/chat/utils/statistic.py
@@ -7,7 +7,7 @@ from typing import Any, Dict, Tuple, List

 from src.common.logger import get_logger
 from src.common.database.sqlalchemy_models import OnlineTime, LLMUsage, Messages
-from src.common.database.sqlalchemy_database_api import get_db_session, db_query, db_save, db_get
+from src.common.database.sqlalchemy_database_api import db_query, db_save, db_get
 from src.manager.async_task_manager import AsyncTask
 from src.manager.local_store_manager import local_storage
diff --git a/src/multimodal/video_analyzer.py b/src/chat/utils/utils_video.py
similarity index 95%
rename from src/multimodal/video_analyzer.py
rename to src/chat/utils/utils_video.py
index 45c7e65bd..8e9833247 100644
--- a/src/multimodal/video_analyzer.py
+++ b/src/chat/utils/utils_video.py
@@ -12,7 +12,7 @@ import asyncio
 import base64
 from PIL import Image
 from pathlib import Path
-from typing import List, Tuple, Optional, Dict
+from typing import List, Tuple, Dict
 import io

 from src.llm_models.utils_model import LLMRequest
@@ -30,21 +30,20 @@ class VideoAnalyzer:
         # 使用专用的视频分析配置
         try:
             self.video_llm = LLMRequest(
-                model_set=model_config.model_task_config.video_analysis,
-                request_type="video_analysis"
+                model_set=model_config.model_task_config.utils_video,
+                request_type="utils_video"
             )
-            logger.info("✅ 使用video_analysis模型配置")
         except (AttributeError, KeyError) as e:
-            # 如果video_analysis不存在,使用vlm配置
+            # 如果utils_video不存在,使用vlm配置
             self.video_llm = LLMRequest(
                 model_set=model_config.model_task_config.vlm,
                 request_type="vlm"
             )
-            logger.warning(f"video_analysis配置不可用({e}),回退使用vlm配置")
+            logger.warning(f"utils_video配置不可用({e}),回退使用vlm配置")

         # 从配置文件读取参数,如果配置不存在则使用默认值
         try:
-            config = global_config.video_analysis
+            config = global_config.utils_video
             self.max_frames = config.max_frames
             self.frame_quality = config.frame_quality
             self.max_image_size = config.max_image_size
@@ -67,11 +66,11 @@ class VideoAnalyzer:
             self.frame_interval = 1.0  # 抽帧时间间隔(秒)
             self.batch_size = 3  # 批处理时每批处理的帧数
             self.timeout = 60.0  # 分析超时时间(秒)
-            logger.info(f"✅ 从配置文件读取视频分析参数")
+            logger.info("✅ 从配置文件读取视频分析参数")

         except AttributeError as e:
             # 如果配置不存在,使用代码中的默认值
-            logger.warning(f"配置文件中缺少video_analysis配置({e}),使用默认值")
+            logger.warning(f"配置文件中缺少utils_video配置({e}),使用默认值")
             self.max_frames = 6
             self.frame_quality = 85
             self.max_image_size = 600
@@ -345,8 +344,8 @@ class VideoAnalyzer:
 # 全局实例
 _video_analyzer = None

-def get_video_analyzer() -> VideoAnalyzer:
-    """获取视频分析器实例(单例模式)"""
+def get_video() -> VideoAnalyzer:
+    """获取视频分析器实例"""
     global _video_analyzer
     if _video_analyzer is None:
         _video_analyzer = VideoAnalyzer()
diff --git a/src/common/database/sqlalchemy_database_api.py b/src/common/database/sqlalchemy_database_api.py
index 53b9a4fbf..d85cf1c0b 100644
--- a/src/common/database/sqlalchemy_database_api.py
+++ b/src/common/database/sqlalchemy_database_api.py
@@ -10,7 +10,7 @@ from typing import Dict, List, Any, Union, Type, Optional
 from contextlib import contextmanager
 from sqlalchemy.orm import Session
 from sqlalchemy.exc import SQLAlchemyError, DisconnectionError, OperationalError
-from sqlalchemy import desc, asc, func, and_, or_
+from sqlalchemy import desc, asc, func, and_
 from src.common.logger import get_logger
 from src.common.database.sqlalchemy_models import (
     Base, get_db_session, Messages, ActionRecords, PersonInfo, ChatStreams,
@@ -61,7 +61,7 @@ def get_db_session():
                 time.sleep(retry_delay * (attempt + 1))
             else:
                 raise
-    except Exception as e:
+    except Exception:
         if session:
             session.rollback()
         raise
diff --git a/src/common/database/sqlalchemy_models.py b/src/common/database/sqlalchemy_models.py
index 619e5306d..97de907b7 100644
--- a/src/common/database/sqlalchemy_models.py
+++ b/src/common/database/sqlalchemy_models.py
@@ -68,7 +68,7 @@ class SessionProxy:
                 # commit后不要清除session,只是刷新状态
                 pass  # 保持session活跃
             return result
-        except Exception as e:
+        except Exception:
             try:
                 if session and hasattr(session, 'rollback'):
                     session.rollback()
@@ -555,7 +555,7 @@ def get_db_session():
         session = SessionLocal()
         yield session
         session.commit()
-    except Exception as e:
+    except Exception:
         if session:
             session.rollback()
         raise
diff --git a/src/common/schedule_manager.py b/src/common/schedule_manager.py
index d886cfdc6..f155251ed 100644
--- a/src/common/schedule_manager.py
+++ b/src/common/schedule_manager.py
@@ -1,5 +1,4 @@
 import json
-import asyncio
 from datetime import datetime
 from typing import Optional, List, Dict, Any

@@ -93,13 +92,13 @@ class ScheduleManager:
             existing_schedule = session.query(Schedule).filter(Schedule.date == today_str).first()
             if existing_schedule:
                 # 更新现有日程 - 通过setattr或直接赋值
-                setattr(existing_schedule, 'schedule_data', json.dumps(schedule_data))
-                setattr(existing_schedule, 'updated_at', datetime.now())
+                existing_schedule.schedule_data = json.dumps(schedule_data)
+                existing_schedule.updated_at = datetime.now()
             else:
                 # 创建新日程
                 new_schedule = Schedule()
-                setattr(new_schedule, 'date', today_str)
-                setattr(new_schedule, 'schedule_data', json.dumps(schedule_data))
+                new_schedule.date = today_str
+                new_schedule.schedule_data = json.dumps(schedule_data)
                 session.add(new_schedule)

             # 美化输出
diff --git a/src/config/api_ada_configs.py b/src/config/api_ada_configs.py
index f11a0ae8c..5037d6a97 100644
--- a/src/config/api_ada_configs.py
+++ b/src/config/api_ada_configs.py
@@ -141,7 +141,7 @@ class ModelTaskConfig(ConfigBase):
     schedule_generator: TaskConfig
     """日程生成模型配置"""

-    video_analysis: TaskConfig = field(default_factory=lambda: TaskConfig(
+    utils_video: TaskConfig = field(default_factory=lambda: TaskConfig(
         model_list=["qwen2.5-vl-72b"],
         max_tokens=1500,
         temperature=0.3
diff --git a/src/config/config.py b/src/config/config.py
index 98a163f4c..2b15c4952 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -353,7 +353,7 @@ class Config(ConfigBase):
     custom_prompt: CustomPromptConfig
     voice: VoiceConfig
     schedule: ScheduleConfig
-    video_analysis: VideoAnalysisConfig = field(default_factory=lambda: VideoAnalysisConfig())
+    utils_video: VideoAnalysisConfig = field(default_factory=lambda: VideoAnalysisConfig())


 @dataclass
diff --git a/src/llm_models/model_client/aiohttp_gemini_client.py b/src/llm_models/model_client/aiohttp_gemini_client.py
index 95b5bb899..0dae4defa 100644
--- a/src/llm_models/model_client/aiohttp_gemini_client.py
+++ b/src/llm_models/model_client/aiohttp_gemini_client.py
@@ -1,10 +1,8 @@
 import asyncio
 import json
 import io
-import base64
-from typing import Callable, Any, Coroutine, Optional, List, AsyncIterator
+from typing import Callable, Any, Coroutine, Optional
 import aiohttp
-from json_repair import repair_json

 from src.config.api_ada_configs import ModelInfo, APIProvider
 from src.common.logger import get_logger
@@ -289,7 +287,7 @@ def _default_normal_response_parser(
             if "functionCall" in candidate:
                 func_call = candidate["functionCall"]
                 api_response.tool_calls = [ToolCall(
-                    f"gemini_call_0",
+                    "gemini_call_0",
                     func_call.get("name", ""),
                     func_call.get("args", {})
                 )]
diff --git a/src/multimodal/__init__.py b/src/multimodal/__init__.py
deleted file mode 100644
index 79ef3bac0..000000000
--- a/src/multimodal/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-多模态模块初始化文件
-"""
-
-from .video_analyzer import VideoAnalyzer, get_video_analyzer
-
-__all__ = ['VideoAnalyzer', 'get_video_analyzer']
diff --git a/src/person_info/fix_session.py b/src/person_info/fix_session.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/src/plugin_system/apis/send_api.py b/src/plugin_system/apis/send_api.py
index 92e50304c..80154f145 100644
--- a/src/plugin_system/apis/send_api.py
+++ b/src/plugin_system/apis/send_api.py
@@ -32,18 +32,18 @@ import traceback
 import time
 import difflib
 import asyncio
-from typing import Optional, Union, Dict, Any
+from typing import Optional, Union, Dict
 from src.common.logger import get_logger

 # 导入依赖
 from src.chat.message_receive.chat_stream import get_chat_manager
-from maim_message import UserInfo, GroupInfo
+from maim_message import UserInfo
 from src.chat.message_receive.chat_stream import ChatStream
 from src.chat.message_receive.uni_message_sender import HeartFCSender
 from src.chat.message_receive.message import MessageSending, MessageRecv
 from src.chat.utils.chat_message_builder import get_raw_msg_before_timestamp_with_chat, replace_user_references_async
 from src.person_info.person_info import get_person_info_manager
-from maim_message import Seg, UserInfo
+from maim_message import Seg
 from src.config.config import global_config

 logger = get_logger("send_api")
@@ -520,7 +520,7 @@ async def adapter_command_to_stream(
         logger.error("[SendAPI] 发送适配器命令失败")
         return {"status": "error", "message": "发送适配器命令失败"}

-    logger.debug(f"[SendAPI] 已发送适配器命令,等待响应...")
+    logger.debug("[SendAPI] 已发送适配器命令,等待响应...")

     # 等待适配器响应
     response = await wait_adapter_response(message_id, timeout)
diff --git a/src/plugins/built_in/Maizone/config_loader.py b/src/plugins/built_in/Maizone/config_loader.py
index d3b7ba811..37d932b8a 100644
--- a/src/plugins/built_in/Maizone/config_loader.py
+++ b/src/plugins/built_in/Maizone/config_loader.py
@@ -5,7 +5,6 @@ MaiZone插件独立配置文件加载系统
 它支持TOML格式的配置文件,具有配置验证、默认值处理、类型转换等功能。
 """

-import os
 import toml
 import shutil
 import datetime
@@ -165,8 +164,8 @@ class MaiZoneConfigLoader:

     def _save_config_to_file(self, config_data: Dict[str, Any]):
         """保存配置到文件(带注释)"""
-        toml_content = f"# MaiZone插件配置文件\n"
-        toml_content += f"# 让你的麦麦发QQ空间说说、评论、点赞,支持AI配图、定时发送和自动监控功能\n"
+        toml_content = "# MaiZone插件配置文件\n"
+        toml_content += "# 让你的麦麦发QQ空间说说、评论、点赞,支持AI配图、定时发送和自动监控功能\n"
         toml_content += f"# 配置版本: {self.config_version}\n\n"

         for section_name, section_spec in self.config_specs.items():
@@ -187,7 +186,7 @@ class MaiZoneConfigLoader:
                 if field_spec.choices:
                     toml_content += f"# 可选值: {', '.join(map(str, field_spec.choices))}\n"
                 if field_spec.min_value is not None or field_spec.max_value is not None:
-                    range_str = f"# 范围: "
+                    range_str = "# 范围: "
                     if field_spec.min_value is not None:
                         range_str += f"最小值 {field_spec.min_value}"
                     if field_spec.max_value is not None:
diff --git a/src/plugins/built_in/Maizone/monitor.py b/src/plugins/built_in/Maizone/monitor.py
index 17f3f6674..df1c170a5 100644
--- a/src/plugins/built_in/Maizone/monitor.py
+++ b/src/plugins/built_in/Maizone/monitor.py
@@ -2,7 +2,7 @@ import asyncio
 import random
 import time
 import traceback
-from typing import List, Dict, Any
+from typing import Dict, Any

 from src.common.logger import get_logger
 from src.plugin_system.apis import llm_api, config_api
diff --git a/src/plugins/built_in/Maizone/plugin.py b/src/plugins/built_in/Maizone/plugin.py
index 1a27a39b7..c1d65e7e7 100644
--- a/src/plugins/built_in/Maizone/plugin.py
+++ b/src/plugins/built_in/Maizone/plugin.py
@@ -1,8 +1,7 @@
 import asyncio
 import random
 import time
-import traceback
-from typing import List, Tuple, Type, Union, Any, Optional
+from typing import List, Tuple, Type

 from src.common.logger import get_logger
 from src.plugin_system import (
@@ -88,7 +87,7 @@ class SendFeedCommand(BaseCommand):
         # 权限检查
         if not user_id or not self.check_permission(user_id):
             logger.info(f"用户 {user_id} 权限不足")
-            await self.send_text(f"权限不足,无法使用此命令")
+            await self.send_text("权限不足,无法使用此命令")
             return False, "权限不足", True

         # 获取主题
@@ -791,7 +790,7 @@ class MaiZonePlugin(BasePlugin):

         # 初始化定时管理器
         if self.config_loader.get_config("schedule.enable_schedule", False):
-            logger.info(f"定时任务启用状态: true")
+            logger.info("定时任务启用状态: true")
             self.schedule_manager = ScheduleManager(self)
             asyncio.create_task(self._start_scheduler_delayed())

diff --git a/src/plugins/built_in/Maizone/qzone_utils.py b/src/plugins/built_in/Maizone/qzone_utils.py
index ae092046f..ef8a5456c 100644
--- a/src/plugins/built_in/Maizone/qzone_utils.py
+++ b/src/plugins/built_in/Maizone/qzone_utils.py
@@ -4,13 +4,10 @@ import os
 import random
 import time
 import datetime
-import traceback
 from typing import List, Dict, Any, Optional
 from pathlib import Path

-import httpx
 import requests
-import asyncio
 import bs4
 import json5

diff --git a/src/plugins/built_in/Maizone/scheduler.py b/src/plugins/built_in/Maizone/scheduler.py
index cb513b9f3..266883d23 100644
--- a/src/plugins/built_in/Maizone/scheduler.py
+++ b/src/plugins/built_in/Maizone/scheduler.py
@@ -3,15 +3,13 @@ import datetime
 import time
 import traceback
 import os
-import json
-from typing import Dict, List, Any
+from typing import Dict, Any

 from src.common.logger import get_logger
 from src.plugin_system.apis import llm_api, config_api

 # 导入工具模块
 import sys
-import os
 sys.path.append(os.path.dirname(__file__))
 from qzone_utils import QZoneManager, get_send_history

diff --git a/src/plugins/built_in/WEB_SEARCH_TOOL/plugin.py b/src/plugins/built_in/WEB_SEARCH_TOOL/plugin.py
index ac4cd8cb1..8b2532dba 100644
--- a/src/plugins/built_in/WEB_SEARCH_TOOL/plugin.py
+++ b/src/plugins/built_in/WEB_SEARCH_TOOL/plugin.py
@@ -1,10 +1,8 @@
-import os
 import asyncio
 import functools
 from typing import Any, Dict, List
 from datetime import datetime, timedelta
 from exa_py import Exa
-import asyncio
 from asyncddgs import aDDGS

 from src.common.logger import get_logger
@@ -12,19 +10,12 @@ from typing import Tuple,Type
 from src.plugin_system import (
     BasePlugin,
     register_plugin,
-    BaseAction,
-    BaseCommand,
     BaseTool,
     ComponentInfo,
-    ActionActivationType,
     ConfigField,
-    BaseEventHandler,
     llm_api,
-    EventType,
-    MaiMessages,
     ToolParamType
 )
-from src.config.config import global_config
 import httpx
 from bs4 import BeautifulSoup

diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 3116e537c..8dc61841a 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -279,7 +279,7 @@ guidelines = """
 [experimental] #实验性功能
 enable_friend_chat = false # 是否启用好友聊天

-[video_analysis] # 视频分析配置
+[utils_video] # 视频分析配置
 enable = true # 是否启用视频分析功能
 analysis_mode = "batch_frames" # 分析模式:"frame_by_frame"(逐帧分析,慢但详细)、"batch_frames"(批量分析,快但可能略简单)或 "auto"(自动选择)
 max_frames = 8 # 最大分析帧数
diff --git a/template/model_config_template.toml b/template/model_config_template.toml
index 0a5cb7329..69b275920 100644
--- a/template/model_config_template.toml
+++ b/template/model_config_template.toml
@@ -148,7 +148,7 @@ max_tokens = 800
 model_list = ["qwen2.5-vl-72b"]
 max_tokens = 800

-[model_task_config.video_analysis] # 专用视频分析模型
+[model_task_config.utils_video] # 专用视频分析模型
 model_list = ["qwen2.5-vl-72b"]
 temperature = 0.3
 max_tokens = 1500
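For context, a minimal sketch of how the renamed task config is resolved, condensed from the VideoAnalyzer.__init__ hunk earlier in this diff. The names (model_config, model_task_config.utils_video, LLMRequest, the vlm fallback) come from those hunks; the snippet is illustrative rather than a verbatim excerpt.

    # Condensed from the VideoAnalyzer.__init__ hunk above; illustrative, not a verbatim excerpt.
    from src.llm_models.utils_model import LLMRequest
    from src.config.config import model_config

    try:
        # Preferred: the dedicated task config renamed to utils_video in this PR
        video_llm = LLMRequest(
            model_set=model_config.model_task_config.utils_video,
            request_type="utils_video",
        )
    except (AttributeError, KeyError):
        # Fallback when [model_task_config.utils_video] is absent: reuse the general vlm task
        video_llm = LLMRequest(
            model_set=model_config.model_task_config.vlm,
            request_type="vlm",
        )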