diff --git a/README.md b/README.md index b1c271245..ceafd8c43 100644 --- a/README.md +++ b/README.md @@ -65,11 +65,11 @@ ## 💬 讨论 -- [一群](https://qm.qq.com/q/VQ3XZrWgMs) | +- [一群](https://qm.qq.com/q/VQ3XZrWgMs) | + [四群](https://qm.qq.com/q/wGePTl1UyY) | [二群](https://qm.qq.com/q/RzmCiRtHEW) | - [五群](https://qm.qq.com/q/JxvHZnxyec) | - [三群](https://qm.qq.com/q/wlH5eT8OmQ)(已满)| - [四群](https://qm.qq.com/q/wGePTl1UyY)(已满) + [五群](https://qm.qq.com/q/JxvHZnxyec)(已满) | + [三群](https://qm.qq.com/q/wlH5eT8OmQ)(已满) ## 📚 文档 diff --git a/changelogs/changelog.md b/changelogs/changelog.md index d77deaa67..00bdf2afc 100644 --- a/changelogs/changelog.md +++ b/changelogs/changelog.md @@ -27,6 +27,11 @@ - 表达器:装饰语言风格 - 可通过插件添加和自定义HFC部件(目前只支持action定义) +**插件系统** +- 添加示例插件 +- 示例插件:禁言插件 +- 示例插件:豆包绘图插件 + **新增表达方式学习** - 自主学习群聊中的表达方式,更贴近群友 - 可自定义的学习频率和开关 @@ -45,7 +50,6 @@ **优化** - 移除日程系统,减少幻觉(将会在未来版本回归) - 移除主心流思考和LLM进入聊天判定 - - ## [0.6.3-fix-4] - 2025-5-18 diff --git a/src/chat/__init__.py b/src/chat/__init__.py index 1e859ffb7..0caa0870b 100644 --- a/src/chat/__init__.py +++ b/src/chat/__init__.py @@ -5,7 +5,7 @@ MaiBot模块系统 from src.chat.message_receive.chat_stream import chat_manager from src.chat.emoji_system.emoji_manager import emoji_manager -from src.chat.person_info.relationship_manager import relationship_manager +from src.person_info.relationship_manager import relationship_manager from src.chat.normal_chat.willing.willing_manager import willing_manager # 导出主要组件供外部使用 diff --git a/src/chat/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py index fda0a63fd..51275c9b7 100644 --- a/src/chat/emoji_system/emoji_manager.py +++ b/src/chat/emoji_system/emoji_manager.py @@ -12,11 +12,11 @@ import re # from gradio_client import file -from ...common.database.database_model import Emoji -from ...common.database.database import db as peewee_db -from ...config.config import global_config -from ..utils.utils_image import image_path_to_base64, image_manager -from 
..models.utils_model import LLMRequest +from src.common.database.database_model import Emoji +from src.common.database.database import db as peewee_db +from src.config.config import global_config +from src.chat.utils.utils_image import image_path_to_base64, image_manager +from src.llm_models.utils_model import LLMRequest from src.common.logger_manager import get_logger from rich.traceback import install diff --git a/src/chat/focus_chat/expressors/default_expressor.py b/src/chat/focus_chat/expressors/default_expressor.py index 3b95b185c..2d0d1f35b 100644 --- a/src/chat/focus_chat/expressors/default_expressor.py +++ b/src/chat/focus_chat/expressors/default_expressor.py @@ -5,7 +5,7 @@ from src.chat.message_receive.message import Seg # Local import needed after mo from src.chat.message_receive.message import UserInfo from src.chat.message_receive.chat_stream import chat_manager from src.common.logger_manager import get_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.utils.utils_image import image_path_to_base64 # Local import needed after move from src.chat.utils.timer_calculator import Timer # <--- Import Timer diff --git a/src/chat/focus_chat/expressors/exprssion_learner.py b/src/chat/focus_chat/expressors/exprssion_learner.py index 348de9f09..31cb5d13b 100644 --- a/src/chat/focus_chat/expressors/exprssion_learner.py +++ b/src/chat/focus_chat/expressors/exprssion_learner.py @@ -2,7 +2,7 @@ import time import random from typing import List, Dict, Optional, Any, Tuple from src.common.logger_manager import get_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_anonymous_messages from src.chat.focus_chat.heartflow_prompt_builder import Prompt, global_prompt_manager 
diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index a22807b72..a0144294f 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -425,7 +425,10 @@ class HeartFChatting: self.all_observations = observations with Timer("回忆", cycle_timers): + logger.debug(f"{self.log_prefix} 开始回忆") running_memorys = await self.memory_activator.activate_memory(observations) + logger.debug(f"{self.log_prefix} 回忆完成") + logger.debug(f"{self.log_prefix} running_memorys: {running_memorys}") with Timer("执行 信息处理器", cycle_timers): all_plan_info = await self._process_processors(observations, running_memorys, cycle_timers) diff --git a/src/chat/focus_chat/heartflow_message_revceiver.py b/src/chat/focus_chat/heartflow_message_revceiver.py index 0e5cc0534..57f133f7a 100644 --- a/src/chat/focus_chat/heartflow_message_revceiver.py +++ b/src/chat/focus_chat/heartflow_message_revceiver.py @@ -11,7 +11,7 @@ from ..message_receive.chat_stream import chat_manager # from ..message_receive.message_buffer import message_buffer from ..utils.timer_calculator import Timer -from src.chat.person_info.relationship_manager import relationship_manager +from src.person_info.relationship_manager import relationship_manager from typing import Optional, Tuple, Dict, Any logger = get_logger("chat") diff --git a/src/chat/focus_chat/heartflow_prompt_builder.py b/src/chat/focus_chat/heartflow_prompt_builder.py index b506b9066..e0be2d809 100644 --- a/src/chat/focus_chat/heartflow_prompt_builder.py +++ b/src/chat/focus_chat/heartflow_prompt_builder.py @@ -3,7 +3,7 @@ from src.common.logger_manager import get_logger from src.individuality.individuality import individuality from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat -from src.chat.person_info.relationship_manager import relationship_manager +from src.person_info.relationship_manager import 
relationship_manager import time from typing import Optional from src.chat.utils.utils import get_recent_group_speaker diff --git a/src/chat/focus_chat/info/self_info.py b/src/chat/focus_chat/info/self_info.py index 866457956..cec3be6b6 100644 --- a/src/chat/focus_chat/info/self_info.py +++ b/src/chat/focus_chat/info/self_info.py @@ -37,4 +37,4 @@ class SelfInfo(InfoBase): Returns: str: 处理后的信息 """ - return self.get_self_info() + return self.get_self_info() or "" diff --git a/src/chat/focus_chat/info/structured_info.py b/src/chat/focus_chat/info/structured_info.py index 61269c8f2..3a55c81f2 100644 --- a/src/chat/focus_chat/info/structured_info.py +++ b/src/chat/focus_chat/info/structured_info.py @@ -67,3 +67,16 @@ class StructuredInfo: value: 要设置的属性值 """ self.data[key] = value + + def get_processed_info(self) -> str: + """获取处理后的信息 + + Returns: + str: 处理后的信息字符串 + """ + + info_str = "" + for key, value in self.data.items(): + info_str += f"信息类型:{key},信息内容:{value}\n" + + return info_str diff --git a/src/chat/focus_chat/info_processors/action_processor.py b/src/chat/focus_chat/info_processors/action_processor.py index 89970cd99..04a4dc5bd 100644 --- a/src/chat/focus_chat/info_processors/action_processor.py +++ b/src/chat/focus_chat/info_processors/action_processor.py @@ -8,7 +8,7 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from src.chat.message_receive.chat_stream import ChatStream, chat_manager from typing import Dict -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config import random diff --git a/src/chat/focus_chat/info_processors/chattinginfo_processor.py b/src/chat/focus_chat/info_processors/chattinginfo_processor.py index 5b46d16bb..1fcab5e44 100644 --- a/src/chat/focus_chat/info_processors/chattinginfo_processor.py +++ 
b/src/chat/focus_chat/info_processors/chattinginfo_processor.py @@ -9,7 +9,7 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati from src.chat.focus_chat.info.cycle_info import CycleInfo from datetime import datetime from typing import Dict -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config logger = get_logger("processor") diff --git a/src/chat/focus_chat/info_processors/mind_processor.py b/src/chat/focus_chat/info_processors/mind_processor.py index 0457ea1e8..8e911acec 100644 --- a/src/chat/focus_chat/info_processors/mind_processor.py +++ b/src/chat/focus_chat/info_processors/mind_processor.py @@ -1,6 +1,6 @@ from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from src.chat.heart_flow.observation.observation import Observation -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config import time import traceback @@ -9,7 +9,7 @@ from src.individuality.individuality import individuality from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.utils.json_utils import safe_json_dumps from src.chat.message_receive.chat_stream import chat_manager -from src.chat.person_info.relationship_manager import relationship_manager +from src.person_info.relationship_manager import relationship_manager from .base_processor import BaseProcessor from src.chat.focus_chat.info.mind_info import MindInfo from typing import List, Optional diff --git a/src/chat/focus_chat/info_processors/self_processor.py b/src/chat/focus_chat/info_processors/self_processor.py index 0b65fe77c..692c5207e 100644 --- a/src/chat/focus_chat/info_processors/self_processor.py +++ b/src/chat/focus_chat/info_processors/self_processor.py @@ -1,6 +1,6 @@ from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from 
src.chat.heart_flow.observation.observation import Observation -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config import time import traceback @@ -8,7 +8,7 @@ from src.common.logger_manager import get_logger from src.individuality.individuality import individuality from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.chat.message_receive.chat_stream import chat_manager -from src.chat.person_info.relationship_manager import relationship_manager +from src.person_info.relationship_manager import relationship_manager from .base_processor import BaseProcessor from typing import List, Optional from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation @@ -33,12 +33,13 @@ def init_prompt(): 现在请你根据现有的信息,思考自我认同 1. 你是一个什么样的人,你和群里的人关系如何 -2. 思考有没有人提到你,或者图片与你有关 -3. 你的自我认同是否有助于你的回答,如果你需要自我相关的信息来帮你参与聊天,请输出,否则请输出十个字以内的简短自我认同 -4. 一般情况下不用输出自我认同,只需要输出十几个字的简短自我认同就好,除非有明显需要自我认同的场景 +2. 你的形象是什么 +3. 思考有没有人提到你,或者图片与你有关 +4. 你的自我认同是否有助于你的回答,如果你需要自我相关的信息来帮你参与聊天,请输出,否则请输出十几个字的简短自我认同 +5. 
一般情况下不用输出自我认同,只需要输出十几个字的简短自我认同就好,除非有明显需要自我认同的场景 -请思考的平淡一些,简短一些,说中文,不要浮夸,平淡一些。 -请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出自我认同内容。 +输出内容平淡一些,说中文,不要浮夸,平淡一些。 +请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出自我认同内容,记得明确说明这是你的自我认同。 """ Prompt(indentify_prompt, "indentify_prompt") diff --git a/src/chat/focus_chat/info_processors/tool_processor.py b/src/chat/focus_chat/info_processors/tool_processor.py index 294f130e8..39ac8dc67 100644 --- a/src/chat/focus_chat/info_processors/tool_processor.py +++ b/src/chat/focus_chat/info_processors/tool_processor.py @@ -1,5 +1,5 @@ from src.chat.heart_flow.observation.chatting_observation import ChattingObservation -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config import time from src.common.logger_manager import get_logger @@ -7,7 +7,7 @@ from src.individuality.individuality import individuality from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.tools.tool_use import ToolUser from src.chat.utils.json_utils import process_llm_tool_calls -from src.chat.person_info.relationship_manager import relationship_manager +from src.person_info.relationship_manager import relationship_manager from .base_processor import BaseProcessor from typing import List, Optional, Dict from src.chat.heart_flow.observation.observation import Observation diff --git a/src/chat/focus_chat/info_processors/working_memory_processor.py b/src/chat/focus_chat/info_processors/working_memory_processor.py index c79c8363d..cceb16235 100644 --- a/src/chat/focus_chat/info_processors/working_memory_processor.py +++ b/src/chat/focus_chat/info_processors/working_memory_processor.py @@ -1,6 +1,6 @@ from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from src.chat.heart_flow.observation.observation import Observation -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from 
src.config.config import global_config import time import traceback diff --git a/src/chat/focus_chat/memory_activator.py b/src/chat/focus_chat/memory_activator.py index 4fcd37302..0d5d63229 100644 --- a/src/chat/focus_chat/memory_activator.py +++ b/src/chat/focus_chat/memory_activator.py @@ -1,7 +1,7 @@ from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from src.chat.heart_flow.observation.structure_observation import StructureObservation from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.common.logger_manager import get_logger from src.chat.utils.prompt_builder import Prompt @@ -61,6 +61,8 @@ class MemoryActivator: elif isinstance(observation, HFCloopObservation): obs_info_text += observation.get_observe_info() + logger.debug(f"回忆待检索内容:obs_info_text: {obs_info_text}") + # prompt = await global_prompt_manager.format_prompt( # "memory_activator_prompt", # obs_info_text=obs_info_text, @@ -81,7 +83,7 @@ class MemoryActivator: # valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3 # ) related_memory = await HippocampusManager.get_instance().get_memory_from_text( - text=obs_info_text, max_memory_num=3, max_memory_length=2, max_depth=3, fast_retrieval=True + text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=True ) # logger.debug(f"获取到的记忆: {related_memory}") diff --git a/src/chat/focus_chat/planners/actions/plugin_action.py b/src/chat/focus_chat/planners/actions/plugin_action.py index 35ffd9b20..a74c4328d 100644 --- a/src/chat/focus_chat/planners/actions/plugin_action.py +++ b/src/chat/focus_chat/planners/actions/plugin_action.py @@ -1,11 +1,14 @@ import traceback from typing import Tuple, Dict, List, Any, Optional -from src.chat.focus_chat.planners.actions.base_action import BaseAction +from 
src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action # noqa F401 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation from src.chat.focus_chat.hfc_utils import create_empty_anchor_message from src.common.logger_manager import get_logger -from src.chat.person_info.person_info import person_info_manager +from src.person_info.person_info import person_info_manager from abc import abstractmethod +import os +import inspect +import toml # 导入 toml 库 logger = get_logger("plugin_action") @@ -16,12 +19,24 @@ class PluginAction(BaseAction): 封装了主程序内部依赖,提供简化的API接口给插件开发者 """ - def __init__(self, action_data: dict, reasoning: str, cycle_timers: dict, thinking_id: str, **kwargs): + action_config_file_name: Optional[str] = None # 插件可以覆盖此属性来指定配置文件名 + + def __init__( + self, + action_data: dict, + reasoning: str, + cycle_timers: dict, + thinking_id: str, + global_config: Optional[dict] = None, + **kwargs, + ): """初始化插件动作基类""" super().__init__(action_data, reasoning, cycle_timers, thinking_id) # 存储内部服务和对象引用 self._services = {} + self._global_config = global_config # 存储全局配置的只读引用 + self.config: Dict[str, Any] = {} # 用于存储插件自身的配置 # 从kwargs提取必要的内部服务 if "observations" in kwargs: @@ -32,6 +47,61 @@ class PluginAction(BaseAction): self._services["chat_stream"] = kwargs["chat_stream"] self.log_prefix = kwargs.get("log_prefix", "") + self._load_plugin_config() # 初始化时加载插件配置 + + def _load_plugin_config(self): + """ + 加载插件自身的配置文件。 + 配置文件应与插件模块在同一目录下。 + 插件可以通过覆盖 `action_config_file_name` 类属性来指定文件名。 + 如果 `action_config_file_name` 未指定,则不加载配置。 + 仅支持 TOML (.toml) 格式。 + """ + if not self.action_config_file_name: + logger.debug( + f"{self.log_prefix} 插件 {self.__class__.__name__} 未指定 action_config_file_name,不加载插件配置。" + ) + return + + try: + plugin_module_path = inspect.getfile(self.__class__) + plugin_dir = os.path.dirname(plugin_module_path) + config_file_path = os.path.join(plugin_dir, self.action_config_file_name) + + if not 
os.path.exists(config_file_path): + logger.warning( + f"{self.log_prefix} 插件 {self.__class__.__name__} 的配置文件 {config_file_path} 不存在。" + ) + return + + file_ext = os.path.splitext(self.action_config_file_name)[1].lower() + + if file_ext == ".toml": + with open(config_file_path, "r", encoding="utf-8") as f: + self.config = toml.load(f) or {} + logger.info(f"{self.log_prefix} 插件 {self.__class__.__name__} 的配置已从 {config_file_path} 加载。") + else: + logger.warning( + f"{self.log_prefix} 不支持的插件配置文件格式: {file_ext}。仅支持 .toml。插件配置未加载。" + ) + self.config = {} # 确保未加载时为空字典 + return + + except Exception as e: + logger.error( + f"{self.log_prefix} 加载插件 {self.__class__.__name__} 的配置文件 {self.action_config_file_name} 时出错: {e}" + ) + self.config = {} # 出错时确保 config 是一个空字典 + + def get_global_config(self, key: str, default: Any = None) -> Any: + """ + 安全地从全局配置中获取一个值。 + 插件应使用此方法读取全局配置,以保证只读和隔离性。 + """ + if self._global_config: + return self._global_config.get(key, default) + logger.debug(f"{self.log_prefix} 尝试访问全局配置项 '{key}',但全局配置未提供。") + return default async def get_user_id_by_person_name(self, person_name: str) -> Tuple[str, str]: """根据用户名获取用户ID""" diff --git a/src/chat/focus_chat/planners/planner.py b/src/chat/focus_chat/planners/planner.py index 8e1e8a0a0..5581d06f2 100644 --- a/src/chat/focus_chat/planners/planner.py +++ b/src/chat/focus_chat/planners/planner.py @@ -2,7 +2,7 @@ import json # <--- 确保导入 json import traceback from typing import List, Dict, Any, Optional from rich.traceback import install -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.focus_chat.info.info_base import InfoBase from src.chat.focus_chat.info.obs_info import ObsInfo @@ -10,6 +10,7 @@ from src.chat.focus_chat.info.cycle_info import CycleInfo from src.chat.focus_chat.info.mind_info import MindInfo from src.chat.focus_chat.info.action_info import ActionInfo from 
src.chat.focus_chat.info.structured_info import StructuredInfo +from src.chat.focus_chat.info.self_info import SelfInfo from src.common.logger_manager import get_logger from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from src.individuality.individuality import individuality @@ -22,7 +23,11 @@ install(extra_lines=3) def init_prompt(): Prompt( - """{extra_info_block} + """ +你的自我认知是: +{self_info_block} + +{extra_info_block} 你需要基于以下信息决定如何参与对话 这些信息可能会有冲突,请你整合这些信息,并选择一个最合适的action: @@ -127,6 +132,8 @@ class ActionPlanner: current_mind = info.get_current_mind() elif isinstance(info, CycleInfo): cycle_info = info.get_observe_info() + elif isinstance(info, SelfInfo): + self_info = info.get_processed_info() elif isinstance(info, StructuredInfo): _structured_info = info.get_data() elif not isinstance(info, ActionInfo): # 跳过已处理的ActionInfo @@ -148,6 +155,7 @@ class ActionPlanner: # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- prompt = await self.build_planner_prompt( + self_info_block=self_info, is_group_chat=is_group_chat, # <-- Pass HFC state chat_target_info=None, observed_messages_str=observed_messages_str, # <-- Pass local variable @@ -236,6 +244,7 @@ class ActionPlanner: async def build_planner_prompt( self, + self_info_block: str, is_group_chat: bool, # Now passed as argument chat_target_info: Optional[dict], # Now passed as argument observed_messages_str: str, @@ -301,7 +310,8 @@ class ActionPlanner: planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt") prompt = planner_prompt_template.format( - bot_name=global_config.bot.nickname, + self_info_block=self_info_block, + # bot_name=global_config.bot.nickname, prompt_personality=personality_block, chat_context_description=chat_context_description, chat_content_block=chat_content_block, diff --git a/src/chat/focus_chat/working_memory/memory_manager.py b/src/chat/focus_chat/working_memory/memory_manager.py index 7fda40239..2ee8a36de 100644 --- 
a/src/chat/focus_chat/working_memory/memory_manager.py +++ b/src/chat/focus_chat/working_memory/memory_manager.py @@ -3,7 +3,7 @@ import traceback from json_repair import repair_json from rich.traceback import install from src.common.logger_manager import get_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.focus_chat.working_memory.memory_item import MemoryItem import json # 添加json模块导入 diff --git a/src/chat/heart_flow/observation/chatting_observation.py b/src/chat/heart_flow/observation/chatting_observation.py index a1375f587..b43074fa0 100644 --- a/src/chat/heart_flow/observation/chatting_observation.py +++ b/src/chat/heart_flow/observation/chatting_observation.py @@ -1,5 +1,5 @@ from datetime import datetime -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config import traceback from src.chat.utils.chat_message_builder import ( diff --git a/src/chat/heart_flow/observation/hfcloop_observation.py b/src/chat/heart_flow/observation/hfcloop_observation.py index 2e047f071..bd8f3f343 100644 --- a/src/chat/heart_flow/observation/hfcloop_observation.py +++ b/src/chat/heart_flow/observation/hfcloop_observation.py @@ -88,5 +88,6 @@ class HFCloopObservation: for action_name, action_info in using_actions.items(): action_description = action_info["description"] cycle_info_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n" + cycle_info_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n" self.observe_info = cycle_info_block diff --git a/src/chat/heart_flow/utils_chat.py b/src/chat/heart_flow/utils_chat.py index 68d5cb1bc..f796254c4 100644 --- a/src/chat/heart_flow/utils_chat.py +++ b/src/chat/heart_flow/utils_chat.py @@ -2,7 +2,7 @@ import asyncio from typing import Optional, Tuple, Dict from src.common.logger_manager import get_logger from 
src.chat.message_receive.chat_stream import chat_manager -from src.chat.person_info.person_info import person_info_manager +from src.person_info.person_info import person_info_manager logger = get_logger("heartflow_utils") diff --git a/src/chat/memory_system/Hippocampus.py b/src/chat/memory_system/Hippocampus.py index 68758e298..d7a13bfe4 100644 --- a/src/chat/memory_system/Hippocampus.py +++ b/src/chat/memory_system/Hippocampus.py @@ -11,7 +11,7 @@ import jieba import networkx as nx import numpy as np from collections import Counter -from ...chat.models.utils_model import LLMRequest +from ...llm_models.utils_model import LLMRequest from src.common.logger_manager import get_logger from src.chat.memory_system.sample_distribution import MemoryBuildScheduler # 分布生成器 from ..utils.chat_message_builder import ( @@ -338,7 +338,8 @@ class Hippocampus: # 去重 keywords = list(set(keywords)) # 限制关键词数量 - keywords = keywords[:5] + logger.debug(f"提取关键词: {keywords}") + else: # 使用LLM提取关键词 topic_num = min(5, max(1, int(len(text) * 0.1))) # 根据文本长度动态调整关键词数量 @@ -361,7 +362,7 @@ class Hippocampus: # 过滤掉不存在于记忆图中的关键词 valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G] if not valid_keywords: - # logger.info("没有找到有效的关键词节点") + logger.info("没有找到有效的关键词节点") return [] logger.debug(f"有效的关键词: {', '.join(valid_keywords)}") diff --git a/src/chat/message_receive/__init__.py b/src/chat/message_receive/__init__.py index 39a1f2637..ba091bcb8 100644 --- a/src/chat/message_receive/__init__.py +++ b/src/chat/message_receive/__init__.py @@ -1,8 +1,8 @@ -from ..emoji_system.emoji_manager import emoji_manager -from ..person_info.relationship_manager import relationship_manager -from .chat_stream import chat_manager -from .message_sender import message_manager -from .storage import MessageStorage +from src.chat.emoji_system.emoji_manager import emoji_manager +from src.person_info.relationship_manager import relationship_manager +from src.chat.message_receive.chat_stream import 
chat_manager +from src.chat.message_receive.message_sender import message_manager +from src.chat.message_receive.storage import MessageStorage __all__ = [ diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py index 9fe663b53..3264ab5bd 100644 --- a/src/chat/normal_chat/normal_chat.py +++ b/src/chat/normal_chat/normal_chat.py @@ -11,7 +11,7 @@ from src.common.logger_manager import get_logger from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info from src.manager.mood_manager import mood_manager from src.chat.message_receive.chat_stream import ChatStream, chat_manager -from src.chat.person_info.relationship_manager import relationship_manager +from src.person_info.relationship_manager import relationship_manager from src.chat.utils.info_catcher import info_catcher_manager from src.chat.utils.timer_calculator import Timer from src.chat.utils.prompt_builder import global_prompt_manager diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py index 631f7baa5..efa1ec54f 100644 --- a/src/chat/normal_chat/normal_chat_generator.py +++ b/src/chat/normal_chat/normal_chat_generator.py @@ -1,8 +1,8 @@ from typing import List, Optional, Tuple, Union import random -from ..models.utils_model import LLMRequest -from ...config.config import global_config -from ..message_receive.message import MessageThinking +from src.llm_models.utils_model import LLMRequest +from src.config.config import global_config +from src.chat.message_receive.message import MessageThinking from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder from src.chat.utils.utils import process_llm_response from src.chat.utils.timer_calculator import Timer diff --git a/src/chat/normal_chat/willing/willing_manager.py b/src/chat/normal_chat/willing/willing_manager.py index bbc5dcc0a..4080ae8e8 100644 --- a/src/chat/normal_chat/willing/willing_manager.py +++ b/src/chat/normal_chat/willing/willing_manager.py 
@@ -3,7 +3,7 @@ from dataclasses import dataclass from src.config.config import global_config from src.chat.message_receive.chat_stream import ChatStream, GroupInfo from src.chat.message_receive.message import MessageRecv -from src.chat.person_info.person_info import person_info_manager, PersonInfoManager +from src.person_info.person_info import person_info_manager, PersonInfoManager from abc import ABC, abstractmethod import importlib from typing import Dict, Optional diff --git a/src/chat/utils/chat_message_builder.py b/src/chat/utils/chat_message_builder.py index d662d8c0a..4c66b7428 100644 --- a/src/chat/utils/chat_message_builder.py +++ b/src/chat/utils/chat_message_builder.py @@ -4,7 +4,7 @@ import time # 导入 time 模块以获取当前时间 import random import re from src.common.message_repository import find_messages, count_messages -from src.chat.person_info.person_info import person_info_manager +from src.person_info.person_info import person_info_manager from src.chat.utils.utils import translate_timestamp_to_human_readable diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py index 25e0e6e12..19703ec40 100644 --- a/src/chat/utils/utils.py +++ b/src/chat/utils/utils.py @@ -10,7 +10,7 @@ from maim_message import UserInfo from src.common.logger import get_module_logger from src.manager.mood_manager import mood_manager from ..message_receive.message import MessageRecv -from ..models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from .typo_generator import ChineseTypoGenerator from ...config.config import global_config from ...common.message_repository import find_messages, count_messages diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py index c317fbbd6..ca9f00aa9 100644 --- a/src/chat/utils/utils_image.py +++ b/src/chat/utils/utils_image.py @@ -8,10 +8,10 @@ import io import numpy as np -from ...common.database.database import db -from ...common.database.database_model import Images, ImageDescriptions -from 
...config.config import global_config -from ..models.utils_model import LLMRequest +from src.common.database.database import db +from src.common.database.database_model import Images, ImageDescriptions +from src.config.config import global_config +from src.llm_models.utils_model import LLMRequest from src.common.logger_manager import get_logger from rich.traceback import install diff --git a/src/config/config_base.py b/src/config/config_base.py index 92f6cf9d4..fbd3dd9d0 100644 --- a/src/config/config_base.py +++ b/src/config/config_base.py @@ -1,5 +1,5 @@ from dataclasses import dataclass, fields, MISSING -from typing import TypeVar, Type, Any, get_origin, get_args +from typing import TypeVar, Type, Any, get_origin, get_args, Literal T = TypeVar("T", bound="ConfigBase") @@ -102,6 +102,18 @@ class ConfigBase: return {cls._convert_field(k, key_type): cls._convert_field(v, value_type) for k, v in value.items()} # 处理基础类型,例如 int, str 等 + if field_origin_type is type(None) and value is None: # 处理Optional类型 + return None + + # 处理Literal类型 + if field_origin_type is Literal or get_origin(field_type) is Literal: + # 获取Literal的允许值 + allowed_values = get_args(field_type) + if value in allowed_values: + return value + else: + raise TypeError(f"Value '{value}' is not in allowed values {allowed_values} for Literal type") + if field_type is Any or isinstance(value, field_type): return value diff --git a/src/experimental/PFC/action_planner.py b/src/experimental/PFC/action_planner.py index 2726f9c8b..6ab4c2305 100644 --- a/src/experimental/PFC/action_planner.py +++ b/src/experimental/PFC/action_planner.py @@ -1,7 +1,7 @@ import time from typing import Tuple, Optional # 增加了 Optional from src.common.logger_manager import get_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.experimental.PFC.chat_observer import ChatObserver from src.experimental.PFC.pfc_utils import 
get_items_from_json diff --git a/src/experimental/PFC/pfc.py b/src/experimental/PFC/pfc.py index ec34e8281..d487a1aad 100644 --- a/src/experimental/PFC/pfc.py +++ b/src/experimental/PFC/pfc.py @@ -1,6 +1,6 @@ from typing import List, Tuple, TYPE_CHECKING from src.common.logger import get_module_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.experimental.PFC.chat_observer import ChatObserver from src.experimental.PFC.pfc_utils import get_items_from_json diff --git a/src/experimental/PFC/pfc_KnowledgeFetcher.py b/src/experimental/PFC/pfc_KnowledgeFetcher.py index 4c1d8c759..769d54da8 100644 --- a/src/experimental/PFC/pfc_KnowledgeFetcher.py +++ b/src/experimental/PFC/pfc_KnowledgeFetcher.py @@ -1,7 +1,7 @@ from typing import List, Tuple from src.common.logger import get_module_logger from src.chat.memory_system.Hippocampus import HippocampusManager -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.message_receive.message import Message from src.chat.knowledge.knowledge_lib import qa_manager diff --git a/src/experimental/PFC/reply_checker.py b/src/experimental/PFC/reply_checker.py index 5bca9d601..a13618797 100644 --- a/src/experimental/PFC/reply_checker.py +++ b/src/experimental/PFC/reply_checker.py @@ -1,7 +1,7 @@ import json from typing import Tuple, List, Dict, Any from src.common.logger import get_module_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.experimental.PFC.chat_observer import ChatObserver from maim_message import UserInfo diff --git a/src/experimental/PFC/reply_generator.py b/src/experimental/PFC/reply_generator.py index c2e770248..0fababc67 100644 --- a/src/experimental/PFC/reply_generator.py +++ 
b/src/experimental/PFC/reply_generator.py @@ -1,6 +1,6 @@ from typing import Tuple, List, Dict, Any from src.common.logger import get_module_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.experimental.PFC.chat_observer import ChatObserver from src.experimental.PFC.reply_checker import ReplyChecker diff --git a/src/individuality/expression_style.py b/src/individuality/expression_style.py index c642a86c3..30906c450 100644 --- a/src/individuality/expression_style.py +++ b/src/individuality/expression_style.py @@ -1,6 +1,6 @@ import random from src.common.logger_manager import get_logger -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.chat.utils.prompt_builder import Prompt, global_prompt_manager from typing import List, Tuple diff --git a/src/chat/models/utils_model.py b/src/llm_models/utils_model.py similarity index 99% rename from src/chat/models/utils_model.py rename to src/llm_models/utils_model.py index f6528856d..cda51b945 100644 --- a/src/chat/models/utils_model.py +++ b/src/llm_models/utils_model.py @@ -3,10 +3,8 @@ import json import re from datetime import datetime from typing import Tuple, Union, Dict, Any - import aiohttp from aiohttp.client import ClientResponse - from src.common.logger import get_module_logger import base64 from PIL import Image @@ -14,7 +12,7 @@ import io import os from src.common.database.database import db # 确保 db 被导入用于 create_tables from src.common.database.database_model import LLMUsage # 导入 LLMUsage 模型 -from ...config.config import global_config +from src.config.config import global_config from rich.traceback import install install(extra_lines=3) diff --git a/src/main.py b/src/main.py index fb138fd50..3b0cbf013 100644 --- a/src/main.py +++ b/src/main.py @@ -6,7 +6,7 @@ from .manager.async_task_manager import 
async_task_manager from .chat.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask from .manager.mood_manager import MoodPrintTask, MoodUpdateTask from .chat.emoji_system.emoji_manager import emoji_manager -from .chat.person_info.person_info import person_info_manager +from .person_info.person_info import person_info_manager from .chat.normal_chat.willing.willing_manager import willing_manager from .chat.message_receive.chat_stream import chat_manager from src.chat.heart_flow.heartflow import heartflow diff --git a/src/chat/person_info/person_info.py b/src/person_info/person_info.py similarity index 99% rename from src/chat/person_info/person_info.py rename to src/person_info/person_info.py index de120c6a4..788781291 100644 --- a/src/chat/person_info/person_info.py +++ b/src/person_info/person_info.py @@ -1,13 +1,13 @@ from src.common.logger_manager import get_logger -from ...common.database.database import db -from ...common.database.database_model import PersonInfo # 新增导入 +from src.common.database.database import db +from src.common.database.database_model import PersonInfo # 新增导入 import copy import hashlib from typing import Any, Callable, Dict import datetime import asyncio import numpy as np -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config from src.individuality.individuality import individuality diff --git a/src/chat/person_info/relationship_manager.py b/src/person_info/relationship_manager.py similarity index 98% rename from src/chat/person_info/relationship_manager.py rename to src/person_info/relationship_manager.py index a23780c0e..5388ac622 100644 --- a/src/chat/person_info/relationship_manager.py +++ b/src/person_info/relationship_manager.py @@ -1,13 +1,13 @@ from src.common.logger_manager import get_logger -from ..message_receive.chat_stream import ChatStream +from src.chat.message_receive.chat_stream import ChatStream import math from 
bson.decimal128 import Decimal128 -from .person_info import person_info_manager +from src.person_info.person_info import person_info_manager import time import random from maim_message import UserInfo -from ...manager.mood_manager import mood_manager +from src.manager.mood_manager import mood_manager # import re # import traceback diff --git a/src/plugins/test_plugin/actions/mute_action.py b/src/plugins/test_plugin/actions/mute_action.py index 89fa387f5..d0f947c49 100644 --- a/src/plugins/test_plugin/actions/mute_action.py +++ b/src/plugins/test_plugin/actions/mute_action.py @@ -26,7 +26,7 @@ class MuteAction(PluginAction): "当你想回避某个话题时使用", ] default = True # 不是默认动作,需要手动添加到使用集 - associated_types = ["command",'text'] + associated_types = ["command", "text"] async def process(self) -> Tuple[bool, str]: """处理测试动作""" diff --git a/src/plugins/test_plugin_pic/__init__.py b/src/plugins/test_plugin_pic/__init__.py new file mode 100644 index 000000000..5242f1408 --- /dev/null +++ b/src/plugins/test_plugin_pic/__init__.py @@ -0,0 +1,5 @@ +"""测试插件包:图片发送""" + +""" +这是一个测试插件,用于测试图片发送功能 +""" diff --git a/src/plugins/test_plugin_pic/actions/__init__.py b/src/plugins/test_plugin_pic/actions/__init__.py new file mode 100644 index 000000000..249d25223 --- /dev/null +++ b/src/plugins/test_plugin_pic/actions/__init__.py @@ -0,0 +1,4 @@ +"""测试插件动作模块""" + +# 导入所有动作模块以确保装饰器被执行 +from . 
import pic_action # noqa diff --git a/src/plugins/test_plugin_pic/actions/generate_pic_config.py b/src/plugins/test_plugin_pic/actions/generate_pic_config.py new file mode 100644 index 000000000..207e0c539 --- /dev/null +++ b/src/plugins/test_plugin_pic/actions/generate_pic_config.py @@ -0,0 +1,50 @@ +import os + +CONFIG_CONTENT = """\ +# 请替换为您的火山引擎 Access Key ID +volcano_ak = "YOUR_VOLCANO_ENGINE_ACCESS_KEY_ID_HERE" +# 请替换为您的火山引擎 Secret Access Key +volcano_sk = "YOUR_VOLCANO_ENGINE_SECRET_ACCESS_KEY_HERE" +# 火山方舟 API 的基础 URL +base_url = "https://ark.cn-beijing.volces.com/api/v3" +# 默认图片生成模型 +default_model = "doubao-seedream-3-0-t2i-250415" +# 默认图片尺寸 +default_size = "1024x1024" +# 用于图片生成的API密钥 +# PicAction 当前配置为在HTTP请求体和Authorization头中使用此密钥。 +# 如果您的API认证方式不同,请相应调整或移除。 +volcano_generate_api_key = "YOUR_VOLCANO_GENERATE_API_KEY_HERE" + +# 是否默认开启水印 +default_watermark = true +# 默认引导强度 +default_guidance_scale = 2.5 +# 默认随机种子 +default_seed = 42 + +# 更多插件特定配置可以在此添加... +# custom_parameter = "some_value" +""" + + +def generate_config(): + # 获取当前脚本所在的目录 + current_dir = os.path.dirname(os.path.abspath(__file__)) + config_file_path = os.path.join(current_dir, "pic_action_config.toml") + + if not os.path.exists(config_file_path): + try: + with open(config_file_path, "w", encoding="utf-8") as f: + f.write(CONFIG_CONTENT) + print(f"配置文件已生成: {config_file_path}") + print("请记得编辑该文件,填入您的火山引擎 AK/SK 和 API 密钥。") + except IOError as e: + print(f"错误:无法写入配置文件 {config_file_path}。原因: {e}") + else: + print(f"配置文件已存在: {config_file_path}") + print("未进行任何更改。如果您想重新生成,请先删除或重命名现有文件。") + + +if __name__ == "__main__": + generate_config() diff --git a/src/plugins/test_plugin_pic/actions/pic_action.py b/src/plugins/test_plugin_pic/actions/pic_action.py new file mode 100644 index 000000000..0a965e872 --- /dev/null +++ b/src/plugins/test_plugin_pic/actions/pic_action.py @@ -0,0 +1,264 @@ +import asyncio +import json +import urllib.request +import urllib.error +import base64 # 新增:用于Base64编码 +import 
traceback # 新增:用于打印堆栈跟踪 +from typing import Tuple +from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action +from src.common.logger_manager import get_logger +from .generate_pic_config import generate_config + +logger = get_logger("pic_action") + +# 当此模块被加载时,尝试生成配置文件(如果它不存在) +# 注意:在某些插件加载机制下,这可能会在每次机器人启动或插件重载时执行 +# 考虑是否需要更复杂的逻辑来决定何时运行 (例如,仅在首次安装时) +generate_config() + + +@register_action +class PicAction(PluginAction): + """根据描述使用火山引擎HTTP API生成图片的动作处理类""" + + action_name = "pic_action" + action_description = "可以根据特定的描述,使用火山引擎模型生成并发送一张图片 (通过HTTP API)" + action_parameters = { + "description": "图片描述,输入你想要生成并发送的图片的描述,必填", + "size": "图片尺寸,例如 '1024x1024' (可选, 默认从配置或 '1024x1024')", + } + action_require = [ + "当有人要求你生成并发送一张图片时使用", + "当有人让你画一张图时使用", + ] + default = False + action_config_file_name = "pic_action_config.toml" + + def __init__( + self, + action_data: dict, + reasoning: str, + cycle_timers: dict, + thinking_id: str, + global_config: dict = None, + **kwargs, + ): + super().__init__(action_data, reasoning, cycle_timers, thinking_id, global_config, **kwargs) + + http_base_url = self.config.get("base_url") + http_api_key = self.config.get("volcano_generate_api_key") + + if not (http_base_url and http_api_key): + logger.error( + f"{self.log_prefix} PicAction初始化, 但HTTP配置 (base_url 或 volcano_generate_api_key) 缺失. HTTP图片生成将失败." + ) + else: + logger.info(f"{self.log_prefix} HTTP方式初始化完成. 
Base URL: {http_base_url}, API Key已配置.") + + # _restore_env_vars 方法不再需要,已移除 + + async def process(self) -> Tuple[bool, str]: + """处理图片生成动作(通过HTTP API)""" + logger.info(f"{self.log_prefix} 执行 pic_action (HTTP): {self.reasoning}") + + http_base_url = self.config.get("base_url") + http_api_key = self.config.get("volcano_generate_api_key") + + if not (http_base_url and http_api_key): + error_msg = "抱歉,图片生成功能所需的HTTP配置(如API地址或密钥)不完整,无法提供服务。" + await self.send_message_by_expressor(error_msg) + logger.error(f"{self.log_prefix} HTTP调用配置缺失: base_url 或 volcano_generate_api_key.") + return False, "HTTP配置不完整" + + description = self.action_data.get("description") + if not description: + logger.warning(f"{self.log_prefix} 图片描述为空,无法生成图片。") + await self.send_message_by_expressor("你需要告诉我想要画什么样的图片哦~") + return False, "图片描述为空" + + default_model = self.config.get("default_model", "doubao-seedream-3-0-t2i-250415") + image_size = self.action_data.get("size", self.config.get("default_size", "1024x1024")) + + # guidance_scale 现在完全由配置文件控制 + guidance_scale_input = self.config.get("default_guidance_scale", 2.5) # 默认2.5 + guidance_scale_val = 2.5 # Fallback default + try: + guidance_scale_val = float(guidance_scale_input) + except (ValueError, TypeError): + logger.warning( + f"{self.log_prefix} 配置文件中的 default_guidance_scale 值 '{guidance_scale_input}' 无效 (应为浮点数),使用默认值 2.5。" + ) + guidance_scale_val = 2.5 + + # Seed parameter - ensure it's always an integer + seed_config_value = self.config.get("default_seed") + seed_val = 42 # Default seed if not configured or invalid + if seed_config_value is not None: + try: + seed_val = int(seed_config_value) + except (ValueError, TypeError): + logger.warning( + f"{self.log_prefix} 配置文件中的 default_seed ('{seed_config_value}') 无效,将使用默认种子 42。" + ) + # seed_val is already 42 + else: + logger.info( + f"{self.log_prefix} 未在配置中找到 default_seed,将使用默认种子 42。建议在配置文件中添加 default_seed。" + ) + # seed_val is already 42 + + # Watermark 现在完全由配置文件控制 + effective_watermark_source 
= self.config.get("default_watermark", True) # 默认True + if isinstance(effective_watermark_source, bool): + watermark_val = effective_watermark_source + elif isinstance(effective_watermark_source, str): + watermark_val = effective_watermark_source.lower() == "true" + else: + logger.warning( + f"{self.log_prefix} 配置文件中的 default_watermark 值 '{effective_watermark_source}' 无效 (应为布尔值或 'true'/'false'),使用默认值 True。" + ) + watermark_val = True + + await self.send_message_by_expressor( + f"收到!正在为您生成关于 '{description}' 的图片,请稍候...(模型: {default_model}, 尺寸: {image_size})" + ) + + try: + success, result = await asyncio.to_thread( + self._make_http_image_request, + prompt=description, + model=default_model, + size=image_size, + seed=seed_val, + guidance_scale=guidance_scale_val, + watermark=watermark_val, + ) + except Exception as e: + logger.error(f"{self.log_prefix} (HTTP) 异步请求执行失败: {e!r}", exc_info=True) + traceback.print_exc() + success = False + result = f"图片生成服务遇到意外问题: {str(e)[:100]}" + + if success: + image_url = result + logger.info(f"{self.log_prefix} 图片URL获取成功: {image_url[:70]}... 
下载并编码.") + + try: + encode_success, encode_result = await asyncio.to_thread(self._download_and_encode_base64, image_url) + except Exception as e: + logger.error(f"{self.log_prefix} (B64) 异步下载/编码失败: {e!r}", exc_info=True) + traceback.print_exc() + encode_success = False + encode_result = f"图片下载或编码时发生内部错误: {str(e)[:100]}" + + if encode_success: + base64_image_string = encode_result + send_success = await self.send_message(type="emoji", data=base64_image_string) + if send_success: + await self.send_message_by_expressor("图片表情已发送!") + return True, "图片表情已发送" + else: + await self.send_message_by_expressor("图片已处理为Base64,但作为表情发送失败了。") + return False, "图片表情发送失败 (Base64)" + else: + await self.send_message_by_expressor(f"获取到图片URL,但在处理图片时失败了:{encode_result}") + return False, f"图片处理失败(Base64): {encode_result}" + else: + error_message = result + await self.send_message_by_expressor(f"哎呀,生成图片时遇到问题:{error_message}") + return False, f"图片生成失败: {error_message}" + + def _download_and_encode_base64(self, image_url: str) -> Tuple[bool, str]: + """下载图片并将其编码为Base64字符串""" + logger.info(f"{self.log_prefix} (B64) 下载并编码图片: {image_url[:70]}...") + try: + with urllib.request.urlopen(image_url, timeout=30) as response: + if response.status == 200: + image_bytes = response.read() + base64_encoded_image = base64.b64encode(image_bytes).decode("utf-8") + logger.info(f"{self.log_prefix} (B64) 图片下载编码完成. Base64长度: {len(base64_encoded_image)}") + return True, base64_encoded_image + else: + error_msg = f"下载图片失败 (状态: {response.status})" + logger.error(f"{self.log_prefix} (B64) {error_msg} URL: {image_url}") + return False, error_msg + except Exception as e: # Catches all exceptions from urlopen, b64encode, etc. 
+ logger.error(f"{self.log_prefix} (B64) 下载或编码时错误: {e!r}", exc_info=True) + traceback.print_exc() + return False, f"下载或编码图片时发生错误: {str(e)[:100]}" + + def _make_http_image_request( + self, prompt: str, model: str, size: str, seed: int | None, guidance_scale: float, watermark: bool + ) -> Tuple[bool, str]: + base_url = self.config.get("base_url") + generate_api_key = self.config.get("volcano_generate_api_key") + + endpoint = f"{base_url.rstrip('/')}/images/generations" + + payload_dict = { + "model": model, + "prompt": prompt, + "response_format": "url", + "size": size, + "guidance_scale": guidance_scale, + "watermark": watermark, + "seed": seed, # seed is now always an int from process() + "api-key": generate_api_key, + } + # if seed is not None: # No longer needed, seed is always an int + # payload_dict["seed"] = seed + + data = json.dumps(payload_dict).encode("utf-8") + headers = { + "Content-Type": "application/json", + "Accept": "application/json", + "Authorization": f"Bearer {generate_api_key}", + } + + logger.info(f"{self.log_prefix} (HTTP) 发起图片请求: {model}, Prompt: {prompt[:30]}... To: {endpoint}") + logger.debug( + f"{self.log_prefix} (HTTP) Request Headers: {{...Authorization: Bearer {generate_api_key[:10]}...}}" + ) + logger.debug( + f"{self.log_prefix} (HTTP) Request Body (api-key omitted): {json.dumps({k: v for k, v in payload_dict.items() if k != 'api-key'})}" + ) + + req = urllib.request.Request(endpoint, data=data, headers=headers, method="POST") + + try: + with urllib.request.urlopen(req, timeout=60) as response: + response_status = response.status + response_body_bytes = response.read() + response_body_str = response_body_bytes.decode("utf-8") + + logger.info(f"{self.log_prefix} (HTTP) 响应: {response_status}. 
Preview: {response_body_str[:150]}...") + + if 200 <= response_status < 300: + response_data = json.loads(response_body_str) + image_url = None + if ( + isinstance(response_data.get("data"), list) + and response_data["data"] + and isinstance(response_data["data"][0], dict) + ): + image_url = response_data["data"][0].get("url") + elif response_data.get("url"): + image_url = response_data.get("url") + + if image_url: + logger.info(f"{self.log_prefix} (HTTP) 图片生成成功,URL: {image_url[:70]}...") + return True, image_url + else: + logger.error( + f"{self.log_prefix} (HTTP) API成功但无图片URL. 响应预览: {response_body_str[:300]}..." + ) + return False, "图片生成API响应成功但未找到图片URL" + else: + logger.error( + f"{self.log_prefix} (HTTP) API请求失败. 状态: {response.status}. 正文: {response_body_str[:300]}..." + ) + return False, f"图片API请求失败(状态码 {response.status})" + except Exception as e: + logger.error(f"{self.log_prefix} (HTTP) 图片生成时意外错误: {e!r}", exc_info=True) + traceback.print_exc() + return False, f"图片生成HTTP请求时发生意外错误: {str(e)[:100]}" diff --git a/src/plugins/test_plugin_pic/actions/pic_action_config.toml b/src/plugins/test_plugin_pic/actions/pic_action_config.toml new file mode 100644 index 000000000..2d2b55a3e --- /dev/null +++ b/src/plugins/test_plugin_pic/actions/pic_action_config.toml @@ -0,0 +1,24 @@ +# 请替换为您的火山引擎 Access Key ID +volcano_ak = "YOUR_VOLCANO_ENGINE_ACCESS_KEY_ID_HERE" +# 请替换为您的火山引擎 Secret Access Key +volcano_sk = "YOUR_VOLCANO_ENGINE_SECRET_ACCESS_KEY_HERE" +# 火山方舟 API 的基础 URL +base_url = "https://ark.cn-beijing.volces.com/api/v3" +# 默认图片生成模型 +default_model = "doubao-seedream-3-0-t2i-250415" +# 默认图片尺寸 +default_size = "1024x1024" +# 用于图片生成的API密钥 +# PicAction 当前配置为在HTTP请求体和Authorization头中使用此密钥。 +# 如果您的API认证方式不同,请相应调整或移除。 +volcano_generate_api_key = "YOUR_VOLCANO_GENERATE_API_KEY_HERE" + +# 是否默认开启水印 +default_watermark = true +# 默认引导强度 +default_guidance_scale = 2.5 +# 默认随机种子 +default_seed = 42 + +# 更多插件特定配置可以在此添加... 
+# custom_parameter = "some_value" diff --git a/src/tools/tool_can_use/rename_person_tool.py b/src/tools/tool_can_use/rename_person_tool.py index c9914a4e4..e7f07a84e 100644 --- a/src/tools/tool_can_use/rename_person_tool.py +++ b/src/tools/tool_can_use/rename_person_tool.py @@ -1,5 +1,5 @@ from src.tools.tool_can_use.base_tool import BaseTool, register_tool -from src.chat.person_info.person_info import person_info_manager +from src.person_info.person_info import person_info_manager from src.common.logger_manager import get_logger import time diff --git a/src/tools/tool_use.py b/src/tools/tool_use.py index 9b62aa0e5..8ddc747da 100644 --- a/src/tools/tool_use.py +++ b/src/tools/tool_use.py @@ -1,4 +1,4 @@ -from src.chat.models.utils_model import LLMRequest +from src.llm_models.utils_model import LLMRequest from src.config.config import global_config import json from src.common.logger_manager import get_logger diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index f3822729e..ef6bbfa5c 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -1,5 +1,5 @@ [inner] -version = "2.3.0" +version = "2.4.0" #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读---- #如果你想要修改配置文件,请在修改后将version的值进行变更