Merge branch 'dev' of https://github.com/MaiM-with-u/MaiBot into dev
@@ -65,11 +65,11 @@
 
 ## 💬 讨论
 
 - [一群](https://qm.qq.com/q/VQ3XZrWgMs) |
+[四群](https://qm.qq.com/q/wGePTl1UyY) |
 [二群](https://qm.qq.com/q/RzmCiRtHEW) |
-[五群](https://qm.qq.com/q/JxvHZnxyec) |
-[三群](https://qm.qq.com/q/wlH5eT8OmQ)(已满)|
-[四群](https://qm.qq.com/q/wGePTl1UyY)(已满)
+[五群](https://qm.qq.com/q/JxvHZnxyec)(已满) |
+[三群](https://qm.qq.com/q/wlH5eT8OmQ)(已满)
 
 ## 📚 文档
 
@@ -27,6 +27,11 @@
 - 表达器:装饰语言风格
 - 可通过插件添加和自定义HFC部件(目前只支持action定义)
 
+**插件系统**
+- 添加示例插件
+- 示例插件:禁言插件
+- 示例插件:豆包绘图插件
+
 **新增表达方式学习**
 - 自主学习群聊中的表达方式,更贴近群友
 - 可自定义的学习频率和开关
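The plugin-system entries above are easier to picture with a concrete action definition. The sketch below is hypothetical (GreetAction is not part of this commit); it only reuses the `PluginAction` base class, the `@register_action` decorator, and the class attributes that the example plugins later in this diff rely on.

```python
# Hypothetical minimal plugin action, modeled on the API used by the example
# plugins in this commit; GreetAction itself is an invented illustration.
from typing import Tuple

from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action


@register_action
class GreetAction(PluginAction):
    action_name = "greet_action"
    action_description = "在合适的时机向群友打个招呼"
    action_parameters = {"name": "要打招呼的对象,可选"}
    action_require = ["当有人向你问好时使用"]
    default = False  # 非默认动作,需要手动添加到使用集

    async def process(self) -> Tuple[bool, str]:
        # action_data 与 send_message_by_expressor 均来自 PluginAction 基类
        name = self.action_data.get("name", "大家")
        await self.send_message_by_expressor(f"你好,{name}!")
        return True, "已发送问候"
```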
@@ -45,7 +50,6 @@
 **优化**
 - 移除日程系统,减少幻觉(将会在未来版本回归)
 - 移除主心流思考和LLM进入聊天判定
--
 
 
 ## [0.6.3-fix-4] - 2025-5-18
@@ -5,7 +5,7 @@ MaiBot模块系统
 
 from src.chat.message_receive.chat_stream import chat_manager
 from src.chat.emoji_system.emoji_manager import emoji_manager
-from src.chat.person_info.relationship_manager import relationship_manager
+from src.person_info.relationship_manager import relationship_manager
 from src.chat.normal_chat.willing.willing_manager import willing_manager
 
 # 导出主要组件供外部使用
@@ -12,11 +12,11 @@ import re
 
 # from gradio_client import file
 
-from ...common.database.database_model import Emoji
-from ...common.database.database import db as peewee_db
-from ...config.config import global_config
-from ..utils.utils_image import image_path_to_base64, image_manager
-from ..models.utils_model import LLMRequest
+from src.common.database.database_model import Emoji
+from src.common.database.database import db as peewee_db
+from src.config.config import global_config
+from src.chat.utils.utils_image import image_path_to_base64, image_manager
+from src.llm_models.utils_model import LLMRequest
 from src.common.logger_manager import get_logger
 from rich.traceback import install
 
@@ -5,7 +5,7 @@ from src.chat.message_receive.message import Seg  # Local import needed after mo
 from src.chat.message_receive.message import UserInfo
 from src.chat.message_receive.chat_stream import chat_manager
 from src.common.logger_manager import get_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.chat.utils.utils_image import image_path_to_base64  # Local import needed after move
 from src.chat.utils.timer_calculator import Timer  # <--- Import Timer
@@ -2,7 +2,7 @@ import time
 import random
 from typing import List, Dict, Optional, Any, Tuple
 from src.common.logger_manager import get_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_anonymous_messages
 from src.chat.focus_chat.heartflow_prompt_builder import Prompt, global_prompt_manager
@@ -425,7 +425,10 @@ class HeartFChatting:
 self.all_observations = observations
 
 with Timer("回忆", cycle_timers):
+logger.debug(f"{self.log_prefix} 开始回忆")
 running_memorys = await self.memory_activator.activate_memory(observations)
+logger.debug(f"{self.log_prefix} 回忆完成")
+print(running_memorys)
 
 with Timer("执行 信息处理器", cycle_timers):
 all_plan_info = await self._process_processors(observations, running_memorys, cycle_timers)
@@ -11,7 +11,7 @@ from ..message_receive.chat_stream import chat_manager
 
 # from ..message_receive.message_buffer import message_buffer
 from ..utils.timer_calculator import Timer
-from src.chat.person_info.relationship_manager import relationship_manager
+from src.person_info.relationship_manager import relationship_manager
 from typing import Optional, Tuple, Dict, Any
 
 logger = get_logger("chat")
@@ -3,7 +3,7 @@ from src.common.logger_manager import get_logger
 from src.individuality.individuality import individuality
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
-from src.chat.person_info.relationship_manager import relationship_manager
+from src.person_info.relationship_manager import relationship_manager
 import time
 from typing import Optional
 from src.chat.utils.utils import get_recent_group_speaker
@@ -37,4 +37,4 @@ class SelfInfo(InfoBase):
 Returns:
 str: 处理后的信息
 """
-return self.get_self_info()
+return self.get_self_info() or ""
@@ -67,3 +67,16 @@ class StructuredInfo:
             value: 要设置的属性值
         """
         self.data[key] = value
+
+    def get_processed_info(self) -> str:
+        """获取处理后的信息
+
+        Returns:
+            str: 处理后的信息字符串
+        """
+
+        info_str = ""
+        for key, value in self.data.items():
+            info_str += f"信息类型:{key},信息内容:{value}\n"
+
+        return info_str
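For reference, a standalone sketch of what the new `get_processed_info` formatting produces; the dictionary keys below are invented for illustration and are not fields defined by this commit.

```python
# Mirrors the loop added to StructuredInfo above, run on example data.
data = {"天气": "晴", "话题": "游戏更新"}  # made-up entries

info_str = ""
for key, value in data.items():
    info_str += f"信息类型:{key},信息内容:{value}\n"

print(info_str)
# 信息类型:天气,信息内容:晴
# 信息类型:话题,信息内容:游戏更新
```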
@@ -8,7 +8,7 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.message_receive.chat_stream import ChatStream, chat_manager
 from typing import Dict
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import random
 
@@ -9,7 +9,7 @@ from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservati
 from src.chat.focus_chat.info.cycle_info import CycleInfo
 from datetime import datetime
 from typing import Dict
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 
 logger = get_logger("processor")
@@ -1,6 +1,6 @@
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.heart_flow.observation.observation import Observation
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import time
 import traceback
@@ -9,7 +9,7 @@ from src.individuality.individuality import individuality
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.chat.utils.json_utils import safe_json_dumps
 from src.chat.message_receive.chat_stream import chat_manager
-from src.chat.person_info.relationship_manager import relationship_manager
+from src.person_info.relationship_manager import relationship_manager
 from .base_processor import BaseProcessor
 from src.chat.focus_chat.info.mind_info import MindInfo
 from typing import List, Optional
@@ -1,6 +1,6 @@
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.heart_flow.observation.observation import Observation
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import time
 import traceback
@@ -8,7 +8,7 @@ from src.common.logger_manager import get_logger
 from src.individuality.individuality import individuality
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.chat.message_receive.chat_stream import chat_manager
-from src.chat.person_info.relationship_manager import relationship_manager
+from src.person_info.relationship_manager import relationship_manager
 from .base_processor import BaseProcessor
 from typing import List, Optional
 from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
@@ -33,12 +33,13 @@ def init_prompt():
 
 现在请你根据现有的信息,思考自我认同
 1. 你是一个什么样的人,你和群里的人关系如何
-2. 思考有没有人提到你,或者图片与你有关
-3. 你的自我认同是否有助于你的回答,如果你需要自我相关的信息来帮你参与聊天,请输出,否则请输出十个字以内的简短自我认同
-4. 一般情况下不用输出自我认同,只需要输出十几个字的简短自我认同就好,除非有明显需要自我认同的场景
+2. 你的形象是什么
+3. 思考有没有人提到你,或者图片与你有关
+4. 你的自我认同是否有助于你的回答,如果你需要自我相关的信息来帮你参与聊天,请输出,否则请输出十几个字的简短自我认同
+5. 一般情况下不用输出自我认同,只需要输出十几个字的简短自我认同就好,除非有明显需要自我认同的场景
 
-请思考的平淡一些,简短一些,说中文,不要浮夸,平淡一些。
-请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出自我认同内容。
+输出内容平淡一些,说中文,不要浮夸,平淡一些。
+请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出自我认同内容,记得明确说明这是你的自我认同。
 
 """
 Prompt(indentify_prompt, "indentify_prompt")
@@ -1,5 +1,5 @@
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import time
 from src.common.logger_manager import get_logger
@@ -7,7 +7,7 @@ from src.individuality.individuality import individuality
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.tools.tool_use import ToolUser
 from src.chat.utils.json_utils import process_llm_tool_calls
-from src.chat.person_info.relationship_manager import relationship_manager
+from src.person_info.relationship_manager import relationship_manager
 from .base_processor import BaseProcessor
 from typing import List, Optional, Dict
 from src.chat.heart_flow.observation.observation import Observation
@@ -1,6 +1,6 @@
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.heart_flow.observation.observation import Observation
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import time
 import traceback
@@ -1,7 +1,7 @@
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.heart_flow.observation.structure_observation import StructureObservation
 from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.common.logger_manager import get_logger
 from src.chat.utils.prompt_builder import Prompt
@@ -61,6 +61,8 @@ class MemoryActivator:
 elif isinstance(observation, HFCloopObservation):
 obs_info_text += observation.get_observe_info()
 
+logger.debug(f"回忆待检索内容:obs_info_text: {obs_info_text}")
+
 # prompt = await global_prompt_manager.format_prompt(
 # "memory_activator_prompt",
 # obs_info_text=obs_info_text,
@@ -81,7 +83,7 @@ class MemoryActivator:
 # valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
 # )
 related_memory = await HippocampusManager.get_instance().get_memory_from_text(
-text=obs_info_text, max_memory_num=3, max_memory_length=2, max_depth=3, fast_retrieval=True
+text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=True
 )
 
 # logger.debug(f"获取到的记忆: {related_memory}")
@@ -1,11 +1,14 @@
 import traceback
 from typing import Tuple, Dict, List, Any, Optional
-from src.chat.focus_chat.planners.actions.base_action import BaseAction
+from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action  # noqa F401
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.focus_chat.hfc_utils import create_empty_anchor_message
 from src.common.logger_manager import get_logger
-from src.chat.person_info.person_info import person_info_manager
+from src.person_info.person_info import person_info_manager
 from abc import abstractmethod
+import os
+import inspect
+import toml  # 导入 toml 库
 
 logger = get_logger("plugin_action")
 
@@ -16,12 +19,24 @@ class PluginAction(BaseAction):
     封装了主程序内部依赖,提供简化的API接口给插件开发者
     """
 
-    def __init__(self, action_data: dict, reasoning: str, cycle_timers: dict, thinking_id: str, **kwargs):
+    action_config_file_name: Optional[str] = None  # 插件可以覆盖此属性来指定配置文件名
+
+    def __init__(
+        self,
+        action_data: dict,
+        reasoning: str,
+        cycle_timers: dict,
+        thinking_id: str,
+        global_config: Optional[dict] = None,
+        **kwargs,
+    ):
         """初始化插件动作基类"""
         super().__init__(action_data, reasoning, cycle_timers, thinking_id)
 
         # 存储内部服务和对象引用
         self._services = {}
+        self._global_config = global_config  # 存储全局配置的只读引用
+        self.config: Dict[str, Any] = {}  # 用于存储插件自身的配置
 
         # 从kwargs提取必要的内部服务
         if "observations" in kwargs:
@@ -32,6 +47,61 @@ class PluginAction(BaseAction):
             self._services["chat_stream"] = kwargs["chat_stream"]
 
         self.log_prefix = kwargs.get("log_prefix", "")
+        self._load_plugin_config()  # 初始化时加载插件配置
+
+    def _load_plugin_config(self):
+        """
+        加载插件自身的配置文件。
+        配置文件应与插件模块在同一目录下。
+        插件可以通过覆盖 `action_config_file_name` 类属性来指定文件名。
+        如果 `action_config_file_name` 未指定,则不加载配置。
+        仅支持 TOML (.toml) 格式。
+        """
+        if not self.action_config_file_name:
+            logger.debug(
+                f"{self.log_prefix} 插件 {self.__class__.__name__} 未指定 action_config_file_name,不加载插件配置。"
+            )
+            return
+
+        try:
+            plugin_module_path = inspect.getfile(self.__class__)
+            plugin_dir = os.path.dirname(plugin_module_path)
+            config_file_path = os.path.join(plugin_dir, self.action_config_file_name)
+
+            if not os.path.exists(config_file_path):
+                logger.warning(
+                    f"{self.log_prefix} 插件 {self.__class__.__name__} 的配置文件 {config_file_path} 不存在。"
+                )
+                return
+
+            file_ext = os.path.splitext(self.action_config_file_name)[1].lower()
+
+            if file_ext == ".toml":
+                with open(config_file_path, "r", encoding="utf-8") as f:
+                    self.config = toml.load(f) or {}
+                logger.info(f"{self.log_prefix} 插件 {self.__class__.__name__} 的配置已从 {config_file_path} 加载。")
+            else:
+                logger.warning(
+                    f"{self.log_prefix} 不支持的插件配置文件格式: {file_ext}。仅支持 .toml。插件配置未加载。"
+                )
+                self.config = {}  # 确保未加载时为空字典
+                return
+
+        except Exception as e:
+            logger.error(
+                f"{self.log_prefix} 加载插件 {self.__class__.__name__} 的配置文件 {self.action_config_file_name} 时出错: {e}"
+            )
+            self.config = {}  # 出错时确保 config 是一个空字典
+
+    def get_global_config(self, key: str, default: Any = None) -> Any:
+        """
+        安全地从全局配置中获取一个值。
+        插件应使用此方法读取全局配置,以保证只读和隔离性。
+        """
+        if self._global_config:
+            return self._global_config.get(key, default)
+        logger.debug(f"{self.log_prefix} 尝试访问全局配置项 '{key}',但全局配置未提供。")
+        return default
+
     async def get_user_id_by_person_name(self, person_name: str) -> Tuple[str, str]:
         """根据用户名获取用户ID"""
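A hedged usage sketch of the per-plugin configuration added above: a plugin opts in by setting `action_config_file_name`, `_load_plugin_config()` then fills `self.config` from a TOML file next to the plugin module, and global settings are read only through `get_global_config`. The file name and the config keys below are placeholders, not part of this commit.

```python
# Hypothetical plugin using the new config hooks of PluginAction.
from typing import Tuple

from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action


@register_action
class ConfiguredAction(PluginAction):
    action_name = "configured_action"
    action_description = "演示读取插件自身配置与全局配置"
    default = False
    # 与插件模块同目录的 TOML 文件;未设置时 _load_plugin_config() 直接跳过加载
    action_config_file_name = "my_action_config.toml"  # placeholder file name

    async def process(self) -> Tuple[bool, str]:
        # self.config 在 __init__ 中由 _load_plugin_config() 填充,加载失败时为空字典
        greeting = self.config.get("greeting", "你好")  # placeholder key
        # 全局配置只能通过只读的 get_global_config 访问
        bot_name = self.get_global_config("bot_nickname", default="bot")  # placeholder key
        await self.send_message_by_expressor(f"{greeting},我是{bot_name}")
        return True, "已读取配置"
```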
@@ -2,7 +2,7 @@ import json  # <--- 确保导入 json
 import traceback
 from typing import List, Dict, Any, Optional
 from rich.traceback import install
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.chat.focus_chat.info.info_base import InfoBase
 from src.chat.focus_chat.info.obs_info import ObsInfo
@@ -10,6 +10,7 @@ from src.chat.focus_chat.info.cycle_info import CycleInfo
 from src.chat.focus_chat.info.mind_info import MindInfo
 from src.chat.focus_chat.info.action_info import ActionInfo
 from src.chat.focus_chat.info.structured_info import StructuredInfo
+from src.chat.focus_chat.info.self_info import SelfInfo
 from src.common.logger_manager import get_logger
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.individuality.individuality import individuality
@@ -22,7 +23,11 @@ install(extra_lines=3)
 
 def init_prompt():
 Prompt(
-"""{extra_info_block}
+"""
+你的自我认知是:
+{self_info_block}
+
+{extra_info_block}
 
 你需要基于以下信息决定如何参与对话
 这些信息可能会有冲突,请你整合这些信息,并选择一个最合适的action:
@@ -127,6 +132,8 @@ class ActionPlanner:
 current_mind = info.get_current_mind()
 elif isinstance(info, CycleInfo):
 cycle_info = info.get_observe_info()
+elif isinstance(info, SelfInfo):
+self_info = info.get_processed_info()
 elif isinstance(info, StructuredInfo):
 _structured_info = info.get_data()
 elif not isinstance(info, ActionInfo):  # 跳过已处理的ActionInfo
@@ -148,6 +155,7 @@ class ActionPlanner:
 
 # --- 构建提示词 (调用修改后的 PromptBuilder 方法) ---
 prompt = await self.build_planner_prompt(
+self_info_block=self_info,
 is_group_chat=is_group_chat,  # <-- Pass HFC state
 chat_target_info=None,
 observed_messages_str=observed_messages_str,  # <-- Pass local variable
@@ -236,6 +244,7 @@ class ActionPlanner:
 
 async def build_planner_prompt(
 self,
+self_info_block: str,
 is_group_chat: bool,  # Now passed as argument
 chat_target_info: Optional[dict],  # Now passed as argument
 observed_messages_str: str,
@@ -301,7 +310,8 @@ class ActionPlanner:
 
 planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
 prompt = planner_prompt_template.format(
-bot_name=global_config.bot.nickname,
+self_info_block=self_info_block,
+# bot_name=global_config.bot.nickname,
 prompt_personality=personality_block,
 chat_context_description=chat_context_description,
 chat_content_block=chat_content_block,
@@ -3,7 +3,7 @@ import traceback
 from json_repair import repair_json
 from rich.traceback import install
 from src.common.logger_manager import get_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.chat.focus_chat.working_memory.memory_item import MemoryItem
 import json  # 添加json模块导入
@@ -1,5 +1,5 @@
 from datetime import datetime
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import traceback
 from src.chat.utils.chat_message_builder import (
@@ -88,5 +88,6 @@ class HFCloopObservation:
 for action_name, action_info in using_actions.items():
 action_description = action_info["description"]
 cycle_info_block += f"\n你在聊天中可以使用{action_name},这个动作的描述是{action_description}\n"
+cycle_info_block += "注意,除了上述动作选项之外,你在群聊里不能做其他任何事情,这是你能力的边界\n"
 
 self.observe_info = cycle_info_block
@@ -2,7 +2,7 @@ import asyncio
 from typing import Optional, Tuple, Dict
 from src.common.logger_manager import get_logger
 from src.chat.message_receive.chat_stream import chat_manager
-from src.chat.person_info.person_info import person_info_manager
+from src.person_info.person_info import person_info_manager
 
 logger = get_logger("heartflow_utils")
 
@@ -11,7 +11,7 @@ import jieba
 import networkx as nx
 import numpy as np
 from collections import Counter
-from ...chat.models.utils_model import LLMRequest
+from ...llm_models.utils_model import LLMRequest
 from src.common.logger_manager import get_logger
 from src.chat.memory_system.sample_distribution import MemoryBuildScheduler  # 分布生成器
 from ..utils.chat_message_builder import (
@@ -338,7 +338,8 @@ class Hippocampus:
 # 去重
 keywords = list(set(keywords))
 # 限制关键词数量
-keywords = keywords[:5]
+logger.debug(f"提取关键词: {keywords}")
+
 else:
 # 使用LLM提取关键词
 topic_num = min(5, max(1, int(len(text) * 0.1)))  # 根据文本长度动态调整关键词数量
@@ -361,7 +362,7 @@ class Hippocampus:
 # 过滤掉不存在于记忆图中的关键词
 valid_keywords = [keyword for keyword in keywords if keyword in self.memory_graph.G]
 if not valid_keywords:
-# logger.info("没有找到有效的关键词节点")
+logger.info("没有找到有效的关键词节点")
 return []
 
 logger.debug(f"有效的关键词: {', '.join(valid_keywords)}")
@@ -1,8 +1,8 @@
-from ..emoji_system.emoji_manager import emoji_manager
-from ..person_info.relationship_manager import relationship_manager
-from .chat_stream import chat_manager
-from .message_sender import message_manager
-from .storage import MessageStorage
+from src.chat.emoji_system.emoji_manager import emoji_manager
+from src.person_info.relationship_manager import relationship_manager
+from src.chat.message_receive.chat_stream import chat_manager
+from src.chat.message_receive.message_sender import message_manager
+from src.chat.message_receive.storage import MessageStorage
 
 
 __all__ = [
@@ -11,7 +11,7 @@ from src.common.logger_manager import get_logger
 from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
 from src.manager.mood_manager import mood_manager
 from src.chat.message_receive.chat_stream import ChatStream, chat_manager
-from src.chat.person_info.relationship_manager import relationship_manager
+from src.person_info.relationship_manager import relationship_manager
 from src.chat.utils.info_catcher import info_catcher_manager
 from src.chat.utils.timer_calculator import Timer
 from src.chat.utils.prompt_builder import global_prompt_manager
@@ -1,8 +1,8 @@
 from typing import List, Optional, Tuple, Union
 import random
-from ..models.utils_model import LLMRequest
-from ...config.config import global_config
-from ..message_receive.message import MessageThinking
+from src.llm_models.utils_model import LLMRequest
+from src.config.config import global_config
+from src.chat.message_receive.message import MessageThinking
 from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
 from src.chat.utils.utils import process_llm_response
 from src.chat.utils.timer_calculator import Timer
@@ -3,7 +3,7 @@ from dataclasses import dataclass
 from src.config.config import global_config
 from src.chat.message_receive.chat_stream import ChatStream, GroupInfo
 from src.chat.message_receive.message import MessageRecv
-from src.chat.person_info.person_info import person_info_manager, PersonInfoManager
+from src.person_info.person_info import person_info_manager, PersonInfoManager
 from abc import ABC, abstractmethod
 import importlib
 from typing import Dict, Optional
@@ -4,7 +4,7 @@ import time  # 导入 time 模块以获取当前时间
 import random
 import re
 from src.common.message_repository import find_messages, count_messages
-from src.chat.person_info.person_info import person_info_manager
+from src.person_info.person_info import person_info_manager
 from src.chat.utils.utils import translate_timestamp_to_human_readable
 
 
@@ -10,7 +10,7 @@ from maim_message import UserInfo
 from src.common.logger import get_module_logger
 from src.manager.mood_manager import mood_manager
 from ..message_receive.message import MessageRecv
-from ..models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from .typo_generator import ChineseTypoGenerator
 from ...config.config import global_config
 from ...common.message_repository import find_messages, count_messages
@@ -8,10 +8,10 @@ import io
 import numpy as np
 
 
-from ...common.database.database import db
-from ...common.database.database_model import Images, ImageDescriptions
-from ...config.config import global_config
-from ..models.utils_model import LLMRequest
+from src.common.database.database import db
+from src.common.database.database_model import Images, ImageDescriptions
+from src.config.config import global_config
+from src.llm_models.utils_model import LLMRequest
 
 from src.common.logger_manager import get_logger
 from rich.traceback import install
@@ -1,5 +1,5 @@
 from dataclasses import dataclass, fields, MISSING
-from typing import TypeVar, Type, Any, get_origin, get_args
+from typing import TypeVar, Type, Any, get_origin, get_args, Literal
 
 T = TypeVar("T", bound="ConfigBase")
 
@@ -102,6 +102,18 @@ class ConfigBase:
 return {cls._convert_field(k, key_type): cls._convert_field(v, value_type) for k, v in value.items()}
 
 # 处理基础类型,例如 int, str 等
+if field_origin_type is type(None) and value is None:  # 处理Optional类型
+return None
+
+# 处理Literal类型
+if field_origin_type is Literal or get_origin(field_type) is Literal:
+# 获取Literal的允许值
+allowed_values = get_args(field_type)
+if value in allowed_values:
+return value
+else:
+raise TypeError(f"Value '{value}' is not in allowed values {allowed_values} for Literal type")
+
 if field_type is Any or isinstance(value, field_type):
 return value
 
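To illustrate the `Literal` support added to `ConfigBase._convert_field` above, here is a small hedged sketch; `LogConfig` and its values are made-up examples, not classes from this repository.

```python
# Hypothetical config dataclass with a Literal-constrained field.
from dataclasses import dataclass
from typing import Literal, get_args


@dataclass
class LogConfig:
    # 允许的取值由 Literal 限定;新增的转换逻辑用 get_args() 取出这些值做校验
    level: Literal["DEBUG", "INFO", "WARNING"] = "INFO"


print(get_args(LogConfig.__dataclass_fields__["level"].type))
# ('DEBUG', 'INFO', 'WARNING') —— 合法值原样返回;'TRACE' 这类非法值则会触发
# TypeError: Value 'TRACE' is not in allowed values ('DEBUG', 'INFO', 'WARNING') for Literal type
```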
@@ -1,7 +1,7 @@
 import time
 from typing import Tuple, Optional  # 增加了 Optional
 from src.common.logger_manager import get_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.experimental.PFC.chat_observer import ChatObserver
 from src.experimental.PFC.pfc_utils import get_items_from_json
@@ -1,6 +1,6 @@
 from typing import List, Tuple, TYPE_CHECKING
 from src.common.logger import get_module_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.experimental.PFC.chat_observer import ChatObserver
 from src.experimental.PFC.pfc_utils import get_items_from_json
@@ -1,7 +1,7 @@
 from typing import List, Tuple
 from src.common.logger import get_module_logger
 from src.chat.memory_system.Hippocampus import HippocampusManager
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.chat.message_receive.message import Message
 from src.chat.knowledge.knowledge_lib import qa_manager
@@ -1,7 +1,7 @@
 import json
 from typing import Tuple, List, Dict, Any
 from src.common.logger import get_module_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.experimental.PFC.chat_observer import ChatObserver
 from maim_message import UserInfo
@@ -1,6 +1,6 @@
 from typing import Tuple, List, Dict, Any
 from src.common.logger import get_module_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.experimental.PFC.chat_observer import ChatObserver
 from src.experimental.PFC.reply_checker import ReplyChecker
@@ -1,6 +1,6 @@
 import random
 from src.common.logger_manager import get_logger
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from typing import List, Tuple
@@ -3,10 +3,8 @@ import json
 import re
 from datetime import datetime
 from typing import Tuple, Union, Dict, Any
-
 import aiohttp
 from aiohttp.client import ClientResponse
-
 from src.common.logger import get_module_logger
 import base64
 from PIL import Image
@@ -14,7 +12,7 @@ import io
 import os
 from src.common.database.database import db  # 确保 db 被导入用于 create_tables
 from src.common.database.database_model import LLMUsage  # 导入 LLMUsage 模型
-from ...config.config import global_config
+from src.config.config import global_config
 from rich.traceback import install
 
 install(extra_lines=3)
@@ -6,7 +6,7 @@ from .manager.async_task_manager import async_task_manager
 from .chat.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
 from .manager.mood_manager import MoodPrintTask, MoodUpdateTask
 from .chat.emoji_system.emoji_manager import emoji_manager
-from .chat.person_info.person_info import person_info_manager
+from .person_info.person_info import person_info_manager
 from .chat.normal_chat.willing.willing_manager import willing_manager
 from .chat.message_receive.chat_stream import chat_manager
 from src.chat.heart_flow.heartflow import heartflow
@@ -1,13 +1,13 @@
 from src.common.logger_manager import get_logger
-from ...common.database.database import db
-from ...common.database.database_model import PersonInfo  # 新增导入
+from src.common.database.database import db
+from src.common.database.database_model import PersonInfo  # 新增导入
 import copy
 import hashlib
 from typing import Any, Callable, Dict
 import datetime
 import asyncio
 import numpy as np
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 from src.individuality.individuality import individuality
 
@@ -1,13 +1,13 @@
 from src.common.logger_manager import get_logger
-from ..message_receive.chat_stream import ChatStream
+from src.chat.message_receive.chat_stream import ChatStream
 import math
 from bson.decimal128 import Decimal128
-from .person_info import person_info_manager
+from src.person_info.person_info import person_info_manager
 import time
 import random
 from maim_message import UserInfo
 
-from ...manager.mood_manager import mood_manager
+from src.manager.mood_manager import mood_manager
 
 # import re
 # import traceback
@@ -26,7 +26,7 @@ class MuteAction(PluginAction):
 "当你想回避某个话题时使用",
 ]
 default = True  # 不是默认动作,需要手动添加到使用集
-associated_types = ["command",'text']
+associated_types = ["command", "text"]
 
 async def process(self) -> Tuple[bool, str]:
 """处理测试动作"""
src/plugins/test_plugin_pic/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
+"""测试插件包:图片发送"""
+
+"""
+这是一个测试插件,用于测试图片发送功能
+"""
src/plugins/test_plugin_pic/actions/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
+"""测试插件动作模块"""
+
+# 导入所有动作模块以确保装饰器被执行
+from . import pic_action  # noqa
src/plugins/test_plugin_pic/actions/generate_pic_config.py (new file, 50 lines)
@@ -0,0 +1,50 @@
+import os
+
+CONFIG_CONTENT = """\
+# 请替换为您的火山引擎 Access Key ID
+volcano_ak = "YOUR_VOLCANO_ENGINE_ACCESS_KEY_ID_HERE"
+# 请替换为您的火山引擎 Secret Access Key
+volcano_sk = "YOUR_VOLCANO_ENGINE_SECRET_ACCESS_KEY_HERE"
+# 火山方舟 API 的基础 URL
+base_url = "https://ark.cn-beijing.volces.com/api/v3"
+# 默认图片生成模型
+default_model = "doubao-seedream-3-0-t2i-250415"
+# 默认图片尺寸
+default_size = "1024x1024"
+# 用于图片生成的API密钥
+# PicAction 当前配置为在HTTP请求体和Authorization头中使用此密钥。
+# 如果您的API认证方式不同,请相应调整或移除。
+volcano_generate_api_key = "YOUR_VOLCANO_GENERATE_API_KEY_HERE"
+
+# 是否默认开启水印
+default_watermark = true
+# 默认引导强度
+default_guidance_scale = 2.5
+# 默认随机种子
+default_seed = 42
+
+# 更多插件特定配置可以在此添加...
+# custom_parameter = "some_value"
+"""
+
+
+def generate_config():
+    # 获取当前脚本所在的目录
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    config_file_path = os.path.join(current_dir, "pic_action_config.toml")
+
+    if not os.path.exists(config_file_path):
+        try:
+            with open(config_file_path, "w", encoding="utf-8") as f:
+                f.write(CONFIG_CONTENT)
+            print(f"配置文件已生成: {config_file_path}")
+            print("请记得编辑该文件,填入您的火山引擎 AK/SK 和 API 密钥。")
+        except IOError as e:
+            print(f"错误:无法写入配置文件 {config_file_path}。原因: {e}")
+    else:
+        print(f"配置文件已存在: {config_file_path}")
+        print("未进行任何更改。如果您想重新生成,请先删除或重命名现有文件。")
+
+
+if __name__ == "__main__":
+    generate_config()
src/plugins/test_plugin_pic/actions/pic_action.py (new file, 264 lines)
@@ -0,0 +1,264 @@
+import asyncio
+import json
+import urllib.request
+import urllib.error
+import base64  # 新增:用于Base64编码
+import traceback  # 新增:用于打印堆栈跟踪
+from typing import Tuple
+from src.chat.focus_chat.planners.actions.plugin_action import PluginAction, register_action
+from src.common.logger_manager import get_logger
+from .generate_pic_config import generate_config
+
+logger = get_logger("pic_action")
+
+# 当此模块被加载时,尝试生成配置文件(如果它不存在)
+# 注意:在某些插件加载机制下,这可能会在每次机器人启动或插件重载时执行
+# 考虑是否需要更复杂的逻辑来决定何时运行 (例如,仅在首次安装时)
+generate_config()
+
+
+@register_action
+class PicAction(PluginAction):
+    """根据描述使用火山引擎HTTP API生成图片的动作处理类"""
+
+    action_name = "pic_action"
+    action_description = "可以根据特定的描述,使用火山引擎模型生成并发送一张图片 (通过HTTP API)"
+    action_parameters = {
+        "description": "图片描述,输入你想要生成并发送的图片的描述,必填",
+        "size": "图片尺寸,例如 '1024x1024' (可选, 默认从配置或 '1024x1024')",
+    }
+    action_require = [
+        "当有人要求你生成并发送一张图片时使用",
+        "当有人让你画一张图时使用",
+    ]
+    default = False
+    action_config_file_name = "pic_action_config.toml"
+
+    def __init__(
+        self,
+        action_data: dict,
+        reasoning: str,
+        cycle_timers: dict,
+        thinking_id: str,
+        global_config: dict = None,
+        **kwargs,
+    ):
+        super().__init__(action_data, reasoning, cycle_timers, thinking_id, global_config, **kwargs)
+
+        http_base_url = self.config.get("base_url")
+        http_api_key = self.config.get("volcano_generate_api_key")
+
+        if not (http_base_url and http_api_key):
+            logger.error(
+                f"{self.log_prefix} PicAction初始化, 但HTTP配置 (base_url 或 volcano_generate_api_key) 缺失. HTTP图片生成将失败."
+            )
+        else:
+            logger.info(f"{self.log_prefix} HTTP方式初始化完成. Base URL: {http_base_url}, API Key已配置.")
+
+    # _restore_env_vars 方法不再需要,已移除
+
+    async def process(self) -> Tuple[bool, str]:
+        """处理图片生成动作(通过HTTP API)"""
+        logger.info(f"{self.log_prefix} 执行 pic_action (HTTP): {self.reasoning}")
+
+        http_base_url = self.config.get("base_url")
+        http_api_key = self.config.get("volcano_generate_api_key")
+
+        if not (http_base_url and http_api_key):
+            error_msg = "抱歉,图片生成功能所需的HTTP配置(如API地址或密钥)不完整,无法提供服务。"
+            await self.send_message_by_expressor(error_msg)
+            logger.error(f"{self.log_prefix} HTTP调用配置缺失: base_url 或 volcano_generate_api_key.")
+            return False, "HTTP配置不完整"
+
+        description = self.action_data.get("description")
+        if not description:
+            logger.warning(f"{self.log_prefix} 图片描述为空,无法生成图片。")
+            await self.send_message_by_expressor("你需要告诉我想要画什么样的图片哦~")
+            return False, "图片描述为空"
+
+        default_model = self.config.get("default_model", "doubao-seedream-3-0-t2i-250415")
+        image_size = self.action_data.get("size", self.config.get("default_size", "1024x1024"))
+
+        # guidance_scale 现在完全由配置文件控制
+        guidance_scale_input = self.config.get("default_guidance_scale", 2.5)  # 默认2.5
+        guidance_scale_val = 2.5  # Fallback default
+        try:
+            guidance_scale_val = float(guidance_scale_input)
+        except (ValueError, TypeError):
+            logger.warning(
+                f"{self.log_prefix} 配置文件中的 default_guidance_scale 值 '{guidance_scale_input}' 无效 (应为浮点数),使用默认值 2.5。"
+            )
+            guidance_scale_val = 2.5
+
+        # Seed parameter - ensure it's always an integer
+        seed_config_value = self.config.get("default_seed")
+        seed_val = 42  # Default seed if not configured or invalid
+        if seed_config_value is not None:
+            try:
+                seed_val = int(seed_config_value)
+            except (ValueError, TypeError):
+                logger.warning(
+                    f"{self.log_prefix} 配置文件中的 default_seed ('{seed_config_value}') 无效,将使用默认种子 42。"
+                )
+                # seed_val is already 42
+        else:
+            logger.info(
+                f"{self.log_prefix} 未在配置中找到 default_seed,将使用默认种子 42。建议在配置文件中添加 default_seed。"
+            )
+            # seed_val is already 42
+
+        # Watermark 现在完全由配置文件控制
+        effective_watermark_source = self.config.get("default_watermark", True)  # 默认True
+        if isinstance(effective_watermark_source, bool):
+            watermark_val = effective_watermark_source
+        elif isinstance(effective_watermark_source, str):
+            watermark_val = effective_watermark_source.lower() == "true"
+        else:
+            logger.warning(
+                f"{self.log_prefix} 配置文件中的 default_watermark 值 '{effective_watermark_source}' 无效 (应为布尔值或 'true'/'false'),使用默认值 True。"
+            )
+            watermark_val = True
+
+        await self.send_message_by_expressor(
+            f"收到!正在为您生成关于 '{description}' 的图片,请稍候...(模型: {default_model}, 尺寸: {image_size})"
+        )
+
+        try:
+            success, result = await asyncio.to_thread(
+                self._make_http_image_request,
+                prompt=description,
+                model=default_model,
+                size=image_size,
+                seed=seed_val,
+                guidance_scale=guidance_scale_val,
+                watermark=watermark_val,
+            )
+        except Exception as e:
+            logger.error(f"{self.log_prefix} (HTTP) 异步请求执行失败: {e!r}", exc_info=True)
+            traceback.print_exc()
+            success = False
+            result = f"图片生成服务遇到意外问题: {str(e)[:100]}"
+
+        if success:
+            image_url = result
+            logger.info(f"{self.log_prefix} 图片URL获取成功: {image_url[:70]}... 下载并编码.")
+
+            try:
+                encode_success, encode_result = await asyncio.to_thread(self._download_and_encode_base64, image_url)
+            except Exception as e:
+                logger.error(f"{self.log_prefix} (B64) 异步下载/编码失败: {e!r}", exc_info=True)
+                traceback.print_exc()
+                encode_success = False
+                encode_result = f"图片下载或编码时发生内部错误: {str(e)[:100]}"
+
+            if encode_success:
+                base64_image_string = encode_result
+                send_success = await self.send_message(type="emoji", data=base64_image_string)
+                if send_success:
+                    await self.send_message_by_expressor("图片表情已发送!")
+                    return True, "图片表情已发送"
+                else:
+                    await self.send_message_by_expressor("图片已处理为Base64,但作为表情发送失败了。")
+                    return False, "图片表情发送失败 (Base64)"
+            else:
+                await self.send_message_by_expressor(f"获取到图片URL,但在处理图片时失败了:{encode_result}")
+                return False, f"图片处理失败(Base64): {encode_result}"
+        else:
+            error_message = result
+            await self.send_message_by_expressor(f"哎呀,生成图片时遇到问题:{error_message}")
+            return False, f"图片生成失败: {error_message}"
+
+    def _download_and_encode_base64(self, image_url: str) -> Tuple[bool, str]:
+        """下载图片并将其编码为Base64字符串"""
+        logger.info(f"{self.log_prefix} (B64) 下载并编码图片: {image_url[:70]}...")
+        try:
+            with urllib.request.urlopen(image_url, timeout=30) as response:
+                if response.status == 200:
+                    image_bytes = response.read()
+                    base64_encoded_image = base64.b64encode(image_bytes).decode("utf-8")
+                    logger.info(f"{self.log_prefix} (B64) 图片下载编码完成. Base64长度: {len(base64_encoded_image)}")
+                    return True, base64_encoded_image
+                else:
+                    error_msg = f"下载图片失败 (状态: {response.status})"
+                    logger.error(f"{self.log_prefix} (B64) {error_msg} URL: {image_url}")
+                    return False, error_msg
+        except Exception as e:  # Catches all exceptions from urlopen, b64encode, etc.
+            logger.error(f"{self.log_prefix} (B64) 下载或编码时错误: {e!r}", exc_info=True)
+            traceback.print_exc()
+            return False, f"下载或编码图片时发生错误: {str(e)[:100]}"
+
+    def _make_http_image_request(
+        self, prompt: str, model: str, size: str, seed: int | None, guidance_scale: float, watermark: bool
+    ) -> Tuple[bool, str]:
+        base_url = self.config.get("base_url")
+        generate_api_key = self.config.get("volcano_generate_api_key")
+
+        endpoint = f"{base_url.rstrip('/')}/images/generations"
+
+        payload_dict = {
+            "model": model,
+            "prompt": prompt,
+            "response_format": "url",
+            "size": size,
+            "guidance_scale": guidance_scale,
+            "watermark": watermark,
+            "seed": seed,  # seed is now always an int from process()
+            "api-key": generate_api_key,
+        }
+        # if seed is not None:  # No longer needed, seed is always an int
+        #     payload_dict["seed"] = seed
+
+        data = json.dumps(payload_dict).encode("utf-8")
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "Authorization": f"Bearer {generate_api_key}",
+        }
+
+        logger.info(f"{self.log_prefix} (HTTP) 发起图片请求: {model}, Prompt: {prompt[:30]}... To: {endpoint}")
+        logger.debug(
+            f"{self.log_prefix} (HTTP) Request Headers: {{...Authorization: Bearer {generate_api_key[:10]}...}}"
+        )
+        logger.debug(
+            f"{self.log_prefix} (HTTP) Request Body (api-key omitted): {json.dumps({k: v for k, v in payload_dict.items() if k != 'api-key'})}"
|
||||||
|
)
|
||||||
|
|
||||||
|
req = urllib.request.Request(endpoint, data=data, headers=headers, method="POST")
|
||||||
|
|
||||||
|
try:
|
||||||
|
with urllib.request.urlopen(req, timeout=60) as response:
|
||||||
|
response_status = response.status
|
||||||
|
response_body_bytes = response.read()
|
||||||
|
response_body_str = response_body_bytes.decode("utf-8")
|
||||||
|
|
||||||
|
logger.info(f"{self.log_prefix} (HTTP) 响应: {response_status}. Preview: {response_body_str[:150]}...")
|
||||||
|
|
||||||
|
if 200 <= response_status < 300:
|
||||||
|
response_data = json.loads(response_body_str)
|
||||||
|
image_url = None
|
||||||
|
if (
|
||||||
|
isinstance(response_data.get("data"), list)
|
||||||
|
and response_data["data"]
|
||||||
|
and isinstance(response_data["data"][0], dict)
|
||||||
|
):
|
||||||
|
image_url = response_data["data"][0].get("url")
|
||||||
|
elif response_data.get("url"):
|
||||||
|
image_url = response_data.get("url")
|
||||||
|
|
||||||
|
if image_url:
|
||||||
|
logger.info(f"{self.log_prefix} (HTTP) 图片生成成功,URL: {image_url[:70]}...")
|
||||||
|
return True, image_url
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"{self.log_prefix} (HTTP) API成功但无图片URL. 响应预览: {response_body_str[:300]}..."
|
||||||
|
)
|
||||||
|
return False, "图片生成API响应成功但未找到图片URL"
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"{self.log_prefix} (HTTP) API请求失败. 状态: {response.status}. 正文: {response_body_str[:300]}..."
|
||||||
|
)
|
||||||
|
return False, f"图片API请求失败(状态码 {response.status})"
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"{self.log_prefix} (HTTP) 图片生成时意外错误: {e!r}", exc_info=True)
|
||||||
|
traceback.print_exc()
|
||||||
|
return False, f"图片生成HTTP请求时发生意外错误: {str(e)[:100]}"
|
||||||
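For reference, the request/response shape used above can be exercised outside the plugin with a minimal standalone sketch. The endpoint path, payload keys, model name, and default size are taken from this diff; the helper name generate_image_base64 and the credentials are hypothetical placeholders illustrating the call pattern, not the plugin's public API.

# Minimal standalone sketch of the images/generations call pattern (hypothetical helper,
# placeholder credentials); mirrors the two-step flow of _make_http_image_request followed
# by _download_and_encode_base64 above.
import base64
import json
import urllib.request


def generate_image_base64(base_url: str, api_key: str, prompt: str,
                          model: str = "doubao-seedream-3-0-t2i-250415",
                          size: str = "1024x1024") -> str:
    payload = {
        "model": model,
        "prompt": prompt,
        "response_format": "url",
        "size": size,
    }
    req = urllib.request.Request(
        f"{base_url.rstrip('/')}/images/generations",
        data=json.dumps(payload).encode("utf-8"),
        headers={
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        method="POST",
    )
    # Step 1: ask the API for a generated-image URL (assumes an OpenAI-style data[0].url response).
    with urllib.request.urlopen(req, timeout=60) as resp:
        image_url = json.loads(resp.read().decode("utf-8"))["data"][0]["url"]
    # Step 2: download the image and return it Base64-encoded, as the action does before sending.
    with urllib.request.urlopen(image_url, timeout=30) as img_resp:
        return base64.b64encode(img_resp.read()).decode("utf-8")

Running this requires a valid base_url and API key; authentication or quota failures surface as urllib.error.HTTPError rather than the friendly messages the action sends.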
src/plugins/test_plugin_pic/actions/pic_action_config.toml (new file, 24 lines added)
@@ -0,0 +1,24 @@
# 请替换为您的火山引擎 Access Key ID
volcano_ak = "YOUR_VOLCANO_ENGINE_ACCESS_KEY_ID_HERE"
# 请替换为您的火山引擎 Secret Access Key
volcano_sk = "YOUR_VOLCANO_ENGINE_SECRET_ACCESS_KEY_HERE"
# 火山方舟 API 的基础 URL
base_url = "https://ark.cn-beijing.volces.com/api/v3"
# 默认图片生成模型
default_model = "doubao-seedream-3-0-t2i-250415"
# 默认图片尺寸
default_size = "1024x1024"
# 用于图片生成的API密钥
# PicAction 当前配置为在HTTP请求体和Authorization头中使用此密钥。
# 如果您的API认证方式不同,请相应调整或移除。
volcano_generate_api_key = "YOUR_VOLCANO_GENERATE_API_KEY_HERE"

# 是否默认开启水印
default_watermark = true
# 默认引导强度
default_guidance_scale = 2.5
# 默认随机种子
default_seed = 42

# 更多插件特定配置可以在此添加...
# custom_parameter = "some_value"
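The plugin's actual config loader is not part of this diff; a minimal sketch of reading this TOML file (assuming Python 3.11+ with the standard tomllib module, and the repository-relative path shown in the file header) and applying the same fallbacks process() uses might look like this.

# Hypothetical loader sketch for pic_action_config.toml; the real plugin may load its
# config differently. Demonstrates the type coercions and defaults used by the action.
import tomllib
from pathlib import Path

config_path = Path("src/plugins/test_plugin_pic/actions/pic_action_config.toml")
with config_path.open("rb") as f:
    config = tomllib.load(f)

guidance_scale = float(config.get("default_guidance_scale", 2.5))  # falls back to 2.5
seed = int(config.get("default_seed", 42))                         # falls back to 42
watermark = bool(config.get("default_watermark", True))            # TOML booleans load as bool
print(guidance_scale, seed, watermark)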
@@ -1,5 +1,5 @@
 from src.tools.tool_can_use.base_tool import BaseTool, register_tool
-from src.chat.person_info.person_info import person_info_manager
+from src.person_info.person_info import person_info_manager
 from src.common.logger_manager import get_logger
 import time

@@ -1,4 +1,4 @@
-from src.chat.models.utils_model import LLMRequest
+from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import json
 from src.common.logger_manager import get_logger
@@ -1,5 +1,5 @@
 [inner]
-version = "2.3.0"
+version = "2.4.0"

 #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
 #如果你想要修改配置文件,请在修改后将version的值进行变更