@@ -1,6 +1,6 @@
 from fastapi import HTTPException
 from rich.traceback import install
-from src.config.config import BotConfig
+from src.config.config import Config
 from src.common.logger_manager import get_logger
 import os

@@ -14,8 +14,8 @@ async def reload_config():
         from src.config import config as config_module

         logger.debug("正在重载配置文件...")
-        bot_config_path = os.path.join(BotConfig.get_config_dir(), "bot_config.toml")
-        config_module.global_config = BotConfig.load_config(config_path=bot_config_path)
+        bot_config_path = os.path.join(Config.get_config_dir(), "bot_config.toml")
+        config_module.global_config = Config.load_config(config_path=bot_config_path)
         logger.debug("配置文件重载成功")
         return {"status": "reloaded"}
     except FileNotFoundError as e:

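This `BotConfig` → `Config` rename is the first visible piece of the refactor that runs through the whole patch: flat, upper-case attributes on the old config (`global_config.BOT_NICKNAME`, `global_config.max_emoji_num`, …) become namespaced sections on the new one (`global_config.bot.nickname`, `global_config.emoji.max_reg_num`, …). A minimal sketch of the new layout, assuming dataclass-style sections — the real `Config` in `src/config/config.py` may be built differently:

```python
from dataclasses import dataclass, field

# Hypothetical sketch of the namespaced layout implied by this diff.
# Section/field names come from the call sites below; defaults are illustrative.
@dataclass
class BotSection:
    qq_account: str = ""                                   # was global_config.BOT_QQ
    nickname: str = ""                                     # was global_config.BOT_NICKNAME
    alias_names: list[str] = field(default_factory=list)   # was global_config.BOT_ALIAS_NAMES

@dataclass
class EmojiSection:
    max_reg_num: int = 40       # was global_config.max_emoji_num
    do_replace: bool = True     # was global_config.max_reach_deletion
    check_interval: int = 10    # minutes; was global_config.EMOJI_CHECK_INTERVAL

@dataclass
class Config:
    bot: BotSection = field(default_factory=BotSection)
    emoji: EmojiSection = field(default_factory=EmojiSection)

global_config = Config()
print(global_config.bot.nickname)  # namespaced access replaces the flat attribute
```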
@@ -369,14 +369,15 @@ class EmojiManager:
     def __init__(self):
         self._initialized = None
         self._scan_task = None
-        self.vlm = LLMRequest(model=global_config.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
+
+        self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
         self.llm_emotion_judge = LLMRequest(
-            model=global_config.llm_normal, max_tokens=600, request_type="emoji"
+            model=global_config.model.normal, max_tokens=600, request_type="emoji"
         )  # 更高的温度,更少的token(后续可以根据情绪来调整温度)

         self.emoji_num = 0
-        self.emoji_num_max = global_config.max_emoji_num
-        self.emoji_num_max_reach_deletion = global_config.max_reach_deletion
+        self.emoji_num_max = global_config.emoji.max_reg_num
+        self.emoji_num_max_reach_deletion = global_config.emoji.do_replace
         self.emoji_objects: list[MaiEmoji] = []  # 存储MaiEmoji对象的列表,使用类型注解明确列表元素类型

         logger.info("启动表情包管理器")
@@ -613,18 +614,18 @@ class EmojiManager:
                     logger.warning(f"[警告] 表情包目录不存在: {EMOJI_DIR}")
                     os.makedirs(EMOJI_DIR, exist_ok=True)
                     logger.info(f"[创建] 已创建表情包目录: {EMOJI_DIR}")
-                    await asyncio.sleep(global_config.EMOJI_CHECK_INTERVAL * 60)
+                    await asyncio.sleep(global_config.emoji.check_interval * 60)
                     continue

                 # 检查目录是否为空
                 files = os.listdir(EMOJI_DIR)
                 if not files:
                     logger.warning(f"[警告] 表情包目录为空: {EMOJI_DIR}")
-                    await asyncio.sleep(global_config.EMOJI_CHECK_INTERVAL * 60)
+                    await asyncio.sleep(global_config.emoji.check_interval * 60)
                     continue

                 # 检查是否需要处理表情包(数量超过最大值或不足)
-                if (self.emoji_num > self.emoji_num_max and global_config.max_reach_deletion) or (
+                if (self.emoji_num > self.emoji_num_max and global_config.emoji.do_replace) or (
                     self.emoji_num < self.emoji_num_max
                 ):
                     try:
@@ -651,7 +652,7 @@ class EmojiManager:
             except Exception as e:
                 logger.error(f"[错误] 扫描表情包目录失败: {str(e)}")

-            await asyncio.sleep(global_config.EMOJI_CHECK_INTERVAL * 60)
+            await asyncio.sleep(global_config.emoji.check_interval * 60)

     async def get_all_emoji_from_db(self):
         """获取所有表情包并初始化为MaiEmoji类对象,更新 self.emoji_objects"""
@@ -788,7 +789,7 @@ class EmojiManager:

             # 构建提示词
             prompt = (
-                f"{global_config.BOT_NICKNAME}的表情包存储已满({self.emoji_num}/{self.emoji_num_max}),"
+                f"{global_config.bot.nickname}的表情包存储已满({self.emoji_num}/{self.emoji_num_max}),"
                 f"需要决定是否删除一个旧表情包来为新表情包腾出空间。\n\n"
                 f"新表情包信息:\n"
                 f"描述: {new_emoji.description}\n\n"
@@ -871,10 +872,10 @@ class EmojiManager:
             description, _ = await self.vlm.generate_response_for_image(prompt, image_base64, image_format)

             # 审核表情包
-            if global_config.EMOJI_CHECK:
+            if global_config.emoji.content_filtration:
                 prompt = f'''
                 这是一个表情包,请对这个表情包进行审核,标准如下:
-                1. 必须符合"{global_config.EMOJI_CHECK_PROMPT}"的要求
+                1. 必须符合"{global_config.emoji.filtration_prompt}"的要求
                 2. 不能是色情、暴力、等违法违规内容,必须符合公序良俗
                 3. 不能是任何形式的截图,聊天记录或视频截图
                 4. 不要出现5个以上文字

@@ -25,9 +25,10 @@ logger = get_logger("expressor")
|
||||
class DefaultExpressor:
|
||||
def __init__(self, chat_id: str):
|
||||
self.log_prefix = "expressor"
|
||||
# TODO: API-Adapter修改标记
|
||||
self.express_model = LLMRequest(
|
||||
model=global_config.llm_normal,
|
||||
temperature=global_config.llm_normal["temp"],
|
||||
model=global_config.model.normal,
|
||||
temperature=global_config.model.normal["temp"],
|
||||
max_tokens=256,
|
||||
request_type="response_heartflow",
|
||||
)
|
||||
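The hunks around `LLMRequest` subscript the model entries directly (`model["temp"]`, later `model['provider']` and `.get("name")`), so each entry under `global_config.model` appears to be a plain dict rather than an object. A hedged sketch of what one entry might look like — the key set is inferred from the call sites in this diff, not from the config schema:

```python
# Hedged sketch: keys inferred from usages in this patch, values invented.
normal_model = {
    "name": "qwen-plus",        # model identifier, read via model.get("name")
    "provider": "SILICONFLOW",  # resolves <PROVIDER>_KEY / <PROVIDER>_BASE_URL env vars
    "temp": 0.7,                # read as global_config.model.normal["temp"]
}
# LLMRequest(model=normal_model, temperature=normal_model["temp"], max_tokens=256, ...)
```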
@@ -51,8 +52,8 @@ class DefaultExpressor:
         messageinfo = anchor_message.message_info
         thinking_time_point = parse_thinking_id_to_timestamp(thinking_id)
         bot_user_info = UserInfo(
-            user_id=global_config.BOT_QQ,
-            user_nickname=global_config.BOT_NICKNAME,
+            user_id=global_config.bot.qq_account,
+            user_nickname=global_config.bot.nickname,
             platform=messageinfo.platform,
         )
         # logger.debug(f"创建思考消息:{anchor_message}")
@@ -141,7 +142,7 @@ class DefaultExpressor:
         try:
             # 1. 获取情绪影响因子并调整模型温度
             arousal_multiplier = mood_manager.get_arousal_multiplier()
-            current_temp = float(global_config.llm_normal["temp"]) * arousal_multiplier
+            current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
             self.express_model.params["temperature"] = current_temp  # 动态调整温度

             # 2. 获取信息捕捉器
@@ -183,6 +184,7 @@ class DefaultExpressor:

         try:
             with Timer("LLM生成", {}):  # 内部计时器,可选保留
+                # TODO: API-Adapter修改标记
                 # logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n")
                 content, reasoning_content, model_name = await self.express_model.generate_response(prompt)

@@ -330,8 +332,8 @@ class DefaultExpressor:

         thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(self.chat_id, thinking_id)
         bot_user_info = UserInfo(
-            user_id=global_config.BOT_QQ,
-            user_nickname=global_config.BOT_NICKNAME,
+            user_id=global_config.bot.qq_account,
+            user_nickname=global_config.bot.nickname,
             platform=self.chat_stream.platform,
         )

@@ -77,8 +77,9 @@ def init_prompt() -> None:

 class ExpressionLearner:
     def __init__(self) -> None:
+        # TODO: API-Adapter修改标记
         self.express_learn_model: LLMRequest = LLMRequest(
-            model=global_config.llm_normal,
+            model=global_config.model.normal,
             temperature=0.1,
             max_tokens=256,
             request_type="response_heartflow",
@@ -289,7 +290,7 @@ class ExpressionLearner:
         # 构建prompt
         prompt = await global_prompt_manager.format_prompt(
             "personality_expression_prompt",
-            personality=global_config.expression_style,
+            personality=global_config.personality.expression_style,
         )
         # logger.info(f"个性表达方式提取prompt: {prompt}")

@@ -91,7 +91,6 @@ class HeartFChatting:
         self.action_manager = ActionManager()
         self.action_planner = ActionPlanner(log_prefix=self.log_prefix, action_manager=self.action_manager)

-
         # --- 处理器列表 ---
         self.processors: List[BaseProcessor] = []
         self._register_default_processors()
@@ -526,5 +525,3 @@ class HeartFChatting:
         if last_n is not None:
             history = history[-last_n:]
         return [cycle.to_dict() for cycle in history]
-
-
@@ -112,7 +112,7 @@ def _check_ban_words(text: str, chat, userinfo) -> bool:
     Returns:
         bool: 是否包含过滤词
     """
-    for word in global_config.ban_words:
+    for word in global_config.chat.ban_words:
         if word in text:
             chat_name = chat.group_info.group_name if chat.group_info else "私聊"
             logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
@@ -132,7 +132,7 @@ def _check_ban_regex(text: str, chat, userinfo) -> bool:
     Returns:
         bool: 是否匹配过滤正则
     """
-    for pattern in global_config.ban_msgs_regex:
+    for pattern in global_config.chat.ban_msgs_regex:
         if pattern.search(text):
             chat_name = chat.group_info.group_name if chat.group_info else "私聊"
             logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")

@@ -6,14 +6,13 @@ from src.chat.utils.chat_message_builder import build_readable_messages, get_raw
 from src.chat.person_info.relationship_manager import relationship_manager
 from src.chat.utils.utils import get_embedding
 import time
-from typing import Union, Optional, Dict, Any
+from typing import Union, Optional
 from src.common.database import db
 from src.chat.utils.utils import get_recent_group_speaker
 from src.manager.mood_manager import mood_manager
 from src.chat.memory_system.Hippocampus import HippocampusManager
 from src.chat.knowledge.knowledge_lib import qa_manager
 from src.chat.focus_chat.expressors.exprssion_learner import expression_learner
 import traceback
 import random

@@ -142,7 +141,7 @@ async def _build_prompt_focus(
     message_list_before_now = get_raw_msg_before_timestamp_with_chat(
         chat_id=chat_stream.stream_id,
         timestamp=time.time(),
-        limit=global_config.observation_context_size,
+        limit=global_config.chat.observation_context_size,
     )
     chat_talking_prompt = await build_readable_messages(
         message_list_before_now,
@@ -209,7 +208,7 @@ async def _build_prompt_focus(
             chat_target=chat_target_1,  # Used in group template
             # chat_talking_prompt=chat_talking_prompt,
             chat_info=chat_talking_prompt,
-            bot_name=global_config.BOT_NICKNAME,
+            bot_name=global_config.bot.nickname,
             # prompt_personality=prompt_personality,
             prompt_personality="",
             reason=reason,
@@ -225,7 +224,7 @@ async def _build_prompt_focus(
             info_from_tools=structured_info_prompt,
             sender_name=effective_sender_name,  # Used in private template
             chat_talking_prompt=chat_talking_prompt,
-            bot_name=global_config.BOT_NICKNAME,
+            bot_name=global_config.bot.nickname,
             prompt_personality=prompt_personality,
             # chat_target and chat_target_2 are not used in private template
             current_mind_info=current_mind_info,
@@ -280,7 +279,7 @@ class PromptBuilder:
             who_chat_in_group = get_recent_group_speaker(
                 chat_stream.stream_id,
                 (chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None,
-                limit=global_config.observation_context_size,
+                limit=global_config.chat.observation_context_size,
             )
         elif chat_stream.user_info:
             who_chat_in_group.append(
@@ -328,7 +327,7 @@ class PromptBuilder:
         message_list_before_now = get_raw_msg_before_timestamp_with_chat(
             chat_id=chat_stream.stream_id,
             timestamp=time.time(),
-            limit=global_config.observation_context_size,
+            limit=global_config.chat.observation_context_size,
         )
         chat_talking_prompt = await build_readable_messages(
             message_list_before_now,
@@ -340,18 +339,15 @@ class PromptBuilder:

         # 关键词检测与反应
         keywords_reaction_prompt = ""
-        for rule in global_config.keywords_reaction_rules:
-            if rule.get("enable", False):
-                if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
-                    logger.info(
-                        f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}"
-                    )
-                    keywords_reaction_prompt += rule.get("reaction", "") + ","
+        for rule in global_config.keyword_reaction.rules:
+            if rule.enable:
+                if any(keyword in message_txt for keyword in rule.keywords):
+                    logger.info(f"检测到以下关键词之一:{rule.keywords},触发反应:{rule.reaction}")
+                    keywords_reaction_prompt += f"{rule.reaction},"
                 else:
-                    for pattern in rule.get("regex", []):
-                        result = pattern.search(message_txt)
-                        if result:
-                            reaction = rule.get("reaction", "")
+                    for pattern in rule.regex:
+                        if result := pattern.search(message_txt):
+                            reaction = rule.reaction
                             for name, content in result.groupdict().items():
                                 reaction = reaction.replace(f"[{name}]", content)
                             logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
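The rewritten block swaps dict lookups (`rule.get("enable")`, `rule.get("regex")`) for attribute access on typed rule objects and adopts the walrus operator. The named-group substitution is unchanged: `[name]` placeholders in the reaction text are filled from the regex's `groupdict`. A self-contained sketch, with a hypothetical `KeywordRule` standing in for whatever type backs `global_config.keyword_reaction.rules`:

```python
import re
from dataclasses import dataclass, field

# Hypothetical stand-in for the rule objects behind global_config.keyword_reaction.rules.
@dataclass
class KeywordRule:
    enable: bool = True
    keywords: list[str] = field(default_factory=list)
    regex: list[re.Pattern] = field(default_factory=list)
    reaction: str = ""

rule = KeywordRule(
    enable=True,
    regex=[re.compile(r"(?P<name>\S+)是谁")],
    reaction="有人问[name]是谁",
)

message_txt = "张三是谁"
for pattern in rule.regex:
    if result := pattern.search(message_txt):      # walrus form used in the new code
        reaction = rule.reaction
        for name, content in result.groupdict().items():
            reaction = reaction.replace(f"[{name}]", content)  # fill [name] placeholders
        print(reaction)  # -> 有人问张三是谁
```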
@@ -397,8 +393,8 @@ class PromptBuilder:
             chat_target_2=chat_target_2,
             chat_talking_prompt=chat_talking_prompt,
             message_txt=message_txt,
-            bot_name=global_config.BOT_NICKNAME,
-            bot_other_names="/".join(global_config.BOT_ALIAS_NAMES),
+            bot_name=global_config.bot.nickname,
+            bot_other_names="/".join(global_config.bot.alias_names),
             prompt_personality=prompt_personality,
             mood_prompt=mood_prompt,
             reply_style1=reply_style1_chosen,
@@ -419,8 +415,8 @@ class PromptBuilder:
             prompt_info=prompt_info,
             chat_talking_prompt=chat_talking_prompt,
             message_txt=message_txt,
-            bot_name=global_config.BOT_NICKNAME,
-            bot_other_names="/".join(global_config.BOT_ALIAS_NAMES),
+            bot_name=global_config.bot.nickname,
+            bot_other_names="/".join(global_config.bot.alias_names),
             prompt_personality=prompt_personality,
             mood_prompt=mood_prompt,
             reply_style1=reply_style1_chosen,

@@ -26,8 +26,9 @@ class ChattingInfoProcessor(BaseProcessor):
     def __init__(self):
         """初始化观察处理器"""
         super().__init__()
+        # TODO: API-Adapter修改标记
         self.llm_summary = LLMRequest(
-            model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
+            model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
         )

     async def process_info(
@@ -108,12 +109,12 @@ class ChattingInfoProcessor(BaseProcessor):
                     "created_at": datetime.now().timestamp(),
                 }

-                obs.mid_memorys.append(mid_memory)
-                if len(obs.mid_memorys) > obs.max_mid_memory_len:
-                    obs.mid_memorys.pop(0)  # 移除最旧的
+                obs.mid_memories.append(mid_memory)
+                if len(obs.mid_memories) > obs.max_mid_memory_len:
+                    obs.mid_memories.pop(0)  # 移除最旧的

                 mid_memory_str = "之前聊天的内容概述是:\n"
-                for mid_memory_item in obs.mid_memorys:  # 重命名循环变量以示区分
+                for mid_memory_item in obs.mid_memories:  # 重命名循环变量以示区分
                     time_diff = int((datetime.now().timestamp() - mid_memory_item["created_at"]) / 60)
                     mid_memory_str += (
                         f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory_item['id']}):{mid_memory_item['theme']}\n"

@@ -81,8 +81,8 @@ class MindProcessor(BaseProcessor):
         self.subheartflow_id = subheartflow_id

         self.llm_model = LLMRequest(
-            model=global_config.llm_sub_heartflow,
-            temperature=global_config.llm_sub_heartflow["temp"],
+            model=global_config.model.sub_heartflow,
+            temperature=global_config.model.sub_heartflow["temp"],
             max_tokens=800,
             request_type="sub_heart_flow",
         )

@@ -52,7 +52,7 @@ class ToolProcessor(BaseProcessor):
         self.subheartflow_id = subheartflow_id
         self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
         self.llm_model = LLMRequest(
-            model=global_config.llm_tool_use,
+            model=global_config.model.tool_use,
             max_tokens=500,
             request_type="tool_execution",
         )

@@ -34,8 +34,9 @@ def init_prompt():

 class MemoryActivator:
     def __init__(self):
+        # TODO: API-Adapter修改标记
         self.summary_model = LLMRequest(
-            model=global_config.llm_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
+            model=global_config.model.summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
        )
         self.running_memory = []

@@ -1,7 +1,5 @@
-from typing import Dict, List, Optional, Callable, Coroutine, Type, Any, Union
-import os
-import importlib
-from src.chat.focus_chat.planners.actions.base_action import BaseAction, _ACTION_REGISTRY, _DEFAULT_ACTIONS
+from typing import Dict, List, Optional, Callable, Coroutine, Type, Any
+from src.chat.focus_chat.planners.actions.base_action import BaseAction, _ACTION_REGISTRY
 from src.chat.heart_flow.observation.observation import Observation
 from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
 from src.chat.message_receive.chat_stream import ChatStream
@@ -9,8 +7,6 @@ from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail
 from src.common.logger_manager import get_logger
-
 # 导入动作类,确保装饰器被执行
 from src.chat.focus_chat.planners.actions.reply_action import ReplyAction
 from src.chat.focus_chat.planners.actions.no_reply_action import NoReplyAction
-
 logger = get_logger("action_factory")

@@ -45,7 +41,6 @@ class ActionManager:
         # for action_name, action_info in self._using_actions.items():
         #     logger.info(f"动作名称: {action_name}, 动作信息: {action_info}")

-
     def _load_registered_actions(self) -> None:
         """
         加载所有通过装饰器注册的动作
@@ -54,17 +49,17 @@ class ActionManager:
         # 从_ACTION_REGISTRY获取所有已注册动作
         for action_name, action_class in _ACTION_REGISTRY.items():
             # 获取动作相关信息
-            action_description:str = getattr(action_class, "action_description", "")
-            action_parameters:dict[str:str] = getattr(action_class, "action_parameters", {})
-            action_require:list[str] = getattr(action_class, "action_require", [])
-            is_default:bool = getattr(action_class, "default", False)
+            action_description: str = getattr(action_class, "action_description", "")
+            action_parameters: dict[str:str] = getattr(action_class, "action_parameters", {})
+            action_require: list[str] = getattr(action_class, "action_require", [])
+            is_default: bool = getattr(action_class, "default", False)

             if action_name and action_description:
                 # 创建动作信息字典
                 action_info = {
                     "description": action_description,
                     "parameters": action_parameters,
-                    "require": action_require
+                    "require": action_require,
                 }

                 # 注册2
@@ -233,11 +228,7 @@ class ActionManager:
         if require is None:
             require = []

-        action_info = {
-            "description": description,
-            "parameters": parameters,
-            "require": require
-        }
+        action_info = {"description": description, "parameters": parameters, "require": require}

         self._registered_actions[action_name] = action_info
         return True

@@ -25,8 +25,8 @@ def register_action(cls):
         logger.error(f"动作类 {cls.__name__} 缺少必要的属性: action_name 或 action_description")
         return cls

-    action_name = getattr(cls, "action_name")
-    action_description = getattr(cls, "action_description")
+    action_name = cls.action_name
+    action_description = cls.action_description
     is_default = getattr(cls, "default", False)

     if not action_name or not action_description:
@@ -60,14 +60,13 @@ class BaseAction(ABC):
             cycle_timers: 计时器字典
             thinking_id: 思考ID
         """
-        #每个动作必须实现
-        self.action_name:str = "base_action"
-        self.action_description:str = "基础动作"
-        self.action_parameters:dict = {}
-        self.action_require:list[str] = []
-
-        self.default:bool = False
+        # 每个动作必须实现
+        self.action_name: str = "base_action"
+        self.action_description: str = "基础动作"
+        self.action_parameters: dict = {}
+        self.action_require: list[str] = []

+        self.default: bool = False

         self.action_data = action_data
         self.reasoning = reasoning

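The `register_action` and `BaseAction` hunks above describe the decorator-driven registry this file family relies on: each action class declares `action_name`, `action_description`, `action_parameters`, `action_require` and `default` as class attributes, and the decorator files the class into `_ACTION_REGISTRY`. A stripped-down sketch of that mechanism — the real decorator also handles default actions and logging, and `PingAction` is invented for illustration:

```python
# Minimal sketch of the decorator-based registry; simplified relative to the real code.
_ACTION_REGISTRY: dict[str, type] = {}

def register_action(cls):
    action_name = cls.action_name              # direct attribute access, as in the new code
    action_description = cls.action_description
    if not action_name or not action_description:
        raise ValueError(f"{cls.__name__} 缺少 action_name 或 action_description")
    _ACTION_REGISTRY[action_name] = cls        # importing the module is enough to register
    return cls

@register_action
class PingAction:  # hypothetical example action
    action_name: str = "ping"
    action_description: str = "回应一个 ping"
    action_parameters: dict = {}
    action_require: list[str] = []
    default: bool = False

print(_ACTION_REGISTRY["ping"].__name__)  # -> PingAction
```

This is why the factory module imports `ReplyAction` and `NoReplyAction` even though it never references them: the import runs the decorator.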
@@ -29,7 +29,7 @@ class NoReplyAction(BaseAction):
     action_require = [
         "话题无关/无聊/不感兴趣/不懂",
         "最后一条消息是你自己发的且无人回应你",
-        "你发送了太多消息,且无人回复"
+        "你发送了太多消息,且无人回复",
     ]
     default = True

@@ -46,7 +46,7 @@ class NoReplyAction(BaseAction):
         total_no_reply_count: int = 0,
         total_waiting_time: float = 0.0,
         shutting_down: bool = False,
-        **kwargs
+        **kwargs,
     ):
         """初始化不回复动作处理器

@@ -2,9 +2,8 @@
 # -*- coding: utf-8 -*-

 from src.common.logger_manager import get_logger
 from src.chat.utils.timer_calculator import Timer
 from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
-from typing import Tuple, List, Optional
+from typing import Tuple, List
 from src.chat.heart_flow.observation.observation import Observation
 from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
 from src.chat.message_receive.chat_stream import ChatStream
@@ -22,14 +21,14 @@ class ReplyAction(BaseAction):
     处理构建和发送消息回复的动作。
     """

-    action_name:str = "reply"
-    action_description:str = "表达想法,可以只包含文本、表情或两者都有"
-    action_parameters:dict[str:str] = {
+    action_name: str = "reply"
+    action_description: str = "表达想法,可以只包含文本、表情或两者都有"
+    action_parameters: dict[str:str] = {
         "text": "你想要表达的内容(可选)",
         "emojis": "描述当前使用表情包的场景(可选)",
         "target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)(可选)",
     }
-    action_require:list[str] = [
+    action_require: list[str] = [
         "有实质性内容需要表达",
         "有人提到你,但你还没有回应他",
         "在合适的时候添加表情(不要总是添加)",
@@ -38,7 +37,7 @@ class ReplyAction(BaseAction):
         "一次只回复一个人,一次只回复一个话题,突出重点",
         "如果是自己发的消息想继续,需自然衔接",
         "避免重复或评价自己的发言,不要和自己聊天",
-        "注意:回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。"
+        "注意:回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。",
     ]
     default = True

@@ -54,7 +53,7 @@ class ReplyAction(BaseAction):
         chat_stream: ChatStream,
         current_cycle: CycleDetail,
         log_prefix: str,
-        **kwargs
+        **kwargs,
     ):
         """初始化回复动作处理器

@@ -89,7 +88,7 @@ class ReplyAction(BaseAction):
             reasoning=self.reasoning,
             reply_data=self.action_data,
             cycle_timers=self.cycle_timers,
-            thinking_id=self.thinking_id
+            thinking_id=self.thinking_id,
         )

     async def _handle_reply(

@@ -4,7 +4,6 @@ from typing import List, Dict, Any, Optional
 from rich.traceback import install
 from src.chat.models.utils_model import LLMRequest
 from src.config.config import global_config
-from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
 from src.chat.focus_chat.info.info_base import InfoBase
 from src.chat.focus_chat.info.obs_info import ObsInfo
 from src.chat.focus_chat.info.cycle_info import CycleInfo
@@ -15,10 +14,12 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.individuality.individuality import Individuality
 from src.chat.focus_chat.planners.action_factory import ActionManager
+from src.chat.focus_chat.planners.action_factory import ActionInfo

 logger = get_logger("planner")

+install(extra_lines=3)
+

 def init_prompt():
     Prompt(
         """你的名字是{bot_name},{prompt_personality},{chat_context_description}。需要基于以下信息决定如何参与对话:
@@ -44,7 +45,8 @@ def init_prompt():
 }}

 请输出你的决策 JSON:""",
-        "planner_prompt",)
+        "planner_prompt",
+    )

     Prompt(
         """
@@ -103,7 +105,7 @@ class ActionPlanner:
                 cycle_info = info.get_observe_info()
             elif isinstance(info, StructuredInfo):
                 logger.debug(f"{self.log_prefix} 结构化信息: {info}")
-                structured_info = info.get_data()
+                _structured_info = info.get_data()

         current_available_actions = self.action_manager.get_using_actions()

@@ -197,7 +199,6 @@ class ActionPlanner:
         # 返回结果字典
         return plan_result

-
     async def build_planner_prompt(
         self,
         is_group_chat: bool,  # Now passed as argument
@@ -218,7 +219,6 @@ class ActionPlanner:
             )
             chat_context_description = f"你正在和 {chat_target_name} 私聊"

-
         chat_content_block = ""
         if observed_messages_str:
             chat_content_block = f"聊天记录:\n{observed_messages_str}"
@@ -234,7 +234,6 @@ class ActionPlanner:
         individuality = Individuality.get_instance()
         personality_block = individuality.get_prompt(x_person=2, level=2)

-
         action_options_block = ""
         for using_actions_name, using_actions_info in current_available_actions.items():
             # print(using_actions_name)
@@ -262,9 +261,6 @@ class ActionPlanner:

             action_options_block += using_action_prompt

-
-
-
         planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
         prompt = planner_prompt_template.format(
             bot_name=global_config.BOT_NICKNAME,

@@ -35,8 +35,9 @@ class Heartflow:
         self.subheartflow_manager: SubHeartflowManager = SubHeartflowManager(self.current_state)

         # LLM模型配置
+        # TODO: API-Adapter修改标记
         self.llm_model = LLMRequest(
-            model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
+            model=global_config.model.heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
         )

         # 外部依赖模块

@@ -20,9 +20,9 @@ MAX_REPLY_PROBABILITY = 1
 class InterestChatting:
     def __init__(
         self,
-        decay_rate=global_config.default_decay_rate_per_second,
+        decay_rate=global_config.focus_chat.default_decay_rate_per_second,
         max_interest=MAX_INTEREST,
-        trigger_threshold=global_config.reply_trigger_threshold,
+        trigger_threshold=global_config.focus_chat.reply_trigger_threshold,
         max_probability=MAX_REPLY_PROBABILITY,
     ):
         # 基础属性初始化

@@ -18,19 +18,14 @@ enable_unlimited_hfc_chat = True  # 调试用:无限专注聊天
 prevent_offline_state = True
 # 目前默认不启用OFFLINE状态

 # 不同状态下普通聊天的最大消息数
-base_normal_chat_num = global_config.base_normal_chat_num
-base_focused_chat_num = global_config.base_focused_chat_num
-
-
-MAX_NORMAL_CHAT_NUM_PEEKING = int(base_normal_chat_num / 2)
-MAX_NORMAL_CHAT_NUM_NORMAL = base_normal_chat_num
-MAX_NORMAL_CHAT_NUM_FOCUSED = base_normal_chat_num + 1
+MAX_NORMAL_CHAT_NUM_PEEKING = int(global_config.chat.base_normal_chat_num / 2)
+MAX_NORMAL_CHAT_NUM_NORMAL = global_config.chat.base_normal_chat_num
+MAX_NORMAL_CHAT_NUM_FOCUSED = global_config.chat.base_normal_chat_num + 1

 # 不同状态下专注聊天的最大消息数
-MAX_FOCUSED_CHAT_NUM_PEEKING = int(base_focused_chat_num / 2)
-MAX_FOCUSED_CHAT_NUM_NORMAL = base_focused_chat_num
-MAX_FOCUSED_CHAT_NUM_FOCUSED = base_focused_chat_num + 2
+MAX_FOCUSED_CHAT_NUM_PEEKING = int(global_config.chat.base_focused_chat_num / 2)
+MAX_FOCUSED_CHAT_NUM_NORMAL = global_config.chat.base_focused_chat_num
+MAX_FOCUSED_CHAT_NUM_FOCUSED = global_config.chat.base_focused_chat_num + 2

 # -- 状态定义 --

@@ -53,19 +53,20 @@ class ChattingObservation(Observation):
         self.talking_message = []
         self.talking_message_str = ""
         self.talking_message_str_truncate = ""
-        self.name = global_config.BOT_NICKNAME
-        self.nick_name = global_config.BOT_ALIAS_NAMES
-        self.max_now_obs_len = global_config.observation_context_size
-        self.overlap_len = global_config.compressed_length
-        self.mid_memorys = []
-        self.max_mid_memory_len = global_config.compress_length_limit
+        self.name = global_config.bot.nickname
+        self.nick_name = global_config.bot.alias_names
+        self.max_now_obs_len = global_config.chat.observation_context_size
+        self.overlap_len = global_config.focus_chat.compressed_length
+        self.mid_memories = []
+        self.max_mid_memory_len = global_config.focus_chat.compress_length_limit
         self.mid_memory_info = ""
         self.person_list = []
         self.oldest_messages = []
         self.oldest_messages_str = ""
         self.compressor_prompt = ""
+        # TODO: API-Adapter修改标记
         self.llm_summary = LLMRequest(
-            model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
+            model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
         )

     async def initialize(self):
@@ -83,7 +84,7 @@ class ChattingObservation(Observation):
         for id in ids:
             print(f"id:{id}")
             try:
-                for mid_memory in self.mid_memorys:
+                for mid_memory in self.mid_memories:
                     if mid_memory["id"] == id:
                         mid_memory_by_id = mid_memory
                         msg_str = ""
@@ -101,7 +102,7 @@ class ChattingObservation(Observation):

         else:
             mid_memory_str = "之前的聊天内容:\n"
-            for mid_memory in self.mid_memorys:
+            for mid_memory in self.mid_memories:
                 mid_memory_str += f"{mid_memory['theme']}\n"
             return mid_memory_str + "现在群里正在聊:\n" + self.talking_message_str

@@ -76,8 +76,9 @@ class SubHeartflowManager:

         # 为 LLM 状态评估创建一个 LLMRequest 实例
         # 使用与 Heartflow 相同的模型和参数
+        # TODO: API-Adapter修改标记
         self.llm_state_evaluator = LLMRequest(
-            model=global_config.llm_heartflow,  # 与 Heartflow 一致
+            model=global_config.model.heartflow,  # 与 Heartflow 一致
             temperature=0.6,  # 与 Heartflow 一致
             max_tokens=1000,  # 与 Heartflow 一致 (虽然可能不需要这么多)
             request_type="subheartflow_state_eval",  # 保留特定的请求类型
@@ -278,7 +279,7 @@ class SubHeartflowManager:
         focused_limit = current_state.get_focused_chat_max_num()

         # --- 新增:检查是否允许进入 FOCUS 模式 --- #
-        if not global_config.allow_focus_mode:
+        if not global_config.chat.allow_focus_mode:
             if int(time.time()) % 60 == 0:  # 每60秒输出一次日志避免刷屏
                 logger.trace("未开启 FOCUSED 状态 (allow_focus_mode=False)")
             return  # 如果不允许,直接返回
@@ -766,7 +767,7 @@ class SubHeartflowManager:
         focused_limit = current_mai_state.get_focused_chat_max_num()

         # --- 检查是否允许 FOCUS 模式 --- #
-        if not global_config.allow_focus_mode:
+        if not global_config.chat.allow_focus_mode:
             # Log less frequently to avoid spam
             # if int(time.time()) % 60 == 0:
             #     logger.debug(f"{log_prefix_task} 配置不允许进入 FOCUSED 状态")

@@ -19,9 +19,10 @@ from ..utils.chat_message_builder import (
     build_readable_messages,
 )  # 导入 build_readable_messages
 from ..utils.utils import translate_timestamp_to_human_readable
-from .memory_config import MemoryConfig
 from rich.traceback import install

+from ...config.config import global_config
+
 install(extra_lines=3)

@@ -195,18 +196,16 @@ class Hippocampus:
         self.llm_summary = None
         self.entorhinal_cortex = None
         self.parahippocampal_gyrus = None
-        self.config = None

-    def initialize(self, global_config):
-        # 使用导入的 MemoryConfig dataclass 和其 from_global_config 方法
-        self.config = MemoryConfig.from_global_config(global_config)
+    def initialize(self):
         # 初始化子组件
         self.entorhinal_cortex = EntorhinalCortex(self)
         self.parahippocampal_gyrus = ParahippocampalGyrus(self)
         # 从数据库加载记忆图
         self.entorhinal_cortex.sync_memory_from_db()
-        self.llm_topic_judge = LLMRequest(self.config.llm_topic_judge, request_type="memory")
-        self.llm_summary = LLMRequest(self.config.llm_summary, request_type="memory")
+        # TODO: API-Adapter修改标记
+        self.llm_topic_judge = LLMRequest(global_config.model.topic_judge, request_type="memory")
+        self.llm_summary = LLMRequest(global_config.model.summary, request_type="memory")

     def get_all_node_names(self) -> list:
         """获取记忆图中所有节点的名字列表"""
@@ -792,7 +791,6 @@ class EntorhinalCortex:
     def __init__(self, hippocampus: Hippocampus):
         self.hippocampus = hippocampus
         self.memory_graph = hippocampus.memory_graph
-        self.config = hippocampus.config

     def get_memory_sample(self):
         """从数据库获取记忆样本"""
@@ -801,13 +799,13 @@ class EntorhinalCortex:

         # 创建双峰分布的记忆调度器
         sample_scheduler = MemoryBuildScheduler(
-            n_hours1=self.config.memory_build_distribution[0],
-            std_hours1=self.config.memory_build_distribution[1],
-            weight1=self.config.memory_build_distribution[2],
-            n_hours2=self.config.memory_build_distribution[3],
-            std_hours2=self.config.memory_build_distribution[4],
-            weight2=self.config.memory_build_distribution[5],
-            total_samples=self.config.build_memory_sample_num,
+            n_hours1=global_config.memory.memory_build_distribution[0],
+            std_hours1=global_config.memory.memory_build_distribution[1],
+            weight1=global_config.memory.memory_build_distribution[2],
+            n_hours2=global_config.memory.memory_build_distribution[3],
+            std_hours2=global_config.memory.memory_build_distribution[4],
+            weight2=global_config.memory.memory_build_distribution[5],
+            total_samples=global_config.memory.memory_build_sample_num,
         )

         timestamps = sample_scheduler.get_timestamp_array()
@@ -818,7 +816,7 @@ class EntorhinalCortex:
         for timestamp in timestamps:
             # 调用修改后的 random_get_msg_snippet
             messages = self.random_get_msg_snippet(
-                timestamp, self.config.build_memory_sample_length, max_memorized_time_per_msg
+                timestamp, global_config.memory.memory_build_sample_length, max_memorized_time_per_msg
             )
             if messages:
                 time_diff = (datetime.datetime.now().timestamp() - timestamp) / 3600
@@ -1099,7 +1097,6 @@ class ParahippocampalGyrus:
     def __init__(self, hippocampus: Hippocampus):
         self.hippocampus = hippocampus
         self.memory_graph = hippocampus.memory_graph
-        self.config = hippocampus.config

     async def memory_compress(self, messages: list, compress_rate=0.1):
         """压缩和总结消息内容,生成记忆主题和摘要。
@@ -1159,7 +1156,7 @@ class ParahippocampalGyrus:

         # 3. 过滤掉包含禁用关键词的topic
         filtered_topics = [
-            topic for topic in topics if not any(keyword in topic for keyword in self.config.memory_ban_words)
+            topic for topic in topics if not any(keyword in topic for keyword in global_config.memory.memory_ban_words)
         ]

         logger.debug(f"过滤后话题: {filtered_topics}")
@@ -1222,7 +1219,7 @@ class ParahippocampalGyrus:
             bar = "█" * filled_length + "-" * (bar_length - filled_length)
             logger.debug(f"进度: [{bar}] {progress:.1f}% ({i}/{len(memory_samples)})")

-            compress_rate = self.config.memory_compress_rate
+            compress_rate = global_config.memory.memory_compress_rate
             try:
                 compressed_memory, similar_topics_dict = await self.memory_compress(messages, compress_rate)
             except Exception as e:
@@ -1322,7 +1319,7 @@ class ParahippocampalGyrus:
                 edge_data = self.memory_graph.G[source][target]
                 last_modified = edge_data.get("last_modified")

-                if current_time - last_modified > 3600 * self.config.memory_forget_time:
+                if current_time - last_modified > 3600 * global_config.memory.memory_forget_time:
                     current_strength = edge_data.get("strength", 1)
                     new_strength = current_strength - 1

@@ -1430,8 +1427,8 @@ class ParahippocampalGyrus:
     async def operation_consolidate_memory(self):
         """整合记忆:合并节点内相似的记忆项"""
         start_time = time.time()
-        percentage = self.config.consolidate_memory_percentage
-        similarity_threshold = self.config.consolidation_similarity_threshold
+        percentage = global_config.memory.consolidate_memory_percentage
+        similarity_threshold = global_config.memory.consolidation_similarity_threshold
         logger.info(f"[整合] 开始检查记忆节点... 检查比例: {percentage:.2%}, 合并阈值: {similarity_threshold}")

         # 获取所有至少有2条记忆项的节点
@@ -1544,7 +1541,6 @@ class ParahippocampalGyrus:
 class HippocampusManager:
     _instance = None
     _hippocampus = None
-    _global_config = None
     _initialized = False

     @classmethod
@@ -1559,19 +1555,15 @@ class HippocampusManager:
             raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
         return cls._hippocampus

-    def initialize(self, global_config):
+    def initialize(self):
         """初始化海马体实例"""
         if self._initialized:
             return self._hippocampus

-        self._global_config = global_config
         self._hippocampus = Hippocampus()
-        self._hippocampus.initialize(global_config)
+        self._hippocampus.initialize()
         self._initialized = True

-        # 输出记忆系统参数信息
-        config = self._hippocampus.config
-
         # 输出记忆图统计信息
         memory_graph = self._hippocampus.memory_graph.G
         node_count = len(memory_graph.nodes())
@@ -1579,9 +1571,9 @@ class HippocampusManager:

         logger.success(f"""--------------------------------
 记忆系统参数配置:
-构建间隔: {global_config.build_memory_interval}秒|样本数: {config.build_memory_sample_num},长度: {config.build_memory_sample_length}|压缩率: {config.memory_compress_rate}
-记忆构建分布: {config.memory_build_distribution}
-遗忘间隔: {global_config.forget_memory_interval}秒|遗忘比例: {global_config.memory_forget_percentage}|遗忘: {config.memory_forget_time}小时之后
+构建间隔: {global_config.memory.memory_build_interval}秒|样本数: {global_config.memory.memory_build_sample_num},长度: {global_config.memory.memory_build_sample_length}|压缩率: {global_config.memory.memory_compress_rate}
+记忆构建分布: {global_config.memory.memory_build_distribution}
+遗忘间隔: {global_config.memory.forget_memory_interval}秒|遗忘比例: {global_config.memory.memory_forget_percentage}|遗忘: {global_config.memory.memory_forget_time}小时之后
 记忆图统计信息: 节点数量: {node_count}, 连接数量: {edge_count}
 --------------------------------""")  # noqa: E501

@@ -7,7 +7,6 @@ import os
 # 添加项目根目录到系统路径
 sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
 from src.chat.memory_system.Hippocampus import HippocampusManager
-from src.config.config import global_config
 from rich.traceback import install

 install(extra_lines=3)
@@ -19,7 +18,7 @@ async def test_memory_system():
     # 初始化记忆系统
     print("开始初始化记忆系统...")
     hippocampus_manager = HippocampusManager.get_instance()
-    hippocampus_manager.initialize(global_config=global_config)
+    hippocampus_manager.initialize()
     print("记忆系统初始化完成")

     # 测试记忆构建

@@ -1,48 +0,0 @@
-from dataclasses import dataclass
-from typing import List
-
-
-@dataclass
-class MemoryConfig:
-    """记忆系统配置类"""
-
-    # 记忆构建相关配置
-    memory_build_distribution: List[float]  # 记忆构建的时间分布参数
-    build_memory_sample_num: int  # 每次构建记忆的样本数量
-    build_memory_sample_length: int  # 每个样本的消息长度
-    memory_compress_rate: float  # 记忆压缩率
-
-    # 记忆遗忘相关配置
-    memory_forget_time: int  # 记忆遗忘时间(小时)
-
-    # 记忆过滤相关配置
-    memory_ban_words: List[str]  # 记忆过滤词列表
-
-    # 新增:记忆整合相关配置
-    consolidation_similarity_threshold: float  # 相似度阈值
-    consolidate_memory_percentage: float  # 检查节点比例
-    consolidate_memory_interval: int  # 记忆整合间隔
-
-    llm_topic_judge: str  # 话题判断模型
-    llm_summary: str  # 话题总结模型
-
-    @classmethod
-    def from_global_config(cls, global_config):
-        """从全局配置创建记忆系统配置"""
-        # 使用 getattr 提供默认值,防止全局配置缺少这些项
-        return cls(
-            memory_build_distribution=getattr(
-                global_config, "memory_build_distribution", (24, 12, 0.5, 168, 72, 0.5)
-            ),  # 添加默认值
-            build_memory_sample_num=getattr(global_config, "build_memory_sample_num", 5),
-            build_memory_sample_length=getattr(global_config, "build_memory_sample_length", 30),
-            memory_compress_rate=getattr(global_config, "memory_compress_rate", 0.1),
-            memory_forget_time=getattr(global_config, "memory_forget_time", 24 * 7),
-            memory_ban_words=getattr(global_config, "memory_ban_words", []),
-            # 新增加载整合配置,并提供默认值
-            consolidation_similarity_threshold=getattr(global_config, "consolidation_similarity_threshold", 0.7),
-            consolidate_memory_percentage=getattr(global_config, "consolidate_memory_percentage", 0.01),
-            consolidate_memory_interval=getattr(global_config, "consolidate_memory_interval", 1000),
-            llm_topic_judge=getattr(global_config, "llm_topic_judge", "default_judge_model"),  # 添加默认模型名
-            llm_summary=getattr(global_config, "llm_summary", "default_summary_model"),  # 添加默认模型名
-        )
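Deleting `memory_config.py` removes the getattr-with-defaults indirection entirely; as the Hippocampus hunks above show, callers now read the memory section of the global config directly. One consequence worth noting in review: any fallback defaults must now come from the config loader itself — an assumption here, since the loader is not part of this diff.

```python
from src.config.config import global_config

# Before: an intermediate dataclass with getattr fallbacks
# config = MemoryConfig.from_global_config(global_config)
# rate = config.memory_compress_rate

# After: direct reads from the memory section (defaults, if any, now live in
# the Config loader -- an assumption, since the loader is outside this diff)
rate = global_config.memory.memory_compress_rate
ban_words = global_config.memory.memory_ban_words
```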
@@ -41,7 +41,7 @@ class ChatBot:
             chat_id = str(message.chat_stream.stream_id)
             private_name = str(message.message_info.user_info.user_nickname)

-            if global_config.enable_pfc_chatting:
+            if global_config.experimental.enable_pfc_chatting:
                 await self.pfc_manager.get_or_create_conversation(chat_id, private_name)

         except Exception as e:
@@ -78,19 +78,19 @@ class ChatBot:
         userinfo = message.message_info.user_info

         # 用户黑名单拦截
-        if userinfo.user_id in global_config.ban_user_id:
+        if userinfo.user_id in global_config.chat_target.ban_user_id:
             logger.debug(f"用户{userinfo.user_id}被禁止回复")
             return

         if groupinfo is None:
             logger.trace("检测到私聊消息,检查")
             # 好友黑名单拦截
-            if userinfo.user_id not in global_config.talk_allowed_private:
+            if userinfo.user_id not in global_config.experimental.talk_allowed_private:
                 logger.debug(f"用户{userinfo.user_id}没有私聊权限")
                 return

         # 群聊黑名单拦截
-        if groupinfo is not None and groupinfo.group_id not in global_config.talk_allowed_groups:
+        if groupinfo is not None and groupinfo.group_id not in global_config.chat_target.talk_allowed_groups:
             logger.trace(f"群{groupinfo.group_id}被禁止回复")
             return

@@ -112,7 +112,7 @@ class ChatBot:
         if groupinfo is None:
             logger.trace("检测到私聊消息")
             # 是否在配置信息中开启私聊模式
-            if global_config.enable_friend_chat:
+            if global_config.experimental.enable_friend_chat:
                 logger.trace("私聊模式已启用")
                 # 是否进入PFC
                 if global_config.enable_pfc_chatting:

@@ -38,7 +38,7 @@ class MessageBuffer:

     async def start_caching_messages(self, message: MessageRecv):
         """添加消息,启动缓冲"""
-        if not global_config.message_buffer:
+        if not global_config.chat.message_buffer:
             person_id = person_info_manager.get_person_id(
                 message.message_info.user_info.platform, message.message_info.user_info.user_id
             )
@@ -107,7 +107,7 @@ class MessageBuffer:

     async def query_buffer_result(self, message: MessageRecv) -> bool:
         """查询缓冲结果,并清理"""
-        if not global_config.message_buffer:
+        if not global_config.chat.message_buffer:
             return True
         person_id_ = self.get_person_id_(
             message.message_info.platform, message.message_info.user_info.user_id, message.message_info.group_info
@@ -279,7 +279,7 @@ class MessageManager:
                     )

                     # 检查是否超时
-                    if thinking_time > global_config.thinking_timeout:
+                    if thinking_time > global_config.normal_chat.thinking_timeout:
                         logger.warning(
                             f"[{chat_id}] 消息思考超时 ({thinking_time:.1f}秒),移除消息 {message_earliest.message_info.message_id}"
                         )

@@ -111,8 +111,8 @@ class LLMRequest:
     def __init__(self, model: dict, **kwargs):
         # 将大写的配置键转换为小写并从config中获取实际值
         try:
-            self.api_key = os.environ[model["key"]]
-            self.base_url = os.environ[model["base_url"]]
+            self.api_key = os.environ[f"{model['provider']}_KEY"]
+            self.base_url = os.environ[f"{model['provider']}_BASE_URL"]
         except AttributeError as e:
             logger.error(f"原始 model dict 信息:{model}")
             logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
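The credential lookup no longer stores env-var names inside the model dict (`"key"` / `"base_url"`); instead the dict carries a provider label and the variables follow a `<PROVIDER>_KEY` / `<PROVIDER>_BASE_URL` naming convention. One caveat worth a second look: a missing variable raises `KeyError`, which the surrounding `except AttributeError` would not catch. A sketch with an invented `DEEPSEEK` entry:

```python
import os

# Sketch of the new provider-based lookup; the model entry is hypothetical.
model = {"name": "deepseek-chat", "provider": "DEEPSEEK"}
os.environ.setdefault("DEEPSEEK_KEY", "sk-...")
os.environ.setdefault("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")

api_key = os.environ[f"{model['provider']}_KEY"]        # raises KeyError if unset
base_url = os.environ[f"{model['provider']}_BASE_URL"]
```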
@@ -500,11 +500,11 @@ class LLMRequest:
                 logger.warning(f"检测到403错误,模型从 {old_model_name} 降级为 {self.model_name}")

                 # 对全局配置进行更新
-                if global_config.llm_normal.get("name") == old_model_name:
-                    global_config.llm_normal["name"] = self.model_name
+                if global_config.model.normal.get("name") == old_model_name:
+                    global_config.model.normal["name"] = self.model_name
                     logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
-                if global_config.llm_reasoning.get("name") == old_model_name:
-                    global_config.llm_reasoning["name"] = self.model_name
+                if global_config.model.reasoning.get("name") == old_model_name:
+                    global_config.model.reasoning["name"] = self.model_name
                     logger.warning(f"将全局配置中的 llm_reasoning 模型临时降级至{self.model_name}")

                 if payload and "model" in payload:
@@ -636,7 +636,7 @@ class LLMRequest:
             **params_copy,
         }
         if "max_tokens" not in payload and "max_completion_tokens" not in payload:
-            payload["max_tokens"] = global_config.model_max_output_length
+            payload["max_tokens"] = global_config.model.model_max_output_length
         # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
         if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
            payload["max_completion_tokens"] = payload.pop("max_tokens")
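The fallback above fills `max_tokens` from config only when neither token field is present, then renames it to `max_completion_tokens` for models in `MODELS_NEEDING_TRANSFORMATION`. A worked sketch — the set membership below is assumed purely for illustration:

```python
# Hypothetical payload and set membership, mirroring the logic in the hunk above.
payload = {"model": "o3-mini", "messages": []}
MODELS_NEEDING_TRANSFORMATION = {"o3-mini"}

if "max_tokens" not in payload and "max_completion_tokens" not in payload:
    payload["max_tokens"] = 1024  # stands in for global_config.model.model_max_output_length

if payload["model"].lower() in MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
    payload["max_completion_tokens"] = payload.pop("max_tokens")

print(payload)  # {'model': 'o3-mini', 'messages': [], 'max_completion_tokens': 1024}
```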
@@ -73,8 +73,8 @@ class NormalChat:
         messageinfo = message.message_info

         bot_user_info = UserInfo(
-            user_id=global_config.BOT_QQ,
-            user_nickname=global_config.BOT_NICKNAME,
+            user_id=global_config.bot.qq_account,
+            user_nickname=global_config.bot.nickname,
             platform=messageinfo.platform,
         )

@@ -121,8 +121,8 @@ class NormalChat:
             message_id=thinking_id,
             chat_stream=self.chat_stream,  # 使用 self.chat_stream
             bot_user_info=UserInfo(
-                user_id=global_config.BOT_QQ,
-                user_nickname=global_config.BOT_NICKNAME,
+                user_id=global_config.bot.qq_account,
+                user_nickname=global_config.bot.nickname,
                 platform=message.message_info.platform,
             ),
             sender_info=message.message_info.user_info,
@@ -147,7 +147,7 @@ class NormalChat:
     # 改为实例方法
     async def _handle_emoji(self, message: MessageRecv, response: str):
         """处理表情包"""
-        if random() < global_config.emoji_chance:
+        if random() < global_config.normal_chat.emoji_chance:
             emoji_raw = await emoji_manager.get_emoji_for_text(response)
             if emoji_raw:
                 emoji_path, description = emoji_raw
@@ -160,8 +160,8 @@ class NormalChat:
                 message_id="mt" + str(thinking_time_point),
                 chat_stream=self.chat_stream,  # 使用 self.chat_stream
                 bot_user_info=UserInfo(
-                    user_id=global_config.BOT_QQ,
-                    user_nickname=global_config.BOT_NICKNAME,
+                    user_id=global_config.bot.qq_account,
+                    user_nickname=global_config.bot.nickname,
                     platform=message.message_info.platform,
                 ),
                 sender_info=message.message_info.user_info,
@@ -186,7 +186,7 @@ class NormalChat:
                 label=emotion,
                 stance=stance,  # 使用 self.chat_stream
             )
-            self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
+            self.mood_manager.update_mood_from_emotion(emotion, global_config.mood.mood_intensity_factor)

     async def _reply_interested_message(self) -> None:
         """
@@ -430,7 +430,7 @@ class NormalChat:
     def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
         """检查消息中是否包含过滤词"""
         stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id
-        for word in global_config.ban_words:
+        for word in global_config.chat.ban_words:
             if word in text:
                 logger.info(
                     f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]"
@@ -445,7 +445,7 @@ class NormalChat:
     def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
         """检查消息是否匹配过滤正则表达式"""
         stream_name = chat_manager.get_stream_name(chat.stream_id) or chat.stream_id
-        for pattern in global_config.ban_msgs_regex:
+        for pattern in global_config.chat.ban_msgs_regex:
             if pattern.search(text):
                 logger.info(
                     f"[{stream_name}][{chat.group_info.group_name if chat.group_info else '私聊'}]"

@@ -15,21 +15,22 @@ logger = get_logger("llm")
|
||||
|
||||
class NormalChatGenerator:
|
||||
def __init__(self):
|
||||
# TODO: API-Adapter修改标记
|
||||
self.model_reasoning = LLMRequest(
|
||||
model=global_config.llm_reasoning,
|
||||
model=global_config.model.reasoning,
|
||||
temperature=0.7,
|
||||
max_tokens=3000,
|
||||
request_type="response_reasoning",
|
||||
)
|
||||
self.model_normal = LLMRequest(
|
||||
model=global_config.llm_normal,
|
||||
temperature=global_config.llm_normal["temp"],
|
||||
model=global_config.model.normal,
|
||||
temperature=global_config.model.normal["temp"],
|
||||
max_tokens=256,
|
||||
request_type="response_reasoning",
|
||||
)
|
||||
|
||||
self.model_sum = LLMRequest(
|
||||
model=global_config.llm_summary, temperature=0.7, max_tokens=3000, request_type="relation"
|
||||
model=global_config.model.summary, temperature=0.7, max_tokens=3000, request_type="relation"
|
||||
)
|
||||
self.current_model_type = "r1" # 默认使用 R1
|
||||
self.current_model_name = "unknown model"
|
||||
@@ -37,7 +38,7 @@ class NormalChatGenerator:
|
||||
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
|
||||
"""根据当前模型类型选择对应的生成函数"""
|
||||
# 从global_config中获取模型概率值并选择模型
|
||||
if random.random() < global_config.model_reasoning_probability:
|
||||
if random.random() < global_config.normal_chat.reasoning_model_probability:
|
||||
self.current_model_type = "深深地"
|
||||
current_model = self.model_reasoning
|
||||
else:
|
||||
@@ -51,7 +52,7 @@ class NormalChatGenerator:
|
||||
model_response = await self._generate_response_with_model(message, current_model, thinking_id)
|
||||
|
||||
if model_response:
|
||||
logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response}")
|
||||
logger.info(f"{global_config.bot.nickname}的回复是:{model_response}")
|
||||
model_response = await self._process_response(model_response)
|
||||
|
||||
return model_response
|
||||
@@ -113,7 +114,7 @@ class NormalChatGenerator:
|
||||
- "中立":不表达明确立场或无关回应
|
||||
2. 从"开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
|
||||
3. 按照"立场-情绪"的格式直接输出结果,例如:"反对-愤怒"
|
||||
4. 考虑回复者的人格设定为{global_config.personality_core}
|
||||
4. 考虑回复者的人格设定为{global_config.personality.personality_core}
|
||||
|
||||
对话示例:
|
||||
被回复:「A就是笨」
|
||||
|
||||
@@ -1,18 +1,20 @@
 import asyncio

+from src.config.config import global_config
 from .willing_manager import BaseWillingManager


 class ClassicalWillingManager(BaseWillingManager):
     def __init__(self):
         super().__init__()
-        self._decay_task: asyncio.Task = None
+        self._decay_task: asyncio.Task | None = None

     async def _decay_reply_willing(self):
         """定期衰减回复意愿"""
         while True:
             await asyncio.sleep(1)
             for chat_id in self.chat_reply_willing:
-                self.chat_reply_willing[chat_id] = max(0, self.chat_reply_willing[chat_id] * 0.9)
+                self.chat_reply_willing[chat_id] = max(0.0, self.chat_reply_willing[chat_id] * 0.9)

     async def async_task_starter(self):
         if self._decay_task is None:
@@ -23,35 +25,33 @@ class ClassicalWillingManager(BaseWillingManager):
         chat_id = willing_info.chat_id
         current_willing = self.chat_reply_willing.get(chat_id, 0)

-        interested_rate = willing_info.interested_rate * self.global_config.response_interested_rate_amplifier
+        interested_rate = willing_info.interested_rate * global_config.normal_chat.response_interested_rate_amplifier

         if interested_rate > 0.4:
             current_willing += interested_rate - 0.3

-        if willing_info.is_mentioned_bot and current_willing < 1.0:
-            current_willing += 1
-        elif willing_info.is_mentioned_bot:
-            current_willing += 0.05
+        if willing_info.is_mentioned_bot:
+            current_willing += 1 if current_willing < 1.0 else 0.05

         is_emoji_not_reply = False
         if willing_info.is_emoji:
-            if self.global_config.emoji_response_penalty != 0:
-                current_willing *= self.global_config.emoji_response_penalty
+            if global_config.normal_chat.emoji_response_penalty != 0:
+                current_willing *= global_config.normal_chat.emoji_response_penalty
             else:
                 is_emoji_not_reply = True

         self.chat_reply_willing[chat_id] = min(current_willing, 3.0)

         reply_probability = min(
-            max((current_willing - 0.5), 0.01) * self.global_config.response_willing_amplifier * 2, 1
+            max((current_willing - 0.5), 0.01) * global_config.normal_chat.response_willing_amplifier * 2, 1
         )

         # 检查群组权限(如果是群聊)
         if (
             willing_info.group_info
-            and willing_info.group_info.group_id in self.global_config.talk_frequency_down_groups
+            and willing_info.group_info.group_id in global_config.chat_target.talk_frequency_down_groups
         ):
-            reply_probability = reply_probability / self.global_config.down_frequency_rate
+            reply_probability = reply_probability / global_config.normal_chat.down_frequency_rate

         if is_emoji_not_reply:
             reply_probability = 0
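For a concrete feel of the reply-probability formula kept (and re-pointed at the new config sections) in this hunk, here is a worked example — the values for `response_willing_amplifier` and `down_frequency_rate` are illustrative, not the shipped defaults:

```python
# Worked example of: min(max(willing - 0.5, 0.01) * amplifier * 2, 1)
current_willing = 1.2
response_willing_amplifier = 1.0
reply_probability = min(max(current_willing - 0.5, 0.01) * response_willing_amplifier * 2, 1)
print(reply_probability)  # 1 -> capped at certainty once willingness clears ~1.0

# In a talk_frequency_down group the probability is then divided by down_frequency_rate:
down_frequency_rate = 3
print(reply_probability / down_frequency_rate)  # ~0.33
```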
@@ -61,7 +61,7 @@ class ClassicalWillingManager(BaseWillingManager):
|
||||
async def before_generate_reply_handle(self, message_id):
|
||||
chat_id = self.ongoing_messages[message_id].chat_id
|
||||
current_willing = self.chat_reply_willing.get(chat_id, 0)
|
||||
self.chat_reply_willing[chat_id] = max(0, current_willing - 1.8)
|
||||
self.chat_reply_willing[chat_id] = max(0.0, current_willing - 1.8)
|
||||
|
||||
async def after_generate_reply_handle(self, message_id):
|
||||
if message_id not in self.ongoing_messages:
|
||||
@@ -70,7 +70,7 @@ class ClassicalWillingManager(BaseWillingManager):
|
||||
chat_id = self.ongoing_messages[message_id].chat_id
|
||||
current_willing = self.chat_reply_willing.get(chat_id, 0)
|
||||
if current_willing < 1:
|
||||
self.chat_reply_willing[chat_id] = min(1, current_willing + 0.4)
|
||||
self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.4)
|
||||
|
||||
async def bombing_buffer_message_handle(self, message_id):
|
||||
return await super().bombing_buffer_message_handle(message_id)
|
||||
|
||||
@@ -19,6 +19,7 @@ Mxp 模式:梦溪畔独家赞助
下下策是询问一个菜鸟(@梦溪畔)
"""

from src.config.config import global_config
from .willing_manager import BaseWillingManager
from typing import Dict
import asyncio

@@ -50,8 +51,6 @@ class MxpWillingManager(BaseWillingManager):

        self.mention_willing_gain = 0.6  # 提及意愿增益
        self.interest_willing_gain = 0.3  # 兴趣意愿增益
        self.emoji_response_penalty = self.global_config.emoji_response_penalty  # 表情包回复惩罚
        self.down_frequency_rate = self.global_config.down_frequency_rate  # 降低回复频率的群组惩罚系数
        self.single_chat_gain = 0.12  # 单聊增益

        self.fatigue_messages_triggered_num = self.expected_replies_per_min  # 疲劳消息触发数量(int)

@@ -179,10 +178,10 @@ class MxpWillingManager(BaseWillingManager):
            probability = self._willing_to_probability(current_willing)

            if w_info.is_emoji:
                probability *= self.emoji_response_penalty
                probability *= global_config.normal_chat.emoji_response_penalty

            if w_info.group_info and w_info.group_info.group_id in self.global_config.talk_frequency_down_groups:
                probability /= self.down_frequency_rate
            if w_info.group_info and w_info.group_info.group_id in global_config.chat_target.talk_frequency_down_groups:
                probability /= global_config.normal_chat.down_frequency_rate

            self.temporary_willing = current_willing

@@ -1,6 +1,6 @@
from src.common.logger import LogConfig, WILLING_STYLE_CONFIG, LoguruLogger, get_module_logger
from dataclasses import dataclass
from src.config.config import global_config, BotConfig
from src.config.config import global_config
from src.chat.message_receive.chat_stream import ChatStream, GroupInfo
from src.chat.message_receive.message import MessageRecv
from src.chat.person_info.person_info import person_info_manager, PersonInfoManager

@@ -93,7 +93,6 @@ class BaseWillingManager(ABC):
        self.chat_reply_willing: Dict[str, float] = {}  # 存储每个聊天流的回复意愿(chat_id)
        self.ongoing_messages: Dict[str, WillingInfo] = {}  # 当前正在进行的消息(message_id)
        self.lock = asyncio.Lock()
        self.global_config: BotConfig = global_config
        self.logger: LoguruLogger = logger

    def setup(self, message: MessageRecv, chat: ChatStream, is_mentioned_bot: bool, interested_rate: float):

@@ -173,7 +172,7 @@ def init_willing_manager() -> BaseWillingManager:
    Returns:
        对应mode的WillingManager实例
    """
    mode = global_config.willing_mode.lower()
    mode = global_config.normal_chat.willing_mode.lower()
    return BaseWillingManager.create(mode)
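init_willing_manager now reads the mode from normal_chat and defers construction to BaseWillingManager.create. create() itself is not part of this diff; a registry keyed by mode name is one plausible shape for it. Everything below is an assumed sketch, not the project's implementation:

# Assumed sketch of a mode registry behind a create(mode) factory.
class RegistryBase:
    _registry: dict[str, type] = {}

    @classmethod
    def register(cls, mode: str):
        def decorator(subclass: type) -> type:
            cls._registry[mode] = subclass  # map mode name -> subclass
            return subclass
        return decorator

    @classmethod
    def create(cls, mode: str) -> "RegistryBase":
        try:
            return cls._registry[mode]()
        except KeyError:
            raise ValueError(f"unknown willing mode: {mode}") from None


@RegistryBase.register("classical")
class ClassicalLike(RegistryBase):
    pass


print(type(RegistryBase.create("classical")).__name__)  # ClassicalLike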
@@ -59,8 +59,9 @@ person_info_default = {
class PersonInfoManager:
    def __init__(self):
        self.person_name_list = {}
        # TODO: API-Adapter修改标记
        self.qv_name_llm = LLMRequest(
            model=global_config.llm_normal,
            model=global_config.model.normal,
            max_tokens=256,
            request_type="qv_name",
        )

@@ -190,8 +190,8 @@ async def _build_readable_messages_internal(

    person_id = person_info_manager.get_person_id(platform, user_id)
    # 根据 replace_bot_name 参数决定是否替换机器人名称
    if replace_bot_name and user_id == global_config.BOT_QQ:
        person_name = f"{global_config.BOT_NICKNAME}(你)"
    if replace_bot_name and user_id == global_config.bot.qq_account:
        person_name = f"{global_config.bot.nickname}(你)"
    else:
        person_name = await person_info_manager.get_value(person_id, "person_name")

@@ -427,7 +427,7 @@ async def build_anonymous_messages(messages: List[Dict[str, Any]]) -> str:
    output_lines = []

    def get_anon_name(platform, user_id):
        if user_id == global_config.BOT_QQ:
        if user_id == global_config.bot.qq_account:
            return "SELF"
        person_id = person_info_manager.get_person_id(platform, user_id)
        if person_id not in person_map:

@@ -501,7 +501,7 @@ async def get_person_id_list(messages: List[Dict[str, Any]]) -> List[str]:
        user_id = user_info.get("user_id")

        # 检查必要信息是否存在 且 不是机器人自己
        if not all([platform, user_id]) or user_id == global_config.BOT_QQ:
        if not all([platform, user_id]) or user_id == global_config.bot.qq_account:
            continue

        person_id = person_info_manager.get_person_id(platform, user_id)

@@ -9,7 +9,6 @@ from typing import List
class InfoCatcher:
    def __init__(self):
        self.chat_history = []  # 聊天历史,长度为三倍使用的上下文喵~
        self.context_length = global_config.observation_context_size
        self.chat_history_in_thinking = []  # 思考期间的聊天内容喵~
        self.chat_history_after_response = []  # 回复后的聊天内容,长度为一倍上下文喵~

@@ -143,7 +142,7 @@ class InfoCatcher:
        messages_before = (
            db.messages.find({"chat_id": chat_id, "message_id": {"$lt": message_id}})
            .sort("time", -1)
            .limit(self.context_length * 3)
            .limit(global_config.chat.observation_context_size * 3)
        )  # 获取更多历史信息

        return list(messages_before)

@@ -43,8 +43,8 @@ def db_message_to_str(message_dict: dict) -> str:

def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
    """检查消息是否提到了机器人"""
    keywords = [global_config.BOT_NICKNAME]
    nicknames = global_config.BOT_ALIAS_NAMES
    keywords = [global_config.bot.nickname]
    nicknames = global_config.bot.alias_names
    reply_probability = 0.0
    is_at = False
    is_mentioned = False

@@ -64,18 +64,18 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
    )

    # 判断是否被@
    if re.search(f"@[\s\S]*?(id:{global_config.BOT_QQ})", message.processed_plain_text):
    if re.search(f"@[\s\S]*?(id:{global_config.bot.qq_account})", message.processed_plain_text):
        is_at = True
        is_mentioned = True

    if is_at and global_config.at_bot_inevitable_reply:
    if is_at and global_config.normal_chat.at_bot_inevitable_reply:
        reply_probability = 1.0
        logger.info("被@,回复概率设置为100%")
    else:
        if not is_mentioned:
            # 判断是否被回复
            if re.match(
                f"\[回复 [\s\S]*?\({str(global_config.BOT_QQ)}\):[\s\S]*?],说:", message.processed_plain_text
                f"\[回复 [\s\S]*?\({str(global_config.bot.qq_account)}\):[\s\S]*?],说:", message.processed_plain_text
            ):
                is_mentioned = True
            else:

@@ -88,7 +88,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
    for nickname in nicknames:
        if nickname in message_content:
            is_mentioned = True
    if is_mentioned and global_config.mentioned_bot_inevitable_reply:
    if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply:
        reply_probability = 1.0
        logger.info("被提及,回复概率设置为100%")
    return is_mentioned, reply_probability
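The @-detection above assumes the adapter renders mentions as "@昵称(id:123456)" inside processed_plain_text, so the pattern can anchor on the configured account id. A quick self-check of that exact pattern (the account id and message text below are invented):

import re

qq_account = "114514"  # stand-in for global_config.bot.qq_account
text = "@麦麦(id:114514) 在吗?"

# same pattern as above: lazily skip the nickname, then require "id:<account>"
print(bool(re.search(f"@[\\s\\S]*?(id:{qq_account})", text)))        # True
print(bool(re.search(f"@[\\s\\S]*?(id:{qq_account})", "普通消息")))  # False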
@@ -96,7 +96,8 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:

async def get_embedding(text, request_type="embedding"):
    """获取文本的embedding向量"""
    llm = LLMRequest(model=global_config.embedding, request_type=request_type)
    # TODO: API-Adapter修改标记
    llm = LLMRequest(model=global_config.model.embedding, request_type=request_type)
    # return llm.get_embedding_sync(text)
    try:
        embedding = await llm.get_embedding(text)

@@ -163,7 +164,7 @@ def get_recent_group_speaker(chat_stream_id: int, sender, limit: int = 12) -> li
        user_info = UserInfo.from_dict(msg_db_data["user_info"])
        if (
            (user_info.platform, user_info.user_id) != sender
            and user_info.user_id != global_config.BOT_QQ
            and user_info.user_id != global_config.bot.qq_account
            and (user_info.platform, user_info.user_id, user_info.user_nickname) not in who_chat_in_group
            and len(who_chat_in_group) < 5
        ):  # 排除重复,排除消息发送者,排除bot,限制加载的关系数目

@@ -321,7 +322,7 @@ def random_remove_punctuation(text: str) -> str:

def process_llm_response(text: str) -> list[str]:
    # 先保护颜文字
    if global_config.enable_kaomoji_protection:
    if global_config.response_splitter.enable_kaomoji_protection:
        protected_text, kaomoji_mapping = protect_kaomoji(text)
        logger.trace(f"保护颜文字后的文本: {protected_text}")
    else:

@@ -340,8 +341,8 @@ def process_llm_response(text: str) -> list[str]:
    logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")

    # 对清理后的文本进行进一步处理
    max_length = global_config.response_max_length * 2
    max_sentence_num = global_config.response_max_sentence_num
    max_length = global_config.response_splitter.max_length * 2
    max_sentence_num = global_config.response_splitter.max_sentence_num
    # 如果基本上是中文,则进行长度过滤
    if get_western_ratio(cleaned_text) < 0.1:
        if len(cleaned_text) > max_length:

@@ -349,20 +350,20 @@ def process_llm_response(text: str) -> list[str]:
            return ["懒得说"]

    typo_generator = ChineseTypoGenerator(
        error_rate=global_config.chinese_typo_error_rate,
        min_freq=global_config.chinese_typo_min_freq,
        tone_error_rate=global_config.chinese_typo_tone_error_rate,
        word_replace_rate=global_config.chinese_typo_word_replace_rate,
        error_rate=global_config.chinese_typo.error_rate,
        min_freq=global_config.chinese_typo.min_freq,
        tone_error_rate=global_config.chinese_typo.tone_error_rate,
        word_replace_rate=global_config.chinese_typo.word_replace_rate,
    )

    if global_config.enable_response_splitter:
    if global_config.response_splitter.enable:
        split_sentences = split_into_sentences_w_remove_punctuation(cleaned_text)
    else:
        split_sentences = [cleaned_text]

    sentences = []
    for sentence in split_sentences:
        if global_config.chinese_typo_enable:
        if global_config.chinese_typo.enable:
            typoed_text, typo_corrections = typo_generator.create_typo_sentence(sentence)
            sentences.append(typoed_text)
            if typo_corrections:

@@ -372,7 +373,7 @@ def process_llm_response(text: str) -> list[str]:

    if len(sentences) > max_sentence_num:
        logger.warning(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
        return [f"{global_config.BOT_NICKNAME}不知道哦"]
        return [f"{global_config.bot.nickname}不知道哦"]

    # if extracted_contents:
    #     for content in extracted_contents:
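With the rename, process_llm_response reads every knob from nested sections (response_splitter.*, chinese_typo.*) instead of flat attributes. Condensed, the post-processing flow is: optional split, optional typo injection, then a length guard. A sketch of that flow; the splitter and typo generator here are stubs standing in for the real helpers used above:

# Condensed sketch of the flow above; stubs replace the real helpers.
def split_sentences(text: str) -> list[str]:
    return [s for s in text.split("。") if s]  # stand-in for the real splitter

class TypoStub:
    def create_typo_sentence(self, s: str) -> tuple[str, list]:
        return s, []  # the real generator swaps characters and reports corrections

typo_generator = TypoStub()

def postprocess(cleaned_text: str, cfg) -> list[str]:
    # 1. optional sentence split (response_splitter.enable)
    parts = split_sentences(cleaned_text) if cfg.response_splitter.enable else [cleaned_text]

    # 2. optional typo injection per sentence (chinese_typo.enable)
    sentences = []
    for part in parts:
        if cfg.chinese_typo.enable:
            part, _corrections = typo_generator.create_typo_sentence(part)
        sentences.append(part)

    # 3. guard: too many fragments trigger the canned fallback (bot.nickname)
    if len(sentences) > cfg.response_splitter.max_sentence_num:
        return [f"{cfg.bot.nickname}不知道哦"]
    return sentences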
@@ -36,7 +36,7 @@ class ImageManager:
        self._ensure_description_collection()
        self._ensure_image_dir()
        self._initialized = True
        self._llm = LLMRequest(model=global_config.vlm, temperature=0.4, max_tokens=300, request_type="image")
        self._llm = LLMRequest(model=global_config.model.vlm, temperature=0.4, max_tokens=300, request_type="image")

    def _ensure_image_dir(self):
        """确保图像存储目录存在"""

@@ -134,7 +134,7 @@ class ImageManager:
            return f"[表情包,含义看起来是:{cached_description}]"

        # 根据配置决定是否保存图片
        if global_config.save_emoji:
        if global_config.emoji.save_emoji:
            # 生成文件名和路径
            timestamp = int(time.time())
            filename = f"{timestamp}_{image_hash[:8]}.{image_format}"

@@ -200,7 +200,7 @@ class ImageManager:
            return "[图片]"

        # 根据配置决定是否保存图片
        if global_config.save_pic:
        if global_config.emoji.save_pic:
            # 生成文件名和路径
            timestamp = int(time.time())
            filename = f"{timestamp}_{image_hash[:8]}.{image_format}"

@@ -35,7 +35,7 @@ class TelemetryHeartBeatTask(AsyncTask):
        info_dict = {
            "os_type": "Unknown",
            "py_version": platform.python_version(),
            "mmc_version": global_config.MAI_VERSION,
            "mmc_version": global_config.MMC_VERSION,
        }

        match platform.system():

@@ -133,9 +133,8 @@ class TelemetryHeartBeatTask(AsyncTask):

    async def run(self):
        # 发送心跳
        if global_config.remote_enable:
            if self.client_uuid is None:
                if not await self._req_uuid():
        if global_config.telemetry.enable:
            if self.client_uuid is None and not await self._req_uuid():
                logger.error("获取UUID失败,跳过此次心跳")
                return

@@ -1,64 +1,68 @@
import os
import re
from dataclasses import dataclass, field
from typing import Dict, List, Optional
from dataclasses import field, dataclass

import tomli
import tomlkit
import shutil
from datetime import datetime
from pathlib import Path
from packaging import version
from packaging.version import Version, InvalidVersion
from packaging.specifiers import SpecifierSet, InvalidSpecifier

from tomlkit import TOMLDocument
from tomlkit.items import Table

from src.common.logger_manager import get_logger
from rich.traceback import install

from src.config.config_base import ConfigBase
from src.config.official_configs import (
    BotConfig,
    ChatTargetConfig,
    PersonalityConfig,
    IdentityConfig,
    PlatformsConfig,
    ChatConfig,
    NormalChatConfig,
    FocusChatConfig,
    EmojiConfig,
    MemoryConfig,
    MoodConfig,
    KeywordReactionConfig,
    ChineseTypoConfig,
    ResponseSplitterConfig,
    TelemetryConfig,
    ExperimentalConfig,
    ModelConfig,
)

install(extra_lines=3)

# 配置主程序日志格式
logger = get_logger("config")

# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = True
mai_version_main = "0.6.4"
mai_version_fix = "snapshot-1"
CONFIG_DIR = "config"
TEMPLATE_DIR = "template"

if mai_version_fix:
    if is_test:
        mai_version = f"test-{mai_version_main}-{mai_version_fix}"
    else:
        mai_version = f"{mai_version_main}-{mai_version_fix}"
else:
    if is_test:
        mai_version = f"test-{mai_version_main}"
    else:
        mai_version = mai_version_main
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新,请严格参照语义化版本规范:https://semver.org/lang/zh-CN/
MMC_VERSION = "0.7.0-snapshot.1"
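The single hard-coded MMC_VERSION replaces the assembled mai_version string, while the old loader (kept below for reference) still gates each config section with packaging's SpecifierSet against the file's inner.version. The check itself is plain packaging usage:

from packaging.specifiers import SpecifierSet
from packaging.version import Version

# a section declared with "support": ">=1.6.0" accepts a v1.7.0 config file
print(Version("1.7.0") in SpecifierSet(">=1.6.0"))  # True
print(Version("1.2.4") in SpecifierSet(">=1.6.0"))  # False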
def update_config():
    # 获取根目录路径
    root_dir = Path(__file__).parent.parent.parent
    template_dir = root_dir / "template"
    config_dir = root_dir / "config"
    old_config_dir = config_dir / "old"
    old_config_dir = f"{CONFIG_DIR}/old"

    # 定义文件路径
    template_path = template_dir / "bot_config_template.toml"
    old_config_path = config_dir / "bot_config.toml"
    new_config_path = config_dir / "bot_config.toml"
    template_path = f"{TEMPLATE_DIR}/bot_config_template.toml"
    old_config_path = f"{CONFIG_DIR}/bot_config.toml"
    new_config_path = f"{CONFIG_DIR}/bot_config.toml"

    # 检查配置文件是否存在
    if not old_config_path.exists():
    if not os.path.exists(old_config_path):
        logger.info("配置文件不存在,从模板创建新配置")
        # 创建文件夹
        old_config_dir.mkdir(parents=True, exist_ok=True)
        shutil.copy2(template_path, old_config_path)
        os.makedirs(CONFIG_DIR, exist_ok=True)  # 创建文件夹
        shutil.copy2(template_path, old_config_path)  # 复制模板文件
        logger.info(f"已创建新配置文件,请填写后重新运行: {old_config_path}")
        # 如果是新创建的配置文件,直接返回
        return quit()
        quit()

    # 读取旧配置文件和模板文件
    with open(old_config_path, "r", encoding="utf-8") as f:

@@ -75,13 +79,15 @@ def update_config():
            return
        else:
            logger.info(f"检测到版本号不同: 旧版本 v{old_version} -> 新版本 v{new_version}")
    else:
        logger.info("已有配置文件未检测到版本号,可能是旧版本。将进行更新")

    # 创建old目录(如果不存在)
    old_config_dir.mkdir(exist_ok=True)
    os.makedirs(old_config_dir, exist_ok=True)

    # 生成带时间戳的新文件名
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    old_backup_path = old_config_dir / f"bot_config_{timestamp}.toml"
    old_backup_path = f"{old_config_dir}/bot_config_{timestamp}.toml"

    # 移动旧配置文件到old目录
    shutil.move(old_config_path, old_backup_path)

@@ -91,24 +97,23 @@ def update_config():
    shutil.copy2(template_path, new_config_path)
    logger.info(f"已创建新配置文件: {new_config_path}")

    # 递归更新配置
    def update_dict(target, source):
    def update_dict(target: TOMLDocument | dict, source: TOMLDocument | dict):
        """
        将source字典的值更新到target字典中(如果target中存在相同的键)
        """
        for key, value in source.items():
            # 跳过version字段的更新
            if key == "version":
                continue
            if key in target:
                if isinstance(value, dict) and isinstance(target[key], (dict, tomlkit.items.Table)):
                if isinstance(value, dict) and isinstance(target[key], (dict, Table)):
                    update_dict(target[key], value)
                else:
                    try:
                        # 对数组类型进行特殊处理
                        if isinstance(value, list):
                            # 如果是空数组,确保它保持为空数组
                            if not value:
                                target[key] = tomlkit.array()
                            else:
                                target[key] = tomlkit.array(value)
                            target[key] = tomlkit.array(str(value)) if value else tomlkit.array()
                        else:
                            # 其他类型使用item方法创建新值
                            target[key] = tomlkit.item(value)

@@ -123,619 +128,57 @@ def update_config():
    # 保存更新后的配置(保留注释和格式)
    with open(new_config_path, "w", encoding="utf-8") as f:
        f.write(tomlkit.dumps(new_config))
    logger.info("配置文件更新完成")
    logger.info("配置文件更新完成,建议检查新配置文件中的内容,以免丢失重要信息")
    quit()
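update_dict works on tomlkit documents rather than plain dicts precisely so the template's comments and layout survive the merge; wrapping values via tomlkit.item()/tomlkit.array() keeps the output valid TOML. A small round trip illustrating that behaviour (the keys below are invented):

import tomlkit

template = tomlkit.parse('# 机器人昵称\nnickname = "默认"\nalias_names = []\n')
user_values = {"nickname": "麦麦", "alias_names": ["mai", "小麦"]}

for key, value in user_values.items():
    if isinstance(value, list):
        # same array handling as update_dict above
        template[key] = tomlkit.array(str(value)) if value else tomlkit.array()
    else:
        template[key] = tomlkit.item(value)

print(tomlkit.dumps(template))  # the "# 机器人昵称" comment is preserved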
@dataclass
class BotConfig:
    """机器人配置类"""
class Config(ConfigBase):
    """总配置类"""

    INNER_VERSION: Version = None
    MAI_VERSION: str = mai_version  # 硬编码的版本信息
    MMC_VERSION: str = field(default=MMC_VERSION, repr=False, init=False)  # 硬编码的版本信息

    # bot
    BOT_QQ: Optional[str] = "114514"
    BOT_NICKNAME: Optional[str] = None
    BOT_ALIAS_NAMES: List[str] = field(default_factory=list)  # 别名,可以通过这个叫它
    bot: BotConfig
    chat_target: ChatTargetConfig
    personality: PersonalityConfig
    identity: IdentityConfig
    platforms: PlatformsConfig
    chat: ChatConfig
    normal_chat: NormalChatConfig
    focus_chat: FocusChatConfig
    emoji: EmojiConfig
    memory: MemoryConfig
    mood: MoodConfig
    keyword_reaction: KeywordReactionConfig
    chinese_typo: ChineseTypoConfig
    response_splitter: ResponseSplitterConfig
    telemetry: TelemetryConfig
    experimental: ExperimentalConfig
    model: ModelConfig

    # group
    talk_allowed_groups = set()
    talk_frequency_down_groups = set()
    ban_user_id = set()

    # personality
    personality_core = "用一句话或几句话描述人格的核心特点"  # 建议20字以内,谁再写3000字小作文敲谁脑袋
    personality_sides: List[str] = field(
        default_factory=lambda: [
            "用一句话或几句话描述人格的一些侧面",
            "用一句话或几句话描述人格的一些侧面",
            "用一句话或几句话描述人格的一些侧面",
        ]
    )
    expression_style = "描述麦麦说话的表达风格,表达习惯"
    # identity
    identity_detail: List[str] = field(
        default_factory=lambda: [
            "身份特点",
            "身份特点",
        ]
    )
    height: int = 170  # 身高 单位厘米
    weight: int = 50  # 体重 单位千克
    age: int = 20  # 年龄 单位岁
    gender: str = "男"  # 性别
    appearance: str = "用几句话描述外貌特征"  # 外貌特征

    # chat
    allow_focus_mode: bool = True  # 是否允许专注聊天状态

    base_normal_chat_num: int = 3  # 最多允许多少个群进行普通聊天
    base_focused_chat_num: int = 2  # 最多允许多少个群进行专注聊天

    observation_context_size: int = 12  # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩

    message_buffer: bool = True  # 消息缓冲器

    ban_words = set()
    ban_msgs_regex = set()

    # focus_chat
    reply_trigger_threshold: float = 3.0  # 心流聊天触发阈值,越低越容易触发
    default_decay_rate_per_second: float = 0.98  # 默认衰减率,越大衰减越慢
    consecutive_no_reply_threshold = 3

    compressed_length: int = 5  # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
    compress_length_limit: int = 5  # 最多压缩份数,超过该数值的压缩上下文会被删除

    # normal_chat
    model_reasoning_probability: float = 0.7  # 麦麦回答时选择推理模型(主要)模型概率
    model_normal_probability: float = 0.3  # 麦麦回答时选择一般模型(次要)模型概率

    emoji_chance: float = 0.2  # 发送表情包的基础概率
    thinking_timeout: int = 120  # 思考时间

    willing_mode: str = "classical"  # 意愿模式
    response_willing_amplifier: float = 1.0  # 回复意愿放大系数
    response_interested_rate_amplifier: float = 1.0  # 回复兴趣度放大系数
    down_frequency_rate: float = 3  # 降低回复频率的群组回复意愿降低系数
    emoji_response_penalty: float = 0.0  # 表情包回复惩罚
    mentioned_bot_inevitable_reply: bool = False  # 提及 bot 必然回复
    at_bot_inevitable_reply: bool = False  # @bot 必然回复

    # emoji
    max_emoji_num: int = 200  # 表情包最大数量
    max_reach_deletion: bool = True  # 开启则在达到最大数量时删除表情包,关闭则不会继续收集表情包
    EMOJI_CHECK_INTERVAL: int = 120  # 表情包检查间隔(分钟)

    save_pic: bool = False  # 是否保存图片
    save_emoji: bool = False  # 是否保存表情包
    steal_emoji: bool = True  # 是否偷取表情包,让麦麦可以发送她保存的这些表情包

    EMOJI_CHECK: bool = False  # 是否开启过滤
    EMOJI_CHECK_PROMPT: str = "符合公序良俗"  # 表情包过滤要求

    # memory
    build_memory_interval: int = 600  # 记忆构建间隔(秒)
    memory_build_distribution: list = field(
        default_factory=lambda: [4, 2, 0.6, 24, 8, 0.4]
    )  # 记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重
    build_memory_sample_num: int = 10  # 记忆构建采样数量
    build_memory_sample_length: int = 20  # 记忆构建采样长度
    memory_compress_rate: float = 0.1  # 记忆压缩率

    forget_memory_interval: int = 600  # 记忆遗忘间隔(秒)
    memory_forget_time: int = 24  # 记忆遗忘时间(小时)
    memory_forget_percentage: float = 0.01  # 记忆遗忘比例

    consolidate_memory_interval: int = 1000  # 记忆整合间隔(秒)
    consolidation_similarity_threshold: float = 0.7  # 相似度阈值
    consolidate_memory_percentage: float = 0.01  # 检查节点比例

    memory_ban_words: list = field(
        default_factory=lambda: ["表情包", "图片", "回复", "聊天记录"]
    )  # 添加新的配置项默认值

    # mood
    mood_update_interval: float = 1.0  # 情绪更新间隔 单位秒
    mood_decay_rate: float = 0.95  # 情绪衰减率
    mood_intensity_factor: float = 0.7  # 情绪强度因子

    # keywords
    keywords_reaction_rules = []  # 关键词回复规则

    # chinese_typo
    chinese_typo_enable = True  # 是否启用中文错别字生成器
    chinese_typo_error_rate = 0.03  # 单字替换概率
    chinese_typo_min_freq = 7  # 最小字频阈值
    chinese_typo_tone_error_rate = 0.2  # 声调错误概率
    chinese_typo_word_replace_rate = 0.02  # 整词替换概率

    # response_splitter
    enable_kaomoji_protection = False  # 是否启用颜文字保护
    enable_response_splitter = True  # 是否启用回复分割器
    response_max_length = 100  # 回复允许的最大长度
    response_max_sentence_num = 3  # 回复允许的最大句子数

    model_max_output_length: int = 800  # 最大回复长度

    # remote
    remote_enable: bool = True  # 是否启用远程控制

    # experimental
    enable_friend_chat: bool = False  # 是否启用好友聊天
    # enable_think_flow: bool = False  # 是否启用思考流程
    talk_allowed_private = set()
    enable_pfc_chatting: bool = False  # 是否启用PFC聊天

    # 模型配置
    llm_reasoning: dict[str, str] = field(default_factory=lambda: {})
    # llm_reasoning_minor: dict[str, str] = field(default_factory=lambda: {})
    llm_normal: Dict[str, str] = field(default_factory=lambda: {})
    llm_topic_judge: Dict[str, str] = field(default_factory=lambda: {})
    llm_summary: Dict[str, str] = field(default_factory=lambda: {})
    embedding: Dict[str, str] = field(default_factory=lambda: {})
    vlm: Dict[str, str] = field(default_factory=lambda: {})
    moderation: Dict[str, str] = field(default_factory=lambda: {})

    llm_observation: Dict[str, str] = field(default_factory=lambda: {})
    llm_sub_heartflow: Dict[str, str] = field(default_factory=lambda: {})
    llm_heartflow: Dict[str, str] = field(default_factory=lambda: {})
    llm_tool_use: Dict[str, str] = field(default_factory=lambda: {})
    llm_plan: Dict[str, str] = field(default_factory=lambda: {})

    api_urls: Dict[str, str] = field(default_factory=lambda: {})

    @staticmethod
    def get_config_dir() -> str:
        """获取配置文件目录"""
        current_dir = os.path.dirname(os.path.abspath(__file__))
        root_dir = os.path.abspath(os.path.join(current_dir, "..", ".."))
        config_dir = os.path.join(root_dir, "config")
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        return config_dir
    @classmethod
    def convert_to_specifierset(cls, value: str) -> SpecifierSet:
        """将 字符串 版本表达式转换成 SpecifierSet
        Args:
            value[str]: 版本表达式(字符串)
        Returns:
            SpecifierSet
        """
        try:
            converted = SpecifierSet(value)
        except InvalidSpecifier:
            logger.error(f"{value} 分类使用了错误的版本约束表达式\n", "请阅读 https://semver.org/lang/zh-CN/ 修改代码")
            exit(1)

        return converted

    @classmethod
    def get_config_version(cls, toml: dict) -> Version:
        """提取配置文件的 SpecifierSet 版本数据
        Args:
            toml[dict]: 输入的配置文件字典
        Returns:
            Version
        """
        if "inner" in toml:
            try:
                config_version: str = toml["inner"]["version"]
            except KeyError as e:
                logger.error("配置文件中 inner 段 不存在, 这是错误的配置文件")
                raise KeyError(f"配置文件中 inner 段 不存在 {e}, 这是错误的配置文件") from e
        else:
            toml["inner"] = {"version": "0.0.0"}
            config_version = toml["inner"]["version"]

        try:
            ver = version.parse(config_version)
        except InvalidVersion as e:
            logger.error(
                "配置文件中 inner段 的 version 键是错误的版本描述\n"
                "请阅读 https://semver.org/lang/zh-CN/ 修改配置,并参考本项目指定的模板进行修改\n"
                "本项目在不同的版本下有不同的模板,请注意识别"
            )
            raise InvalidVersion("配置文件中 inner段 的 version 键是错误的版本描述\n") from e

        return ver
    @classmethod
    def load_config(cls, config_path: str = None) -> "BotConfig":
        """从TOML配置文件加载配置"""
        config = cls()

        def personality(parent: dict):
            personality_config = parent["personality"]
            if config.INNER_VERSION in SpecifierSet(">=1.2.4"):
                config.personality_core = personality_config.get("personality_core", config.personality_core)
                config.personality_sides = personality_config.get("personality_sides", config.personality_sides)
            if config.INNER_VERSION in SpecifierSet(">=1.7.0"):
                config.expression_style = personality_config.get("expression_style", config.expression_style)

        def identity(parent: dict):
            identity_config = parent["identity"]
            if config.INNER_VERSION in SpecifierSet(">=1.2.4"):
                config.identity_detail = identity_config.get("identity_detail", config.identity_detail)
                config.height = identity_config.get("height", config.height)
                config.weight = identity_config.get("weight", config.weight)
                config.age = identity_config.get("age", config.age)
                config.gender = identity_config.get("gender", config.gender)
                config.appearance = identity_config.get("appearance", config.appearance)

        def emoji(parent: dict):
            emoji_config = parent["emoji"]
            config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", config.EMOJI_CHECK_INTERVAL)
            config.EMOJI_CHECK_PROMPT = emoji_config.get("check_prompt", config.EMOJI_CHECK_PROMPT)
            config.EMOJI_CHECK = emoji_config.get("enable_check", config.EMOJI_CHECK)
            if config.INNER_VERSION in SpecifierSet(">=1.1.1"):
                config.max_emoji_num = emoji_config.get("max_emoji_num", config.max_emoji_num)
                config.max_reach_deletion = emoji_config.get("max_reach_deletion", config.max_reach_deletion)
            if config.INNER_VERSION in SpecifierSet(">=1.4.2"):
                config.save_pic = emoji_config.get("save_pic", config.save_pic)
                config.save_emoji = emoji_config.get("save_emoji", config.save_emoji)
                config.steal_emoji = emoji_config.get("steal_emoji", config.steal_emoji)

        def bot(parent: dict):
            # 机器人基础配置
            bot_config = parent["bot"]
            bot_qq = bot_config.get("qq")
            config.BOT_QQ = str(bot_qq)
            config.BOT_NICKNAME = bot_config.get("nickname", config.BOT_NICKNAME)
            config.BOT_ALIAS_NAMES = bot_config.get("alias_names", config.BOT_ALIAS_NAMES)

        def chat(parent: dict):
            chat_config = parent["chat"]
            config.allow_focus_mode = chat_config.get("allow_focus_mode", config.allow_focus_mode)
            config.base_normal_chat_num = chat_config.get("base_normal_chat_num", config.base_normal_chat_num)
            config.base_focused_chat_num = chat_config.get("base_focused_chat_num", config.base_focused_chat_num)
            config.observation_context_size = chat_config.get(
                "observation_context_size", config.observation_context_size
            )
            config.message_buffer = chat_config.get("message_buffer", config.message_buffer)
            config.ban_words = chat_config.get("ban_words", config.ban_words)
            for r in chat_config.get("ban_msgs_regex", config.ban_msgs_regex):
                config.ban_msgs_regex.add(re.compile(r))

        def normal_chat(parent: dict):
            normal_chat_config = parent["normal_chat"]
            config.model_reasoning_probability = normal_chat_config.get(
                "model_reasoning_probability", config.model_reasoning_probability
            )
            config.model_normal_probability = normal_chat_config.get(
                "model_normal_probability", config.model_normal_probability
            )
            config.emoji_chance = normal_chat_config.get("emoji_chance", config.emoji_chance)
            config.thinking_timeout = normal_chat_config.get("thinking_timeout", config.thinking_timeout)

            config.willing_mode = normal_chat_config.get("willing_mode", config.willing_mode)
            config.response_willing_amplifier = normal_chat_config.get(
                "response_willing_amplifier", config.response_willing_amplifier
            )
            config.response_interested_rate_amplifier = normal_chat_config.get(
                "response_interested_rate_amplifier", config.response_interested_rate_amplifier
            )
            config.down_frequency_rate = normal_chat_config.get("down_frequency_rate", config.down_frequency_rate)
            config.emoji_response_penalty = normal_chat_config.get(
                "emoji_response_penalty", config.emoji_response_penalty
            )

            config.mentioned_bot_inevitable_reply = normal_chat_config.get(
                "mentioned_bot_inevitable_reply", config.mentioned_bot_inevitable_reply
            )
            config.at_bot_inevitable_reply = normal_chat_config.get(
                "at_bot_inevitable_reply", config.at_bot_inevitable_reply
            )

        def focus_chat(parent: dict):
            focus_chat_config = parent["focus_chat"]
            config.compressed_length = focus_chat_config.get("compressed_length", config.compressed_length)
            config.compress_length_limit = focus_chat_config.get("compress_length_limit", config.compress_length_limit)
            config.reply_trigger_threshold = focus_chat_config.get(
                "reply_trigger_threshold", config.reply_trigger_threshold
            )
            config.default_decay_rate_per_second = focus_chat_config.get(
                "default_decay_rate_per_second", config.default_decay_rate_per_second
            )
            config.consecutive_no_reply_threshold = focus_chat_config.get(
                "consecutive_no_reply_threshold", config.consecutive_no_reply_threshold
            )

        def model(parent: dict):
            # 加载模型配置
            model_config: dict = parent["model"]

            config_list = [
                "llm_reasoning",
                # "llm_reasoning_minor",
                "llm_normal",
                "llm_topic_judge",
                "llm_summary",
                "vlm",
                "embedding",
                "llm_tool_use",
                "llm_observation",
                "llm_sub_heartflow",
                "llm_plan",
                "llm_heartflow",
                "llm_PFC_action_planner",
                "llm_PFC_chat",
                "llm_PFC_reply_checker",
            ]

            for item in config_list:
                if item in model_config:
                    cfg_item: dict = model_config[item]

                    # base_url 的例子: SILICONFLOW_BASE_URL
                    # key 的例子: SILICONFLOW_KEY
                    cfg_target = {
                        "name": "",
                        "base_url": "",
                        "key": "",
                        "stream": False,
                        "pri_in": 0,
                        "pri_out": 0,
                        "temp": 0.7,
                    }

                    if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
                        cfg_target = cfg_item

                    elif config.INNER_VERSION in SpecifierSet(">=0.0.1"):
                        stable_item = ["name", "pri_in", "pri_out"]

                        stream_item = ["stream"]
                        if config.INNER_VERSION in SpecifierSet(">=1.0.1"):
                            stable_item.append("stream")

                        pricing_item = ["pri_in", "pri_out"]

                        # 从配置中原始拷贝稳定字段
                        for i in stable_item:
                            # 如果 字段 属于计费项 且获取不到,那默认值是 0
                            if i in pricing_item and i not in cfg_item:
                                cfg_target[i] = 0

                            if i in stream_item and i not in cfg_item:
                                cfg_target[i] = False

                            else:
                                # 没有特殊情况则原样复制
                                try:
                                    cfg_target[i] = cfg_item[i]
                                except KeyError as e:
                                    logger.error(f"{item} 中的必要字段不存在,请检查")
                                    raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查") from e

                        # 如果配置中有temp参数,就使用配置中的值
                        if "temp" in cfg_item:
                            cfg_target["temp"] = cfg_item["temp"]
                        else:
                            # 如果没有temp参数,就删除默认值
                            cfg_target.pop("temp", None)

                        provider = cfg_item.get("provider")
                        if provider is None:
                            logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查")
                            raise KeyError(f"provider 字段在模型配置 {item} 中不存在,请检查")

                        cfg_target["base_url"] = f"{provider}_BASE_URL"
                        cfg_target["key"] = f"{provider}_KEY"

                    # 如果 列表中的项目在 model_config 中,利用反射来设置对应项目
                    setattr(config, item, cfg_target)
                else:
                    logger.error(f"模型 {item} 在config中不存在,请检查,或尝试更新配置文件")
                    raise KeyError(f"模型 {item} 在config中不存在,请检查,或尝试更新配置文件")

        def memory(parent: dict):
            memory_config = parent["memory"]
            config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
            config.forget_memory_interval = memory_config.get("forget_memory_interval", config.forget_memory_interval)
            config.memory_ban_words = set(memory_config.get("memory_ban_words", []))
            config.memory_forget_time = memory_config.get("memory_forget_time", config.memory_forget_time)
            config.memory_forget_percentage = memory_config.get(
                "memory_forget_percentage", config.memory_forget_percentage
            )
            config.memory_compress_rate = memory_config.get("memory_compress_rate", config.memory_compress_rate)
            if config.INNER_VERSION in SpecifierSet(">=0.0.11"):
                config.memory_build_distribution = memory_config.get(
                    "memory_build_distribution", config.memory_build_distribution
                )
                config.build_memory_sample_num = memory_config.get(
                    "build_memory_sample_num", config.build_memory_sample_num
                )
                config.build_memory_sample_length = memory_config.get(
                    "build_memory_sample_length", config.build_memory_sample_length
                )
            if config.INNER_VERSION in SpecifierSet(">=1.5.1"):
                config.consolidate_memory_interval = memory_config.get(
                    "consolidate_memory_interval", config.consolidate_memory_interval
                )
                config.consolidation_similarity_threshold = memory_config.get(
                    "consolidation_similarity_threshold", config.consolidation_similarity_threshold
                )
                config.consolidate_memory_percentage = memory_config.get(
                    "consolidate_memory_percentage", config.consolidate_memory_percentage
                )

        def remote(parent: dict):
            remote_config = parent["remote"]
            config.remote_enable = remote_config.get("enable", config.remote_enable)

        def mood(parent: dict):
            mood_config = parent["mood"]
            config.mood_update_interval = mood_config.get("mood_update_interval", config.mood_update_interval)
            config.mood_decay_rate = mood_config.get("mood_decay_rate", config.mood_decay_rate)
            config.mood_intensity_factor = mood_config.get("mood_intensity_factor", config.mood_intensity_factor)

        def keywords_reaction(parent: dict):
            keywords_reaction_config = parent["keywords_reaction"]
            if keywords_reaction_config.get("enable", False):
                config.keywords_reaction_rules = keywords_reaction_config.get("rules", config.keywords_reaction_rules)
                for rule in config.keywords_reaction_rules:
                    if rule.get("enable", False) and "regex" in rule:
                        rule["regex"] = [re.compile(r) for r in rule.get("regex", [])]

        def chinese_typo(parent: dict):
            chinese_typo_config = parent["chinese_typo"]
            config.chinese_typo_enable = chinese_typo_config.get("enable", config.chinese_typo_enable)
            config.chinese_typo_error_rate = chinese_typo_config.get("error_rate", config.chinese_typo_error_rate)
            config.chinese_typo_min_freq = chinese_typo_config.get("min_freq", config.chinese_typo_min_freq)
            config.chinese_typo_tone_error_rate = chinese_typo_config.get(
                "tone_error_rate", config.chinese_typo_tone_error_rate
            )
            config.chinese_typo_word_replace_rate = chinese_typo_config.get(
                "word_replace_rate", config.chinese_typo_word_replace_rate
            )

        def response_splitter(parent: dict):
            response_splitter_config = parent["response_splitter"]
            config.enable_response_splitter = response_splitter_config.get(
                "enable_response_splitter", config.enable_response_splitter
            )
            config.response_max_length = response_splitter_config.get("response_max_length", config.response_max_length)
            config.response_max_sentence_num = response_splitter_config.get(
                "response_max_sentence_num", config.response_max_sentence_num
            )
            if config.INNER_VERSION in SpecifierSet(">=1.4.2"):
                config.enable_kaomoji_protection = response_splitter_config.get(
                    "enable_kaomoji_protection", config.enable_kaomoji_protection
                )
            if config.INNER_VERSION in SpecifierSet(">=1.6.0"):
                config.model_max_output_length = response_splitter_config.get(
                    "model_max_output_length", config.model_max_output_length
                )

        def groups(parent: dict):
            groups_config = parent["groups"]
            # config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
            config.talk_allowed_groups = set(str(group) for group in groups_config.get("talk_allowed", []))
            # config.talk_frequency_down_groups = set(groups_config.get("talk_frequency_down", []))
            config.talk_frequency_down_groups = set(
                str(group) for group in groups_config.get("talk_frequency_down", [])
            )
            # config.ban_user_id = set(groups_config.get("ban_user_id", []))
            config.ban_user_id = set(str(user) for user in groups_config.get("ban_user_id", []))

        def experimental(parent: dict):
            experimental_config = parent["experimental"]
            config.enable_friend_chat = experimental_config.get("enable_friend_chat", config.enable_friend_chat)
            # config.enable_think_flow = experimental_config.get("enable_think_flow", config.enable_think_flow)
            config.talk_allowed_private = set(str(user) for user in experimental_config.get("talk_allowed_private", []))
            if config.INNER_VERSION in SpecifierSet(">=1.1.0"):
                config.enable_pfc_chatting = experimental_config.get("pfc_chatting", config.enable_pfc_chatting)

        # 版本表达式:>=1.0.0,<2.0.0
        # 允许字段:func: method, support: str, notice: str, necessary: bool
        # 如果使用 notice 字段,在该组配置加载时,会展示该字段对用户的警示
        # 例如:"notice": "personality 将在 1.3.2 后被移除",那么在有效版本中的用户就会虽然可以
        # 正常执行程序,但是会看到这条自定义提示

        # 版本格式:主版本号.次版本号.修订号,版本号递增规则如下:
        # 主版本号:当你做了不兼容的 API 修改,
        # 次版本号:当你做了向下兼容的功能性新增,
        # 修订号:当你做了向下兼容的问题修正。
        # 先行版本号及版本编译信息可以加到"主版本号.次版本号.修订号"的后面,作为延伸。

        # 如果你做了break的修改,就应该改动主版本号
        # 如果做了一个兼容修改,就不应该要求这个选项是必须的!
        include_configs = {
            "bot": {"func": bot, "support": ">=0.0.0"},
            "groups": {"func": groups, "support": ">=0.0.0"},
            "personality": {"func": personality, "support": ">=0.0.0"},
            "identity": {"func": identity, "support": ">=1.2.4"},
            "emoji": {"func": emoji, "support": ">=0.0.0"},
            "model": {"func": model, "support": ">=0.0.0"},
            "memory": {"func": memory, "support": ">=0.0.0", "necessary": False},
            "mood": {"func": mood, "support": ">=0.0.0"},
            "remote": {"func": remote, "support": ">=0.0.10", "necessary": False},
            "keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False},
            "chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False},
            "response_splitter": {"func": response_splitter, "support": ">=0.0.11", "necessary": False},
            "experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False},
            "chat": {"func": chat, "support": ">=1.6.0", "necessary": False},
            "normal_chat": {"func": normal_chat, "support": ">=1.6.0", "necessary": False},
            "focus_chat": {"func": focus_chat, "support": ">=1.6.0", "necessary": False},
        }

        # 原地修改,将 字符串版本表达式 转换成 版本对象
        for key in include_configs:
            item_support = include_configs[key]["support"]
            include_configs[key]["support"] = cls.convert_to_specifierset(item_support)

        if os.path.exists(config_path):
            with open(config_path, "rb") as f:
                try:
                    toml_dict = tomli.load(f)
                except tomli.TOMLDecodeError as e:
                    logger.critical(f"配置文件bot_config.toml填写有误,请检查第{e.lineno}行第{e.colno}处:{e.msg}")
                    exit(1)

            # 获取配置文件版本
            config.INNER_VERSION = cls.get_config_version(toml_dict)

            # 如果在配置中找到了需要的项,调用对应项的闭包函数处理
            for key in include_configs:
                if key in toml_dict:
                    group_specifierset: SpecifierSet = include_configs[key]["support"]

                    # 检查配置文件版本是否在支持范围内
                    if config.INNER_VERSION in group_specifierset:
                        # 如果版本在支持范围内,检查是否存在通知
                        if "notice" in include_configs[key]:
                            logger.warning(include_configs[key]["notice"])

                        include_configs[key]["func"](toml_dict)

                    else:
                        # 如果版本不在支持范围内,崩溃并提示用户
                        logger.error(
                            f"配置文件中的 '{key}' 字段的版本 ({config.INNER_VERSION}) 不在支持范围内。\n"
                            f"当前程序仅支持以下版本范围: {group_specifierset}"
                        )
                        raise InvalidVersion(f"当前程序仅支持以下版本范围: {group_specifierset}")

                # 如果 necessary 项目存在,而且显式声明是 False,进入特殊处理
                elif "necessary" in include_configs[key] and include_configs[key].get("necessary") is False:
                    # 通过 pass 处理的项虽然直接忽略也是可以的,但是为了不增加理解困难,依然需要在这里显式处理
                    if key == "keywords_reaction":
                        pass

                else:
                    # 如果用户根本没有需要的配置项,提示缺少配置
                    logger.error(f"配置文件中缺少必需的字段: '{key}'")
                    raise KeyError(f"配置文件中缺少必需的字段: '{key}'")

            # identity_detail字段非空检查
            if not config.identity_detail:
                logger.error("配置文件错误:[identity] 部分的 identity_detail 不能为空字符串")
                raise ValueError("配置文件错误:[identity] 部分的 identity_detail 不能为空字符串")

            logger.success(f"成功加载配置文件: {config_path}")
        return config


def load_config(config_path: str) -> Config:
    """
    加载配置文件
    :param config_path: 配置文件路径
    :return: Config对象
    """
    # 读取配置文件
    with open(config_path, "r", encoding="utf-8") as f:
        config_data = tomlkit.load(f)

    # 创建Config对象
    try:
        return Config.from_dict(config_data)
    except Exception as e:
        logger.critical("配置文件解析失败")
        raise e

# 获取配置文件路径
logger.info(f"MaiCore当前版本: {mai_version}")
logger.info(f"MaiCore当前版本: {MMC_VERSION}")
update_config()

bot_config_floder_path = BotConfig.get_config_dir()
logger.info(f"正在品鉴配置文件目录: {bot_config_floder_path}")

bot_config_path = os.path.join(bot_config_floder_path, "bot_config.toml")

if os.path.exists(bot_config_path):
    # 如果开发环境配置文件不存在,则使用默认配置文件
    logger.info(f"异常的新鲜,异常的美味: {bot_config_path}")
else:
    # 配置文件不存在
    logger.error("配置文件不存在,请检查路径: {bot_config_path}")
    raise FileNotFoundError(f"配置文件不存在: {bot_config_path}")

global_config = BotConfig.load_config(config_path=bot_config_path)
logger.info("正在品鉴配置文件...")
global_config = load_config(config_path=f"{CONFIG_DIR}/bot_config.toml")
logger.info("非常的新鲜,非常的美味!")
src/config/config_base.py (new file, 116 lines)
@@ -0,0 +1,116 @@
from dataclasses import dataclass, fields, MISSING
from typing import TypeVar, Type, Any, get_origin, get_args

T = TypeVar("T", bound="ConfigBase")

TOML_DICT_TYPE = {
    int,
    float,
    str,
    bool,
    list,
    dict,
}


@dataclass
class ConfigBase:
    """配置类的基类"""

    @classmethod
    def from_dict(cls: Type[T], data: dict[str, Any]) -> T:
        """从字典加载配置字段"""
        if not isinstance(data, dict):
            raise TypeError(f"Expected a dictionary, got {type(data).__name__}")

        init_args: dict[str, Any] = {}

        for f in fields(cls):
            field_name = f.name

            if field_name.startswith("_"):
                # 跳过以 _ 开头的字段
                continue

            if field_name not in data:
                if f.default is not MISSING or f.default_factory is not MISSING:
                    # 跳过未提供且有默认值/默认构造方法的字段
                    continue
                else:
                    raise ValueError(f"Missing required field: '{field_name}'")

            value = data[field_name]
            field_type = f.type

            try:
                init_args[field_name] = cls._convert_field(value, field_type)
            except TypeError as e:
                raise TypeError(f"Field '{field_name}' has a type error: {e}") from e
            except Exception as e:
                raise RuntimeError(f"Failed to convert field '{field_name}' to target type: {e}") from e

        return cls(**init_args)

    @classmethod
    def _convert_field(cls, value: Any, field_type: Type[Any]) -> Any:
        """
        转换字段值为指定类型

        1. 对于嵌套的 dataclass,递归调用相应的 from_dict 方法
        2. 对于泛型集合类型(list, set, tuple),递归转换每个元素
        3. 对于基础类型(int, str, float, bool),直接转换
        4. 对于其他类型,尝试直接转换,如果失败则抛出异常
        """

        # 如果是嵌套的 dataclass,递归调用 from_dict 方法
        if isinstance(field_type, type) and issubclass(field_type, ConfigBase):
            if not isinstance(value, dict):
                raise TypeError(f"Expected a dictionary for {field_type.__name__}, got {type(value).__name__}")
            return field_type.from_dict(value)

        # 处理泛型集合类型(list, set, tuple)
        field_origin_type = get_origin(field_type)
        field_type_args = get_args(field_type)

        if field_origin_type in {list, set, tuple}:
            # 检查提供的value是否为list
            if not isinstance(value, list):
                raise TypeError(f"Expected a list for {field_type.__name__}, got {type(value).__name__}")

            if field_origin_type is list:
                return [cls._convert_field(item, field_type_args[0]) for item in value]
            elif field_origin_type is set:
                return {cls._convert_field(item, field_type_args[0]) for item in value}
            elif field_origin_type is tuple:
                # 检查提供的value长度是否与类型参数一致
                if len(value) != len(field_type_args):
                    raise TypeError(
                        f"Expected {len(field_type_args)} items for {field_type.__name__}, got {len(value)}"
                    )
                return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args))

        if field_origin_type is dict:
            # 检查提供的value是否为dict
            if not isinstance(value, dict):
                raise TypeError(f"Expected a dictionary for {field_type.__name__}, got {type(value).__name__}")

            # 检查字典的键值类型
            if len(field_type_args) != 2:
                raise TypeError(f"Expected a dictionary with two type arguments for {field_type.__name__}")
            key_type, value_type = field_type_args

            return {cls._convert_field(k, key_type): cls._convert_field(v, value_type) for k, v in value.items()}

        # 处理基础类型,例如 int, str 等
        if field_type is Any or isinstance(value, field_type):
            return value

        # 其他类型,尝试直接转换
        try:
            return field_type(value)
        except (ValueError, TypeError) as e:
            raise TypeError(f"Cannot convert {type(value).__name__} to {field_type.__name__}") from e

    def __str__(self):
        """返回配置类的字符串表示"""
        return f"{self.__class__.__name__}({', '.join(f'{f.name}={getattr(self, f.name)}' for f in fields(self))})"
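from_dict walks the dataclass fields, recurses into nested ConfigBase subclasses, converts typed collections element by element, and only complains when a field has neither a value nor a default. A minimal usage sketch with throwaway config classes (these classes are illustrative, not part of the PR):

from dataclasses import dataclass, field
from src.config.config_base import ConfigBase

@dataclass
class ServerDemo(ConfigBase):
    host: str
    ports: list[int] = field(default_factory=list)

@dataclass
class DemoConfig(ConfigBase):
    server: ServerDemo          # nested section: handled via recursive from_dict
    retries: int = 3            # defaulted field: may be omitted from the dict

cfg = DemoConfig.from_dict({"server": {"host": "localhost", "ports": [8080, 8081]}})
print(cfg.retries, cfg.server.ports)  # 3 [8080, 8081]

try:
    DemoConfig.from_dict({})
except ValueError as e:
    print(e)  # Missing required field: 'server'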
src/config/official_configs.py (new file, 399 lines)
@@ -0,0 +1,399 @@
from dataclasses import dataclass, field
from typing import Any

from src.config.config_base import ConfigBase

"""
须知:
1. 本文件中记录了所有的配置项
2. 所有新增的class都需要继承自ConfigBase
3. 所有新增的class都应在config.py中的Config类中添加字段
4. 对于新增的字段,若为可选项,则应在其后添加field()并设置default_factory或default
"""


@dataclass
class BotConfig(ConfigBase):
    """QQ机器人配置类"""

    qq_account: str
    """QQ账号"""

    nickname: str
    """昵称"""

    alias_names: list[str] = field(default_factory=lambda: [])
    """别名列表"""


@dataclass
class ChatTargetConfig(ConfigBase):
    """
    聊天目标配置类
    此类中有聊天的群组和用户配置
    """

    talk_allowed_groups: set[str] = field(default_factory=lambda: set())
    """允许聊天的群组列表"""

    talk_frequency_down_groups: set[str] = field(default_factory=lambda: set())
    """降低聊天频率的群组列表"""

    ban_user_id: set[str] = field(default_factory=lambda: set())
    """禁止聊天的用户列表"""


@dataclass
class PersonalityConfig(ConfigBase):
    """人格配置类"""

    personality_core: str
    """核心人格"""

    expression_style: str
    """表达风格"""

    personality_sides: list[str] = field(default_factory=lambda: [])
    """人格侧写"""


@dataclass
class IdentityConfig(ConfigBase):
    """个体特征配置类"""

    height: int = 170
    """身高(单位:厘米)"""

    weight: float = 50
    """体重(单位:千克)"""

    age: int = 18
    """年龄(单位:岁)"""

    gender: str = "女"
    """性别(男/女)"""

    appearance: str = "可爱"
    """外貌描述"""

    identity_detail: list[str] = field(default_factory=lambda: [])
    """身份特征"""


@dataclass
class PlatformsConfig(ConfigBase):
    """平台配置类"""

    qq: str
    """QQ适配器连接URL配置"""


@dataclass
class ChatConfig(ConfigBase):
    """聊天配置类"""

    allow_focus_mode: bool = True
    """是否允许专注聊天状态"""

    base_normal_chat_num: int = 3
    """最多允许多少个群进行普通聊天"""

    base_focused_chat_num: int = 2
    """最多允许多少个群进行专注聊天"""

    observation_context_size: int = 12
    """可观察到的最长上下文大小,超过这个值的上下文会被压缩"""

    message_buffer: bool = True
    """消息缓冲器"""

    ban_words: set[str] = field(default_factory=lambda: set())
    """过滤词列表"""

    ban_msgs_regex: set[str] = field(default_factory=lambda: set())
    """过滤正则表达式列表"""


@dataclass
class NormalChatConfig(ConfigBase):
    """普通聊天配置类"""

    reasoning_model_probability: float = 0.3
    """
    发言时选择推理模型的概率(0-1之间)
    选择普通模型的概率为 1 - reasoning_model_probability
    """

    emoji_chance: float = 0.2
    """发送表情包的基础概率"""

    thinking_timeout: int = 120
    """最长思考时间"""

    willing_mode: str = "classical"
    """意愿模式"""

    response_willing_amplifier: float = 1.0
    """回复意愿放大系数"""

    response_interested_rate_amplifier: float = 1.0
    """回复兴趣度放大系数"""

    down_frequency_rate: float = 3.0
    """降低回复频率的群组回复意愿降低系数"""

    emoji_response_penalty: float = 0.0
    """表情包回复惩罚系数"""

    mentioned_bot_inevitable_reply: bool = False
    """提及 bot 必然回复"""

    at_bot_inevitable_reply: bool = False
    """@bot 必然回复"""


@dataclass
class FocusChatConfig(ConfigBase):
    """专注聊天配置类"""

    reply_trigger_threshold: float = 3.0
    """心流聊天触发阈值,越低越容易触发"""

    default_decay_rate_per_second: float = 0.98
    """默认衰减率,越大衰减越慢"""

    consecutive_no_reply_threshold: int = 3
    """连续不回复的次数阈值"""

    compressed_length: int = 5
    """心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5"""

    compress_length_limit: int = 5
    """最多压缩份数,超过该数值的压缩上下文会被删除"""


@dataclass
class EmojiConfig(ConfigBase):
    """表情包配置类"""

    max_reg_num: int = 200
    """表情包最大注册数量"""

    do_replace: bool = True
    """达到最大注册数量时替换旧表情包"""

    check_interval: int = 120
    """表情包检查间隔(分钟)"""

    save_pic: bool = False
    """是否保存图片"""

    cache_emoji: bool = True
    """是否缓存表情包"""

    steal_emoji: bool = True
    """是否偷取表情包,让麦麦可以发送她保存的这些表情包"""

    content_filtration: bool = False
    """是否开启表情包过滤"""

    filtration_prompt: str = "符合公序良俗"
    """表情包过滤要求"""


@dataclass
class MemoryConfig(ConfigBase):
    """记忆配置类"""

    memory_build_interval: int = 600
    """记忆构建间隔(秒)"""

    memory_build_distribution: tuple[
        float,
        float,
        float,
        float,
        float,
        float,
    ] = field(default_factory=lambda: (6.0, 3.0, 0.6, 32.0, 12.0, 0.4))
    """记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重"""

    memory_build_sample_num: int = 8
    """记忆构建采样数量"""

    memory_build_sample_length: int = 40
    """记忆构建采样长度"""

    memory_compress_rate: float = 0.1
    """记忆压缩率"""

    forget_memory_interval: int = 1000
    """记忆遗忘间隔(秒)"""

    memory_forget_time: int = 24
    """记忆遗忘时间(小时)"""

    memory_forget_percentage: float = 0.01
    """记忆遗忘比例"""

    consolidate_memory_interval: int = 1000
    """记忆整合间隔(秒)"""

    consolidation_similarity_threshold: float = 0.7
    """整合相似度阈值"""

    consolidate_memory_percentage: float = 0.01
    """整合检查节点比例"""

    memory_ban_words: list[str] = field(default_factory=lambda: ["表情包", "图片", "回复", "聊天记录"])
    """不允许记忆的词列表"""
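memory_build_distribution packs two Gaussian components as consecutive (mean, std, weight) triples. The sampler is not part of this diff, but a weight-proportional mixture draw is the natural reading of the field; a sketch, with a made-up function name:

import random

def sample_memory_offset(dist=(6.0, 3.0, 0.6, 32.0, 12.0, 0.4)) -> float:
    """Draw one sample from the two-component Gaussian mixture."""
    mu1, sigma1, w1, mu2, sigma2, w2 = dist
    if random.random() < w1 / (w1 + w2):  # pick a component by weight
        return random.gauss(mu1, sigma1)
    return random.gauss(mu2, sigma2)

print(sample_memory_offset())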
@dataclass
class MoodConfig(ConfigBase):
    """情绪配置类"""

    mood_update_interval: int = 1
    """情绪更新间隔(秒)"""

    mood_decay_rate: float = 0.95
    """情绪衰减率"""

    mood_intensity_factor: float = 0.7
    """情绪强度因子"""


@dataclass
class KeywordRuleConfig(ConfigBase):
    """关键词规则配置类"""

    enable: bool = True
    """是否启用关键词规则"""

    keywords: list[str] = field(default_factory=lambda: [])
    """关键词列表"""

    regex: list[str] = field(default_factory=lambda: [])
    """正则表达式列表"""

    reaction: str = ""
    """关键词触发的反应"""


@dataclass
class KeywordReactionConfig(ConfigBase):
    """关键词配置类"""

    enable: bool = True
    """是否启用关键词反应"""

    rules: list[KeywordRuleConfig] = field(default_factory=lambda: [])
    """关键词反应规则列表"""


@dataclass
class ChineseTypoConfig(ConfigBase):
    """中文错别字配置类"""

    enable: bool = True
    """是否启用中文错别字生成器"""

    error_rate: float = 0.01
    """单字替换概率"""

    min_freq: int = 9
    """最小字频阈值"""

    tone_error_rate: float = 0.1
    """声调错误概率"""

    word_replace_rate: float = 0.006
    """整词替换概率"""


@dataclass
class ResponseSplitterConfig(ConfigBase):
    """回复分割器配置类"""

    enable: bool = True
    """是否启用回复分割器"""

    max_length: int = 256
    """回复允许的最大长度"""

    max_sentence_num: int = 3
    """回复允许的最大句子数"""

    enable_kaomoji_protection: bool = False
    """是否启用颜文字保护"""


@dataclass
class TelemetryConfig(ConfigBase):
    """遥测配置类"""

    enable: bool = True
    """是否启用遥测"""


@dataclass
class ExperimentalConfig(ConfigBase):
    """实验功能配置类"""

    enable_friend_chat: bool = False
    """是否启用好友聊天"""

    talk_allowed_private: set[str] = field(default_factory=lambda: set())
    """允许聊天的私聊列表"""

    pfc_chatting: bool = False
    """是否启用PFC"""


@dataclass
class ModelConfig(ConfigBase):
    """模型配置类"""

    model_max_output_length: int = 800  # 最大回复长度

    reasoning: dict[str, Any] = field(default_factory=lambda: {})
    """推理模型配置"""

    normal: dict[str, Any] = field(default_factory=lambda: {})
    """普通模型配置"""

    topic_judge: dict[str, Any] = field(default_factory=lambda: {})
    """主题判断模型配置"""

    summary: dict[str, Any] = field(default_factory=lambda: {})
    """摘要模型配置"""

    vlm: dict[str, Any] = field(default_factory=lambda: {})
    """视觉语言模型配置"""

    heartflow: dict[str, Any] = field(default_factory=lambda: {})
    """心流模型配置"""

    observation: dict[str, Any] = field(default_factory=lambda: {})
    """观察模型配置"""

    sub_heartflow: dict[str, Any] = field(default_factory=lambda: {})
    """子心流模型配置"""

    plan: dict[str, Any] = field(default_factory=lambda: {})
    """计划模型配置"""

    embedding: dict[str, Any] = field(default_factory=lambda: {})
    """嵌入模型配置"""

    pfc_action_planner: dict[str, Any] = field(default_factory=lambda: {})
    """PFC动作规划模型配置"""

    pfc_chat: dict[str, Any] = field(default_factory=lambda: {})
    """PFC聊天模型配置"""

    pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
    """PFC回复检查模型配置"""

    tool_use: dict[str, Any] = field(default_factory=lambda: {})
    """工具使用模型配置"""
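With every section now a dataclass, bot_config.toml maps one-to-one onto Config's fields, and sections whose class has only defaulted fields may be left as empty tables. A minimal, abbreviated, untested sketch of a file that would satisfy the required fields, parsed the same way load_config does:

import tomlkit
from src.config.config import Config  # the Config class defined above

MINIMAL_TOML = """
[bot]
qq_account = "114514"
nickname = "麦麦"

[personality]
personality_core = "用一句话描述人格"
expression_style = "简短直接"

[platforms]
qq = "ws://127.0.0.1:8095"

[chat_target]
[identity]
[chat]
[normal_chat]
[focus_chat]
[emoji]
[memory]
[mood]
[keyword_reaction]
[chinese_typo]
[response_splitter]
[telemetry]
[experimental]
[model]
"""

config = Config.from_dict(tomlkit.parse(MINIMAL_TOML))
print(config.normal_chat.willing_mode)  # "classical"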
@@ -114,7 +114,7 @@ class ActionPlanner:
            request_type="action_planning",
        )
        self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
        self.name = global_config.BOT_NICKNAME
        self.name = global_config.bot.nickname
        self.private_name = private_name
        self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
        # self.action_planner_info = ActionPlannerInfo() # 移除未使用的变量

@@ -140,7 +140,7 @@ class ActionPlanner:
        # (这部分逻辑不变)
        time_since_last_bot_message_info = ""
        try:
            bot_id = str(global_config.BOT_QQ)
            bot_id = str(global_config.bot.qq_account)
            if hasattr(observation_info, "chat_history") and observation_info.chat_history:
                for i in range(len(observation_info.chat_history) - 1, -1, -1):
                    msg = observation_info.chat_history[i]

@@ -323,7 +323,7 @@ class ChatObserver:
        for msg in messages:
            try:
                user_info = UserInfo.from_dict(msg.get("user_info", {}))
                if user_info.user_id == global_config.BOT_QQ:
                if user_info.user_id == global_config.bot.qq_account:
                    self.update_bot_speak_time(msg["time"])
                else:
                    self.update_user_speak_time(msg["time"])

@@ -42,8 +42,8 @@ class DirectMessageSender:
        # 获取麦麦的信息
        bot_user_info = UserInfo(
            user_id=global_config.BOT_QQ,
            user_nickname=global_config.BOT_NICKNAME,
            user_id=global_config.bot.qq_account,
            user_nickname=global_config.bot.nickname,
            platform=chat_stream.platform,
        )

@@ -42,13 +42,14 @@ class GoalAnalyzer:
    """对话目标分析器"""

    def __init__(self, stream_id: str, private_name: str):
        # TODO: API-Adapter修改标记
        self.llm = LLMRequest(
            model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
            model=global_config.model.normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
        )

        self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
        self.name = global_config.BOT_NICKNAME
        self.nick_name = global_config.BOT_ALIAS_NAMES
        self.name = global_config.bot.nickname
        self.nick_name = global_config.bot.alias_names
        self.private_name = private_name
        self.chat_observer = ChatObserver.get_instance(stream_id, private_name)

@@ -14,9 +14,10 @@ class KnowledgeFetcher:
    """知识调取器"""

    def __init__(self, private_name: str):
        # TODO: API-Adapter修改标记
        self.llm = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            model=global_config.model.normal,
            temperature=global_config.model.normal["temp"],
            max_tokens=1000,
            request_type="knowledge_fetch",
        )

@@ -16,7 +16,7 @@ class ReplyChecker:
        self.llm = LLMRequest(
            model=global_config.llm_PFC_reply_checker, temperature=0.50, max_tokens=1000, request_type="reply_check"
        )
        self.name = global_config.BOT_NICKNAME
        self.name = global_config.bot.nickname
        self.private_name = private_name
        self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
        self.max_retries = 3 # 最大重试次数

@@ -43,7 +43,7 @@ class ReplyChecker:
        bot_messages = []
        for msg in reversed(chat_history):
            user_info = UserInfo.from_dict(msg.get("user_info", {}))
            if str(user_info.user_id) == str(global_config.BOT_QQ): # 确保比较的是字符串
            if str(user_info.user_id) == str(global_config.bot.qq_account): # 确保比较的是字符串
                bot_messages.append(msg.get("processed_plain_text", ""))
                if len(bot_messages) >= 2: # 只和最近的两条比较
                    break

@@ -93,7 +93,7 @@ class ReplyGenerator:
            request_type="reply_generation",
        )
        self.personality_info = Individuality.get_instance().get_prompt(x_person=2, level=3)
        self.name = global_config.BOT_NICKNAME
        self.name = global_config.bot.nickname
        self.private_name = private_name
        self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
        self.reply_checker = ReplyChecker(stream_id, private_name)

@@ -19,7 +19,7 @@ class Waiter:

    def __init__(self, stream_id: str, private_name: str):
        self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
        self.name = global_config.BOT_NICKNAME
        self.name = global_config.bot.nickname
        self.private_name = private_name
        # self.wait_accumulated_time = 0 # 不再需要累加计时

@@ -16,7 +16,7 @@ class MessageProcessor:
    @staticmethod
    def _check_ban_words(text: str, chat, userinfo) -> bool:
        """检查消息中是否包含过滤词"""
        for word in global_config.ban_words:
        for word in global_config.chat.ban_words:
            if word in text:
                logger.info(
                    f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"

@@ -28,7 +28,7 @@ class MessageProcessor:
    @staticmethod
    def _check_ban_regex(text: str, chat, userinfo) -> bool:
        """检查消息是否匹配过滤正则表达式"""
        for pattern in global_config.ban_msgs_regex:
        for pattern in global_config.chat.ban_msgs_regex:
            if pattern.search(text):
                logger.info(
                    f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"

src/main.py
@@ -40,7 +40,7 @@ class MainSystem:

    async def initialize(self):
        """初始化系统组件"""
        logger.debug(f"正在唤醒{global_config.BOT_NICKNAME}......")
        logger.debug(f"正在唤醒{global_config.bot.nickname}......")

        # 其他初始化任务
        await asyncio.gather(self._init_components())

@@ -84,7 +84,7 @@ class MainSystem:
        asyncio.create_task(chat_manager._auto_save_task())

        # 使用HippocampusManager初始化海马体
        self.hippocampus_manager.initialize(global_config=global_config)
        self.hippocampus_manager.initialize()
        # await asyncio.sleep(0.5) #防止logger输出飞了

        # 将bot.py中的chat_bot.message_process消息处理函数注册到api.py的消息处理基类中

@@ -92,15 +92,15 @@ class MainSystem:

        # 初始化个体特征
        self.individuality.initialize(
            bot_nickname=global_config.BOT_NICKNAME,
            personality_core=global_config.personality_core,
            personality_sides=global_config.personality_sides,
            identity_detail=global_config.identity_detail,
            height=global_config.height,
            weight=global_config.weight,
            age=global_config.age,
            gender=global_config.gender,
            appearance=global_config.appearance,
            bot_nickname=global_config.bot.nickname,
            personality_core=global_config.personality.personality_core,
            personality_sides=global_config.personality.personality_sides,
            identity_detail=global_config.identity.identity_detail,
            height=global_config.identity.height,
            weight=global_config.identity.weight,
            age=global_config.identity.age,
            gender=global_config.identity.gender,
            appearance=global_config.identity.appearance,
        )
        logger.success("个体特征初始化成功")

@@ -141,7 +141,7 @@ class MainSystem:
    async def build_memory_task():
        """记忆构建任务"""
        while True:
            await asyncio.sleep(global_config.build_memory_interval)
            await asyncio.sleep(global_config.memory.memory_build_interval)
            logger.info("正在进行记忆构建")
            await HippocampusManager.get_instance().build_memory()

@@ -149,16 +149,18 @@ class MainSystem:
    async def forget_memory_task():
        """记忆遗忘任务"""
        while True:
            await asyncio.sleep(global_config.forget_memory_interval)
            await asyncio.sleep(global_config.memory.forget_memory_interval)
            print("\033[1;32m[记忆遗忘]\033[0m 开始遗忘记忆...")
            await HippocampusManager.get_instance().forget_memory(percentage=global_config.memory_forget_percentage)
            await HippocampusManager.get_instance().forget_memory(
                percentage=global_config.memory.memory_forget_percentage
            )
            print("\033[1;32m[记忆遗忘]\033[0m 记忆遗忘完成")

    @staticmethod
    async def consolidate_memory_task():
        """记忆整合任务"""
        while True:
            await asyncio.sleep(global_config.consolidate_memory_interval)
            await asyncio.sleep(global_config.memory.consolidate_memory_interval)
            print("\033[1;32m[记忆整合]\033[0m 开始整合记忆...")
            await HippocampusManager.get_instance().consolidate_memory()
            print("\033[1;32m[记忆整合]\033[0m 记忆整合完成")
@@ -34,14 +34,14 @@ class MoodUpdateTask(AsyncTask):
    def __init__(self):
        super().__init__(
            task_name="Mood Update Task",
            wait_before_start=global_config.mood_update_interval,
            run_interval=global_config.mood_update_interval,
            wait_before_start=global_config.mood.mood_update_interval,
            run_interval=global_config.mood.mood_update_interval,
        )

        # 从配置文件获取衰减率
        self.decay_rate_valence: float = 1 - global_config.mood_decay_rate
        self.decay_rate_valence: float = 1 - global_config.mood.mood_decay_rate
        """愉悦度衰减率"""
        self.decay_rate_arousal: float = 1 - global_config.mood_decay_rate
        self.decay_rate_arousal: float = 1 - global_config.mood.mood_decay_rate
        """唤醒度衰减率"""

        self.last_update = time.time()
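Note the inversion here: the stored decay_rate_* is 1 - mood.mood_decay_rate, so a configured 0.95 becomes a per-update shrink factor of 0.05. A toy sketch of one plausible update step under that reading (an assumption, not the repo's actual update code):

def decay_toward_neutral(value: float, decay_rate: float, dt: float = 1.0) -> float:
    # Shrink `value` toward 0; `decay_rate` is the stored 1 - mood_decay_rate.
    return value * (1 - decay_rate) ** dt

valence = 0.8
valence = decay_toward_neutral(valence, decay_rate=0.05)  # -> 0.76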
@@ -44,7 +44,7 @@ class ChangeMoodTool(BaseTool):
            _ori_response = ",".join(response_set)
            # _stance, emotion = await gpt._get_emotion_tags(ori_response, message_processed_plain_text)
            emotion = "平静"
            mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
            mood_manager.update_mood_from_emotion(emotion, global_config.mood.mood_intensity_factor)
            return {"name": "change_mood", "content": f"你的心情刚刚变化了,现在的心情是: {emotion}"}
        except Exception as e:
            logger.error(f"心情改变工具执行失败: {str(e)}")
@@ -15,7 +15,7 @@ logger = get_logger("tool_use")
class ToolUser:
    def __init__(self):
        self.llm_model_tool = LLMRequest(
            model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
            model=global_config.model.tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
        )

    @staticmethod

@@ -37,7 +37,7 @@ class ToolUser:
        # print(f"intol111111111111111111111111111111111222222222222mid_memory_info:{mid_memory_info}")

        # 这些信息应该从调用者传入,而不是从self获取
        bot_name = global_config.BOT_NICKNAME
        bot_name = global_config.bot.nickname
        prompt = ""
        prompt += mid_memory_info
        prompt += "你正在思考如何回复群里的消息。\n"
@@ -1,104 +0,0 @@
[inner.version]
describe = "版本号"
important = true
can_edit = false

[bot.qq]
describe = "机器人的QQ号"
important = true
can_edit = true

[bot.nickname]
describe = "机器人的昵称"
important = true
can_edit = true

[bot.alias_names]
describe = "机器人的别名列表,该选项还在调试中,暂时未生效"
important = false
can_edit = true

[groups.talk_allowed]
describe = "可以回复消息的群号码列表"
important = true
can_edit = true

[groups.talk_frequency_down]
describe = "降低回复频率的群号码列表"
important = false
can_edit = true

[groups.ban_user_id]
describe = "禁止回复和读取消息的QQ号列表"
important = false
can_edit = true

[personality.personality_core]
describe = "用一句话或几句话描述人格的核心特点,建议20字以内"
important = true
can_edit = true

[personality.personality_sides]
describe = "用一句话或几句话描述人格的一些细节,条数任意,不能为0,该选项还在调试中"
important = false
can_edit = true

[identity.identity_detail]
describe = "身份特点列表,条数任意,不能为0,该选项还在调试中"
important = false
can_edit = true

[identity.age]
describe = "年龄,单位岁"
important = false
can_edit = true

[identity.gender]
describe = "性别"
important = false
can_edit = true

[identity.appearance]
describe = "外貌特征描述,该选项还在调试中,暂时未生效"
important = false
can_edit = true

[platforms.nonebot-qq]
describe = "nonebot-qq适配器提供的链接"
important = true
can_edit = true

[chat.allow_focus_mode]
describe = "是否允许专注聊天状态"
important = false
can_edit = true

[chat.base_normal_chat_num]
describe = "最多允许多少个群进行普通聊天"
important = false
can_edit = true

[chat.base_focused_chat_num]
describe = "最多允许多少个群进行专注聊天"
important = false
can_edit = true

[chat.observation_context_size]
describe = "观察到的最长上下文大小,建议15,太短太长都会导致脑袋尖尖"
important = false
can_edit = true

[chat.message_buffer]
describe = "启用消息缓冲器,启用此项以解决消息的拆分问题,但会使麦麦的回复延迟"
important = false
can_edit = true

[chat.ban_words]
describe = "需要过滤的消息列表"
important = false
can_edit = true

[chat.ban_msgs_regex]
describe = "需要过滤的消息(原始消息)匹配的正则表达式,匹配到的消息将被过滤(支持CQ码)"
important = false
can_edit = true
@@ -1,18 +1,10 @@
[inner]
version = "1.7.0"
version = "2.0.0"

#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件,请在修改后将version的值进行变更
#如果新增项目,请在BotConfig类下新增相应的变量
#1.如果你修改的是[]层级项目,例如你新增了 [memory],那么请在config.py的 load_config函数中的include_configs字典中新增"内容":{
#"func":memory,
#"support":">=0.0.0", #新的版本号
#"necessary":False #是否必须
#}
#2.如果你修改的是[]下的项目,例如你新增了[memory]下的 memory_ban_words ,那么请在config.py的 load_config函数中的 memory函数下新增版本判断:
# if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
#     config.memory_ban_words = set(memory_config.get("memory_ban_words", []))

#如果新增项目,请阅读src/config/official_configs.py中的说明
#
# 版本格式:主版本号.次版本号.修订号,版本号递增规则如下:
# 主版本号:当你做了不兼容的 API 修改,
# 次版本号:当你做了向下兼容的功能性新增,

@@ -21,11 +13,11 @@ version = "1.7.0"
#----以上是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----

[bot]
qq = 1145141919810
qq_account = 1145141919810
nickname = "麦麦"
alias_names = ["麦叠", "牢麦"] #该选项还在调试中,暂时未生效

[groups]
[chat_target]
talk_allowed = [
    123,
    123,

@@ -53,10 +45,13 @@ identity_detail = [
    "身份特点",
    "身份特点",
]# 条数任意,不能为0, 该选项还在调试中

#外貌特征
age = 20 # 年龄 单位岁
gender = "男" # 性别
appearance = "用几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效
age = 18 # 年龄 单位岁
gender = "女" # 性别
height = "170" # 身高(单位cm)
weight = "50" # 体重(单位kg)
appearance = "用一句或几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效

[platforms] # 必填项目,填写每个平台适配器提供的链接
qq="http://127.0.0.1:18002/api/message"

@@ -85,11 +80,10 @@ ban_msgs_regex = [

[normal_chat] #普通聊天
#一般回复参数
model_reasoning_probability = 0.7 # 麦麦回答时选择推理模型 模型的概率
model_normal_probability = 0.3 # 麦麦回答时选择一般模型 模型的概率
reasoning_model_probability = 0.3 # 麦麦回答时选择推理模型的概率(与之相对的,普通模型的概率为1 - reasoning_model_probability)

emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率,设置为1让麦麦自己决定发不发
thinking_timeout = 100 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢)
thinking_timeout = 120 # 麦麦最长思考时间,超过这个时间的思考会放弃(往往是api反应太慢)

willing_mode = "classical" # 回复意愿模式 —— 经典模式:classical,mxp模式:mxp,自定义模式:custom(需要你自己实现)
response_willing_amplifier = 1 # 麦麦回复意愿放大系数,一般为1
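The two old probability knobs collapse into one: p(reasoning) is configured directly and p(normal) is its complement, so the pair can no longer drift out of sum. A sketch of the selection this implies (hypothetical helper; the actual dispatch lives in the chat code):

import random

def pick_model(reasoning_model_probability: float = 0.3) -> str:
    # Choose the reasoning model with probability p, else the normal model.
    if random.random() < reasoning_model_probability:
        return "model.reasoning"
    return "model.normal"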
@@ -100,8 +94,8 @@ mentioned_bot_inevitable_reply = false # 提及 bot 必然回复
at_bot_inevitable_reply = false # @bot 必然回复

[focus_chat] #专注聊天
reply_trigger_threshold = 3.6 # 专注聊天触发阈值,越低越容易进入专注聊天
default_decay_rate_per_second = 0.95 # 默认衰减率,越大衰减越快,越高越难进入专注聊天
reply_trigger_threshold = 3.0 # 专注聊天触发阈值,越低越容易进入专注聊天
default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入专注聊天
consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天
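Read together, these knobs suggest an interest score that decays each second and flips a stream into focus mode once it crosses the threshold; with the new defaults (0.98, 3.0) scores persist longer and trigger sooner than before. A hypothetical sketch of that mechanic under one plausible reading (the config comment itself is ambiguous about the decay direction):

def step_interest(score: float, decay_rate_per_second: float = 0.98, dt: float = 1.0) -> float:
    # Multiplicative decay: after dt seconds the score shrinks by decay^dt.
    return score * (decay_rate_per_second ** dt)

def should_enter_focus(score: float, reply_trigger_threshold: float = 3.0) -> bool:
    return score >= reply_trigger_threshold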

# 以下选项暂时无效
@@ -110,20 +104,20 @@ compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下

[emoji]
max_emoji_num = 40 # 表情包最大数量
max_reach_deletion = true # 开启则在达到最大数量时删除表情包,关闭则达到最大数量时不删除,只是不会继续收集表情包
check_interval = 10 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
max_reg_num = 40 # 表情包最大注册数量
do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包
check_interval = 120 # 检查表情包(注册,破损,删除)的时间间隔(分钟)
save_pic = false # 是否保存图片
save_emoji = false # 是否保存表情包
cache_emoji = true # 是否缓存表情包
steal_emoji = true # 是否偷取表情包,让麦麦可以发送她保存的这些表情包
enable_check = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
check_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存
content_filtration = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
filtration_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存

[memory]
build_memory_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多
build_memory_distribution = [6.0,3.0,0.6,32.0,12.0,0.4] # 记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重
build_memory_sample_num = 8 # 采样数量,数值越高记忆采样次数越多
build_memory_sample_length = 40 # 采样长度,数值越高一段记忆内容越丰富
memory_build_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多
memory_build_distribution = [6.0, 3.0, 0.6, 32.0, 12.0, 0.4] # 记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重
memory_build_sample_num = 8 # 采样数量,数值越高记忆采样次数越多
memory_build_sample_length = 40 # 采样长度,数值越高一段记忆内容越丰富
memory_compress_rate = 0.1 # 记忆压缩率 控制记忆精简程度 建议保持默认,调高可以获得更多信息,但是冗余信息也会增多

forget_memory_interval = 1000 # 记忆遗忘间隔 单位秒 间隔越低,麦麦遗忘越频繁,记忆更精简,但更难学习

@@ -135,49 +129,45 @@ consolidation_similarity_threshold = 0.7 # 相似度阈值
consolidation_check_percentage = 0.01 # 检查节点比例

#不希望记忆的词,已经记忆的不会受到影响
memory_ban_words = [
    # "403","张三"
]
memory_ban_words = [ "表情包", "图片", "回复", "聊天记录" ]
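The six numbers in memory_build_distribution read as two weighted Gaussians, (mean, std, weight) twice, per the inline comment. A sketch of sampling from that mixture under this assumption (hypothetical helper, not the repo's sampler):

import random

def sample_build_offset(distribution: list[float]) -> float:
    # Unpack (mean, std, weight) for each of the two component Gaussians.
    m1, s1, w1, m2, s2, w2 = distribution
    if random.random() < w1 / (w1 + w2):
        return random.gauss(m1, s1)
    return random.gauss(m2, s2)

print(sample_build_offset([6.0, 3.0, 0.6, 32.0, 12.0, 0.4]))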
[mood]
mood_update_interval = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate = 0.95 # 情绪衰减率
mood_intensity_factor = 1.0 # 情绪强度因子
[keywords_reaction] # 针对某个关键词作出反应
[keyword_reaction] # 针对某个关键词作出反应
enable = true # 关键词反应功能的总开关

[[keywords_reaction.rules]] # 如果想要新增多个关键词,直接复制本条,修改keywords和reaction即可
[[keyword_reaction.rules]] # 如果想要新增多个关键词,直接复制本条,修改keywords和reaction即可
enable = true # 是否启用此条(为了人类在未来AI战争能更好地识别AI(bushi),默认开启)
keywords = ["人机", "bot", "机器", "入机", "robot", "机器人","ai","AI"] # 会触发反应的关键词
reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" # 触发之后添加的提示词

[[keywords_reaction.rules]] # 就像这样复制
[[keyword_reaction.rules]] # 就像这样复制
enable = false # 仅作示例,不会触发
keywords = ["测试关键词回复","test",""]
reaction = "回答“测试成功”" # 修复错误的引号

[[keywords_reaction.rules]] # 使用正则表达式匹配句式
[[keyword_reaction.rules]] # 使用正则表达式匹配句式
enable = false # 仅作示例,不会触发
regex = ["^(?P<n>\\S{1,20})是这样的$"] # 将匹配到的词汇命名为n,反应中对应的[n]会被替换为匹配到的内容,若不了解正则表达式请勿编写
reaction = "请按照以下模板造句:[n]是这样的,xx只要xx就可以,可是[n]要考虑的事情就很多了,比如什么时候xx,什么时候xx,什么时候xx。(请自由发挥替换xx部分,只需保持句式结构,同时表达一种将[n]过度重视的反讽意味)"
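The regex rule names a capture group n and promises that [n] in the reaction text is replaced by whatever matched. A sketch of that substitution (hypothetical helper; the real matcher may differ in details):

import re

def apply_regex_rule(text: str, patterns: list[str], reaction: str):
    for pattern in patterns:
        m = re.search(pattern, text)
        if m:
            out = reaction
            # Replace each [name] placeholder with its named-group match.
            for name, value in m.groupdict().items():
                out = out.replace(f"[{name}]", value)
            return out
    return None

print(apply_regex_rule("原神是这样的", ["^(?P<n>\\S{1,20})是这样的$"], "[n]是这样的,……"))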
[chinese_typo]
enable = true # 是否启用中文错别字生成器
error_rate=0.001 # 单字替换概率
error_rate=0.01 # 单字替换概率
min_freq=9 # 最小字频阈值
tone_error_rate=0.1 # 声调错误概率
word_replace_rate=0.006 # 整词替换概率
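error_rate is a per-character replacement probability, so the new 0.01 swaps roughly one character in a hundred. A toy sketch of just that single-character branch (hypothetical homophone table; the real generator also applies min_freq, tone_error_rate and word_replace_rate):

import random

def inject_typos(text: str, error_rate: float = 0.01) -> str:
    homophones = {"在": "再", "的": "地", "做": "作"}  # hypothetical table
    out = []
    for ch in text:
        if ch in homophones and random.random() < error_rate:
            out.append(homophones[ch])  # Bernoulli trial per character
        else:
            out.append(ch)
    return "".join(out)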
[response_splitter]
enable_response_splitter = true # 是否启用回复分割器
response_max_length = 256 # 回复允许的最大长度
response_max_sentence_num = 4 # 回复允许的最大句子数
enable = true # 是否启用回复分割器
max_length = 256 # 回复允许的最大长度
max_sentence_num = 4 # 回复允许的最大句子数
enable_kaomoji_protection = false # 是否启用颜文字保护

model_max_output_length = 256 # 模型单次返回的最大token数
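Besides the renames, model_max_output_length leaves this section and moves under [model] below. A sketch of what the splitter's two limits imply (assumed behavior; kaomoji protection omitted):

import re

def split_reply(text: str, max_length: int = 256, max_sentence_num: int = 4) -> list[str]:
    text = text[:max_length]  # hard length cap first
    # Split after sentence-ending punctuation, keeping the delimiters.
    sentences = [s for s in re.split(r"(?<=[。!?!?])", text) if s]
    return sentences[:max_sentence_num]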
[remote] #发送统计信息,主要是看全球有多少只麦麦
[telemetry] #发送统计信息,主要是看全球有多少只麦麦
enable = true

[experimental] #实验性功能
@@ -194,14 +184,17 @@ pfc_chatting = false # 是否启用PFC聊天,该功能仅作用于私聊,与
# stream = <true|false> : 用于指定模型是否是使用流式输出
# 如果不指定,则该项是 False

[model]
model_max_output_length = 800 # 模型单次返回的最大token数

#这个模型必须是推理模型
[model.llm_reasoning] # 一般聊天模式的推理回复模型
[model.reasoning] # 一般聊天模式的推理回复模型
name = "Pro/deepseek-ai/DeepSeek-R1"
provider = "SILICONFLOW"
pri_in = 1.0 #模型的输入价格(非必填,可以记录消耗)
pri_out = 4.0 #模型的输出价格(非必填,可以记录消耗)

[model.llm_normal] #V3 回复模型 专注和一般聊天模式共用的回复模型
[model.normal] #V3 回复模型 专注和一般聊天模式共用的回复模型
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2 #模型的输入价格(非必填,可以记录消耗)

@@ -209,13 +202,13 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
#默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数
temp = 0.2 #模型的温度,新V3建议0.1-0.3

[model.llm_topic_judge] #主题判断模型:建议使用qwen2.5 7b
[model.topic_judge] #主题判断模型:建议使用qwen2.5 7b
name = "Pro/Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW"
pri_in = 0.35
pri_out = 0.35

[model.llm_summary] #概括模型,建议使用qwen2.5 32b 及以上
[model.summary] #概括模型,建议使用qwen2.5 32b 及以上
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26

@@ -227,27 +220,27 @@ provider = "SILICONFLOW"
pri_in = 0.35
pri_out = 0.35

[model.llm_heartflow] # 用于控制麦麦是否参与聊天的模型
[model.heartflow] # 用于控制麦麦是否参与聊天的模型
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26
pri_out = 1.26

[model.llm_observation] #观察模型,压缩聊天内容,建议用免费的
[model.observation] #观察模型,压缩聊天内容,建议用免费的
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
name = "Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW"
pri_in = 0
pri_out = 0

[model.llm_sub_heartflow] #心流:认真水群时,生成麦麦的内心想法,必须使用具有工具调用能力的模型
[model.sub_heartflow] #心流:认真水群时,生成麦麦的内心想法,必须使用具有工具调用能力的模型
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
temp = 0.3 #模型的温度,新V3建议0.1-0.3

[model.llm_plan] #决策:认真水群时,负责决定麦麦该做什么
[model.plan] #决策:认真水群时,负责决定麦麦该做什么
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2

@@ -265,7 +258,7 @@ pri_out = 0
#私聊PFC:需要开启PFC功能,默认三个模型均为硅基流动v3,如果需要支持多人同时私聊或频繁调用,建议把其中的一个或两个换成官方v3或其它模型,以免撞到429

#PFC决策模型
[model.llm_PFC_action_planner]
[model.pfc_action_planner]
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
temp = 0.3

@@ -273,7 +266,7 @@ pri_in = 2
pri_out = 8

#PFC聊天模型
[model.llm_PFC_chat]
[model.pfc_chat]
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
temp = 0.3

@@ -281,7 +274,7 @@ pri_in = 2
pri_out = 8

#PFC检查模型
[model.llm_PFC_reply_checker]
[model.pfc_reply_checker]
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2

@@ -294,7 +287,7 @@ pri_out = 8
#以下模型暂时没有使用!!
#以下模型暂时没有使用!!

[model.llm_tool_use] #工具调用模型,需要使用支持工具调用的模型,建议使用qwen2.5 32b
[model.tool_use] #工具调用模型,需要使用支持工具调用的模型,建议使用qwen2.5 32b
name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
pri_in = 1.26

tests/test_config.py (new file)
@@ -0,0 +1,7 @@
from src.config.config import global_config


class TestConfig:
    def test_load(self):
        config = global_config
        print(config)
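A quick way to exercise this smoke test (assuming pytest is the project's runner) is pytest tests/test_config.py -s, where -s lets the print output through so the loaded config can be inspected.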