diff --git a/requirements.txt b/requirements.txt
index 8330c8d06..1e9e5ff25 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/run-WebUI.bat b/run-WebUI.bat
new file mode 100644
index 000000000..8fbbe3dbf
--- /dev/null
+++ b/run-WebUI.bat
@@ -0,0 +1,4 @@
+CHCP 65001
+@echo off
+python webui.py
+pause
\ No newline at end of file
diff --git a/run.sh b/run.sh
index c3f6969b6..663fc8a67 100644
--- a/run.sh
+++ b/run.sh
@@ -97,8 +97,8 @@ check_python() {
# 5/6: 选择分支
choose_branch() {
BRANCH=$(whiptail --title "🔀 [5/6] 选择 Maimbot 分支" --menu "请选择要安装的 Maimbot 分支:" 15 60 2 \
- "main" "稳定版本(推荐)" \
- "debug" "开发版本(可能不稳定)" 3>&1 1>&2 2>&3)
+ "main" "稳定版本(推荐,供下载使用)" \
+ "main-fix" "生产环境紧急修复" 3>&1 1>&2 2>&3)
if [[ -z "$BRANCH" ]]; then
BRANCH="main"
@@ -201,6 +201,8 @@ install_napcat() {
}
# 运行安装步骤
+whiptail --title "⚠️ 警告:安装前详阅" --msgbox "项目处于活跃开发阶段,代码可能随时更改\n文档未完善,有问题可以提交 Issue 或者 Discussion\nQQ机器人存在被限制风险,请自行了解,谨慎使用\n由于持续迭代,可能存在一些已知或未知的bug\n由于开发中,可能消耗较多token\n\n本脚本可能更新不及时,如遇到bug请优先尝试手动部署以确定是否为脚本问题" 14 60
+
check_system
check_mongodb
check_napcat
@@ -233,7 +235,7 @@ fi
if [[ "$IS_INSTALL_NAPCAT" == "true" ]]; then
echo -e "${GREEN}安装 NapCat...${RESET}"
- curl -o napcat.sh https://nclatest.znin.net/NapNeko/NapCat-Installer/main/script/install.sh && bash napcat.sh
+ curl -o napcat.sh https://nclatest.znin.net/NapNeko/NapCat-Installer/main/script/install.sh && bash napcat.sh --cli y --docker n
fi
echo -e "${GREEN}创建 Python 虚拟环境...${RESET}"
diff --git a/src/plugins/chat/__init__.py b/src/plugins/chat/__init__.py
index 6b8f639ae..6a30d3fba 100644
--- a/src/plugins/chat/__init__.py
+++ b/src/plugins/chat/__init__.py
@@ -15,7 +15,7 @@ from .bot import chat_bot
from .config import global_config
from .emoji_manager import emoji_manager
from .relationship_manager import relationship_manager
-from .willing_manager import willing_manager
+from ..willing.willing_manager import willing_manager
from .chat_stream import chat_manager
from ..memory_system.memory import hippocampus, memory_graph
from .bot import ChatBot
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 00c03f038..34c788d98 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -29,7 +29,7 @@ from .storage import MessageStorage
from .utils import calculate_typing_time, is_mentioned_bot_in_message
from .utils_image import image_path_to_base64
from .utils_user import get_user_nickname, get_user_cardname, get_groupname
-from .willing_manager import willing_manager # 导入意愿管理器
+from ..willing.willing_manager import willing_manager # 导入意愿管理器
from .message_base import UserInfo, GroupInfo, Seg
from ..utils.logger_config import LogClassification, LogModule
@@ -55,117 +55,18 @@ class ChatBot:
if not self._started:
self._started = True
- async def handle_notice(self, event: NoticeEvent, bot: Bot) -> None:
- """处理收到的通知"""
- # 戳一戳通知
- if isinstance(event, PokeNotifyEvent):
- # 不处理其他人的戳戳
- if not event.is_tome():
- return
-
- # 用户屏蔽,不区分私聊/群聊
- if event.user_id in global_config.ban_user_id:
- return
-
- reply_poke_probability = 1.0 # 回复戳一戳的概率,如果要改可以在这里改,暂不提取到配置文件
-
- if random() < reply_poke_probability:
- raw_message = "[戳了戳]你" # 默认类型
- if info := event.raw_info:
- poke_type = info[2].get("txt", "戳了戳") # 戳戳类型,例如“拍一拍”、“揉一揉”、“捏一捏”
- custom_poke_message = info[4].get("txt", "") # 自定义戳戳消息,若不存在会为空字符串
- raw_message = f"[{poke_type}]你{custom_poke_message}"
-
- raw_message += "(这是一个类似摸摸头的友善行为,而不是恶意行为,请不要作出攻击发言)"
- await self.directly_reply(raw_message, event.user_id, event.group_id)
-
- if isinstance(event, GroupRecallNoticeEvent) or isinstance(event, FriendRecallNoticeEvent):
- user_info = UserInfo(
- user_id=event.user_id,
- user_nickname=get_user_nickname(event.user_id) or None,
- user_cardname=get_user_cardname(event.user_id) or None,
- platform="qq",
- )
-
- if isinstance(event, GroupRecallNoticeEvent):
- group_info = GroupInfo(
- group_id=event.group_id, group_name=None, platform="qq"
- )
- else:
- group_info = None
-
- chat = await chat_manager.get_or_create_stream(
- platform=user_info.platform, user_info=user_info, group_info=group_info
- )
-
- await self.storage.store_recalled_message(event.message_id, time.time(), chat)
-
- async def handle_message(self, event: MessageEvent, bot: Bot) -> None:
- """处理收到的消息"""
-
- self.bot = bot # 更新 bot 实例
-
- # 用户屏蔽,不区分私聊/群聊
- if event.user_id in global_config.ban_user_id:
- return
-
- if (
- event.reply
- and hasattr(event.reply, "sender")
- and hasattr(event.reply.sender, "user_id")
- and event.reply.sender.user_id in global_config.ban_user_id
- ):
- logger.debug(f"跳过处理回复来自被ban用户 {event.reply.sender.user_id} 的消息")
- return
- # 处理私聊消息
- if isinstance(event, PrivateMessageEvent):
- if not global_config.enable_friend_chat: # 私聊过滤
- return
- else:
- try:
- user_info = UserInfo(
- user_id=event.user_id,
- user_nickname=(await bot.get_stranger_info(user_id=event.user_id, no_cache=True))["nickname"],
- user_cardname=None,
- platform="qq",
- )
- except Exception as e:
- logger.error(f"获取陌生人信息失败: {e}")
- return
- logger.debug(user_info)
-
- # group_info = GroupInfo(group_id=0, group_name="私聊", platform="qq")
- group_info = None
-
- # 处理群聊消息
- else:
- # 白名单设定由nontbot侧完成
- if event.group_id:
- if event.group_id not in global_config.talk_allowed_groups:
- return
-
- user_info = UserInfo(
- user_id=event.user_id,
- user_nickname=event.sender.nickname,
- user_cardname=event.sender.card or None,
- platform="qq",
- )
-
- group_info = GroupInfo(group_id=event.group_id, group_name=None, platform="qq")
-
- # group_info = await bot.get_group_info(group_id=event.group_id)
- # sender_info = await bot.get_group_member_info(group_id=event.group_id, user_id=event.user_id, no_cache=True)
-
- message_cq = MessageRecvCQ(
- message_id=event.message_id,
- user_info=user_info,
- raw_message=str(event.original_message),
- group_info=group_info,
- reply_message=event.reply,
- platform="qq",
- )
+ async def message_process(self, message_cq: MessageRecvCQ) -> None:
+ """处理转化后的统一格式消息
+ 1. 过滤消息
+ 2. 记忆激活
+ 3. 意愿激活
+ 4. 生成回复并发送
+ 5. 更新关系
+ 6. 更新情绪
+ """
await message_cq.initialize()
message_json = message_cq.to_dict()
+        # 转换为 JSON 字典
# 进入maimbot
message = MessageRecv(message_json)
@@ -175,16 +76,20 @@ class ChatBot:
# 消息过滤,涉及到config有待更新
+ # 创建聊天流
chat = await chat_manager.get_or_create_stream(
- platform=messageinfo.platform, user_info=userinfo, group_info=groupinfo
+            platform=messageinfo.platform, user_info=userinfo, group_info=groupinfo
)
message.update_chat_stream(chat)
await relationship_manager.update_relationship(
chat_stream=chat,
)
- await relationship_manager.update_relationship_value(chat_stream=chat, relationship_value=0.5)
+ await relationship_manager.update_relationship_value(
+ chat_stream=chat, relationship_value=0.5
+ )
await message.process()
+
# 过滤词
for word in global_config.ban_words:
if word in message.processed_plain_text:
@@ -203,12 +108,15 @@ class ChatBot:
logger.info(f"[正则表达式过滤]消息匹配到{pattern},filtered")
return
- current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(messageinfo.time))
-
- # topic=await topic_identifier.identify_topic_llm(message.processed_plain_text)
+ current_time = time.strftime(
+ "%Y-%m-%d %H:%M:%S", time.localtime(messageinfo.time)
+ )
+        # 根据记忆激活程度计算兴趣度
topic = ""
- interested_rate = await hippocampus.memory_activate_value(message.processed_plain_text) / 100
+ interested_rate = (
+ await hippocampus.memory_activate_value(message.processed_plain_text) / 100
+ )
logger.debug(f"对{message.processed_plain_text}的激活度:{interested_rate}")
# logger.info(f"\033[1;32m[主题识别]\033[0m 使用{global_config.topic_extract}主题: {topic}")
@@ -217,7 +125,6 @@ class ChatBot:
is_mentioned = is_mentioned_bot_in_message(message)
reply_probability = await willing_manager.change_reply_willing_received(
chat_stream=chat,
- topic=topic[0] if topic else None,
is_mentioned_bot=is_mentioned,
config=global_config,
is_emoji=message.is_emoji,
@@ -265,7 +172,10 @@ class ChatBot:
# 找到message,删除
# print(f"开始找思考消息")
for msg in container.messages:
- if isinstance(msg, MessageThinking) and msg.message_info.message_id == think_id:
+ if (
+ isinstance(msg, MessageThinking)
+ and msg.message_info.message_id == think_id
+ ):
# print(f"找到思考消息: {msg}")
thinking_message = msg
container.messages.remove(msg)
@@ -359,79 +269,171 @@ class ChatBot:
chat_stream=chat, relationship_value=valuedict[emotion[0]]
)
# 使用情绪管理器更新情绪
- self.mood_manager.update_mood_from_emotion(emotion[0], global_config.mood_intensity_factor)
+ self.mood_manager.update_mood_from_emotion(
+ emotion[0], global_config.mood_intensity_factor
+ )
# willing_manager.change_reply_willing_after_sent(
# chat_stream=chat
# )
- async def directly_reply(self, raw_message: str, user_id: int, group_id: int):
- """
- 直接回复发来的消息,不经过意愿管理器
- """
+ async def handle_notice(self, event: NoticeEvent, bot: Bot) -> None:
+ """处理收到的通知"""
+ if isinstance(event, PokeNotifyEvent):
+ # 戳一戳 通知
+ # 不处理其他人的戳戳
+ if not event.is_tome():
+ return
- # 构造用户信息和群组信息
- user_info = UserInfo(
- user_id=user_id,
- user_nickname=get_user_nickname(user_id) or None,
- user_cardname=get_user_cardname(user_id) or None,
- platform="qq",
- )
- group_info = GroupInfo(group_id=group_id, group_name=None, platform="qq")
+ # 用户屏蔽,不区分私聊/群聊
+ if event.user_id in global_config.ban_user_id:
+ return
+
+ # 白名单模式
+ if event.group_id:
+ if event.group_id not in global_config.talk_allowed_groups:
+ return
+
+ raw_message = f"[戳了戳]{global_config.BOT_NICKNAME}" # 默认类型
+ if info := event.raw_info:
+ poke_type = info[2].get(
+ "txt", "戳了戳"
+ ) # 戳戳类型,例如“拍一拍”、“揉一揉”、“捏一捏”
+ custom_poke_message = info[4].get(
+ "txt", ""
+ ) # 自定义戳戳消息,若不存在会为空字符串
+ raw_message = (
+ f"[{poke_type}]{global_config.BOT_NICKNAME}{custom_poke_message}"
+ )
+
+ raw_message += "(这是一个类似摸摸头的友善行为,而不是恶意行为,请不要作出攻击发言)"
+
+ user_info = UserInfo(
+ user_id=event.user_id,
+ user_nickname=(
+ await bot.get_stranger_info(user_id=event.user_id, no_cache=True)
+ )["nickname"],
+ user_cardname=None,
+ platform="qq",
+ )
+
+ if event.group_id:
+ group_info = GroupInfo(
+ group_id=event.group_id, group_name=None, platform="qq"
+ )
+ else:
+ group_info = None
+
+ message_cq = MessageRecvCQ(
+ message_id=0,
+ user_info=user_info,
+ raw_message=str(raw_message),
+ group_info=group_info,
+ reply_message=None,
+ platform="qq",
+ )
+
+ await self.message_process(message_cq)
+
+ elif isinstance(event, GroupRecallNoticeEvent) or isinstance(
+ event, FriendRecallNoticeEvent
+ ):
+ user_info = UserInfo(
+ user_id=event.user_id,
+ user_nickname=get_user_nickname(event.user_id) or None,
+ user_cardname=get_user_cardname(event.user_id) or None,
+ platform="qq",
+ )
+
+ if isinstance(event, GroupRecallNoticeEvent):
+ group_info = GroupInfo(
+ group_id=event.group_id, group_name=None, platform="qq"
+ )
+ else:
+ group_info = None
+
+ chat = await chat_manager.get_or_create_stream(
+ platform=user_info.platform, user_info=user_info, group_info=group_info
+ )
+
+ await self.storage.store_recalled_message(
+ event.message_id, time.time(), chat
+ )
+
+ async def handle_message(self, event: MessageEvent, bot: Bot) -> None:
+ """处理收到的消息"""
+
+ self.bot = bot # 更新 bot 实例
+
+ # 用户屏蔽,不区分私聊/群聊
+ if event.user_id in global_config.ban_user_id:
+ return
+
+ if (
+ event.reply
+ and hasattr(event.reply, "sender")
+ and hasattr(event.reply.sender, "user_id")
+ and event.reply.sender.user_id in global_config.ban_user_id
+ ):
+ logger.debug(
+ f"跳过处理回复来自被ban用户 {event.reply.sender.user_id} 的消息"
+ )
+ return
+ # 处理私聊消息
+ if isinstance(event, PrivateMessageEvent):
+ if not global_config.enable_friend_chat: # 私聊过滤
+ return
+ else:
+ try:
+ user_info = UserInfo(
+ user_id=event.user_id,
+ user_nickname=(
+ await bot.get_stranger_info(
+ user_id=event.user_id, no_cache=True
+ )
+ )["nickname"],
+ user_cardname=None,
+ platform="qq",
+ )
+ except Exception as e:
+ logger.error(f"获取陌生人信息失败: {e}")
+ return
+ logger.debug(user_info)
+
+ # group_info = GroupInfo(group_id=0, group_name="私聊", platform="qq")
+ group_info = None
+
+ # 处理群聊消息
+ else:
+            # 白名单设定由nonebot侧完成
+ if event.group_id:
+ if event.group_id not in global_config.talk_allowed_groups:
+ return
+
+ user_info = UserInfo(
+ user_id=event.user_id,
+ user_nickname=event.sender.nickname,
+ user_cardname=event.sender.card or None,
+ platform="qq",
+ )
+
+ group_info = GroupInfo(
+ group_id=event.group_id, group_name=None, platform="qq"
+ )
+
+ # group_info = await bot.get_group_info(group_id=event.group_id)
+ # sender_info = await bot.get_group_member_info(group_id=event.group_id, user_id=event.user_id, no_cache=True)
message_cq = MessageRecvCQ(
- message_id=None,
+ message_id=event.message_id,
user_info=user_info,
- raw_message=raw_message,
+ raw_message=str(event.original_message),
group_info=group_info,
- reply_message=None,
+ reply_message=event.reply,
platform="qq",
)
- await message_cq.initialize()
- message_json = message_cq.to_dict()
-
- message = MessageRecv(message_json)
- groupinfo = message.message_info.group_info
- userinfo = message.message_info.user_info
- messageinfo = message.message_info
-
- chat = await chat_manager.get_or_create_stream(
- platform=messageinfo.platform, user_info=userinfo, group_info=groupinfo
- )
- message.update_chat_stream(chat)
- await message.process()
-
- bot_user_info = UserInfo(
- user_id=global_config.BOT_QQ,
- user_nickname=global_config.BOT_NICKNAME,
- platform=messageinfo.platform,
- )
-
- current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(messageinfo.time))
- logger.info(
- f"[{current_time}][{chat.group_info.group_name if chat.group_info else '私聊'}]{chat.user_info.user_nickname}:"
- f"{message.processed_plain_text}"
- )
-
- # 使用大模型生成回复
- response, raw_content = await self.gpt.generate_response(message)
-
- if response:
- for msg in response:
- message_segment = Seg(type="text", data=msg)
-
- bot_message = MessageSending(
- message_id=None,
- chat_stream=chat,
- bot_user_info=bot_user_info,
- sender_info=userinfo,
- message_segment=message_segment,
- reply=None,
- is_head=False,
- is_emoji=False,
- )
- message_manager.add_message(bot_message)
+ await self.message_process(message_cq)
# 创建全局ChatBot实例
chat_bot = ChatBot()
diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index 88cb31ed5..db9dd17b5 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -73,6 +73,8 @@ class BotConfig:
mood_update_interval: float = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate: float = 0.95 # 情绪衰减率
mood_intensity_factor: float = 0.7 # 情绪强度因子
+
+ willing_mode: str = "classical" # 意愿模式
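+    # 可选值:classical / dynamic / custom,对应 src/plugins/willing/ 下的各实现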
keywords_reaction_rules = [] # 关键词回复规则
@@ -212,6 +214,10 @@ class BotConfig:
"model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
)
config.max_response_length = response_config.get("max_response_length", config.max_response_length)
+
+ def willing(parent: dict):
+ willing_config = parent["willing"]
+ config.willing_mode = willing_config.get("willing_mode", config.willing_mode)
def model(parent: dict):
# 加载模型配置
@@ -353,6 +359,7 @@ class BotConfig:
"cq_code": {"func": cq_code, "support": ">=0.0.0"},
"bot": {"func": bot, "support": ">=0.0.0"},
"response": {"func": response, "support": ">=0.0.0"},
+ "willing": {"func": willing, "support": ">=0.0.9", "necessary": False},
"model": {"func": model, "support": ">=0.0.0"},
"message": {"func": message, "support": ">=0.0.0"},
"memory": {"func": memory, "support": ">=0.0.0", "necessary": False},
diff --git a/src/plugins/chat/prompt_builder.py b/src/plugins/chat/prompt_builder.py
index ec0dac3d0..16d8882e4 100644
--- a/src/plugins/chat/prompt_builder.py
+++ b/src/plugins/chat/prompt_builder.py
@@ -1,7 +1,6 @@
import random
import time
from typing import Optional
-from loguru import logger
from ...common.database import db
from ..memory_system.memory import hippocampus, memory_graph
@@ -11,6 +10,13 @@ from .config import global_config
from .utils import get_embedding, get_recent_group_detailed_plain_text
from .chat_stream import chat_manager
+from ..utils.logger_config import LogClassification, LogModule
+
+log_module = LogModule()
+logger = log_module.setup_logger(LogClassification.PBUILDER)
+
+logger.info("初始化Prompt系统")
+
class PromptBuilder:
def __init__(self):
@@ -163,7 +169,7 @@ class PromptBuilder:
prompt_ger += "你喜欢用文言文"
# 额外信息要求
- extra_info = """但是记得回复平淡一些,简短一些,尤其注意在没明确提到时不要过多提及自身的背景, 不要直接回复别人发的表情包,记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只需要输出回复内容就好,不要输出其他任何内容"""
+ extra_info = """但是记得回复平淡一些,简短一些,尤其注意在没明确提到时不要过多提及自身的背景, 不要直接回复别人发的表情包,记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情,@,等),只需要输出回复内容就好,不要输出其他任何内容"""
# 合并prompt
prompt = ""
@@ -239,7 +245,7 @@ class PromptBuilder:
return prompt_for_check, memory
def _build_initiative_prompt(self, selected_node, prompt_regular, memory):
- prompt_for_initiative = f"{prompt_regular}你现在想在群里发言,回忆了一下,想到一个话题,是{selected_node['concept']},关于这个话题的记忆有\n{memory}\n,请在把握群里的聊天内容的基础上,综合群内的氛围,以日常且口语化的口吻,简短且随意一点进行发言,不要说的太有条理,可以有个性。记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情等)"
+ prompt_for_initiative = f"{prompt_regular}你现在想在群里发言,回忆了一下,想到一个话题,是{selected_node['concept']},关于这个话题的记忆有\n{memory}\n,请在把握群里的聊天内容的基础上,综合群内的氛围,以日常且口语化的口吻,简短且随意一点进行发言,不要说的太有条理,可以有个性。记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情,@等)"
return prompt_for_initiative
async def get_prompt_info(self, message: str, threshold: float):
diff --git a/src/plugins/chat/thinking_idea.py b/src/plugins/chat/thinking_idea.py
deleted file mode 100644
index 0cc300219..000000000
--- a/src/plugins/chat/thinking_idea.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#Broca's Area
-# 功能:语言产生、语法处理和言语运动控制。
-# 损伤后果:布洛卡失语症(表达困难,但理解保留)。
-
-import time
-
-
-class Thinking_Idea:
- def __init__(self, message_id: str):
- self.messages = [] # 消息列表集合
- self.current_thoughts = [] # 当前思考内容列表
- self.time = time.time() # 创建时间
- self.id = str(int(time.time() * 1000)) # 使用时间戳生成唯一标识ID
-
\ No newline at end of file
diff --git a/src/plugins/remote/__init__.py b/src/plugins/remote/__init__.py
index 7a4a88472..02b19518a 100644
--- a/src/plugins/remote/__init__.py
+++ b/src/plugins/remote/__init__.py
@@ -1,4 +1,5 @@
import asyncio
from .remote import main
-asyncio.run(main())
+# 启动心跳线程
+heartbeat_thread = main()
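+# main() 返回守护心跳线程,随主程序退出自动结束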
diff --git a/src/plugins/remote/remote.py b/src/plugins/remote/remote.py
index f2741b222..6020398e8 100644
--- a/src/plugins/remote/remote.py
+++ b/src/plugins/remote/remote.py
@@ -4,13 +4,12 @@ import uuid
import platform
import os
import json
+import threading
+import time  # HeartbeatThread.run 中的 time.sleep 需要
from loguru import logger
-import asyncio
# UUID文件路径
UUID_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "client_uuid.json")
-
# 生成或获取客户端唯一ID
def get_unique_id():
# 检查是否已经有保存的UUID
@@ -37,7 +36,6 @@ def get_unique_id():
return client_id
-
# 生成客户端唯一ID
def generate_unique_id():
# 结合主机名、系统信息和随机UUID生成唯一ID
@@ -45,7 +43,6 @@ def generate_unique_id():
unique_id = f"{system_info}-{uuid.uuid4()}"
return unique_id
-
def send_heartbeat(server_url, client_id):
"""向服务器发送心跳"""
sys = platform.system()
@@ -66,31 +63,40 @@ def send_heartbeat(server_url, client_id):
logger.debug(f"发送心跳时出错: {e}")
return False
-
-async def main():
- # 配置
- SERVER_URL = "http://hyybuth.xyz:10058" # 更改为你的服务器地址
- HEARTBEAT_INTERVAL = 300 # 5分钟(秒)
-
- # 获取或生成客户端ID
- client_id = get_unique_id()
- logger.debug(f"客户端已启动,ID: {client_id}")
-
- # 主心跳循环
- try:
- while True:
- if send_heartbeat(SERVER_URL, client_id):
- print(f"{HEARTBEAT_INTERVAL}秒后发送下一次心跳...")
+class HeartbeatThread(threading.Thread):
+ """心跳线程类"""
+
+ def __init__(self, server_url, interval):
+ super().__init__(daemon=True) # 设置为守护线程,主程序结束时自动结束
+ self.server_url = server_url
+ self.interval = interval
+ self.client_id = get_unique_id()
+ self.running = True
+
+ def run(self):
+ """线程运行函数"""
+ logger.debug(f"心跳线程已启动,客户端ID: {self.client_id}")
+
+ while self.running:
+ if send_heartbeat(self.server_url, self.client_id):
+ logger.info(f"{self.interval}秒后发送下一次心跳...")
else:
- print(f"{HEARTBEAT_INTERVAL}秒后重试...")
+ logger.info(f"{self.interval}秒后重试...")
+
+ time.sleep(self.interval) # 使用同步的睡眠
+
+ def stop(self):
+ """停止线程"""
+ self.running = False
- await asyncio.sleep(HEARTBEAT_INTERVAL)
-
- except KeyboardInterrupt:
- print("用户已停止客户端")
- except Exception as e:
- print(f"发生意外错误: {e}")
-
-
-if __name__ == "__main__":
- asyncio.run(main())
+def main():
+ """主函数,启动心跳线程"""
+ # 配置
+ SERVER_URL = "http://hyybuth.xyz:10058"
+ HEARTBEAT_INTERVAL = 300 # 5分钟(秒)
+
+ # 创建并启动心跳线程
+ heartbeat_thread = HeartbeatThread(SERVER_URL, HEARTBEAT_INTERVAL)
+ heartbeat_thread.start()
+
+ return heartbeat_thread # 返回线程对象,便于外部控制
\ No newline at end of file
diff --git a/src/plugins/utils/logger_config.py b/src/plugins/utils/logger_config.py
index fff5a50d3..d11211a16 100644
--- a/src/plugins/utils/logger_config.py
+++ b/src/plugins/utils/logger_config.py
@@ -7,6 +7,7 @@ class LogClassification(Enum):
MEMORY = "memory"
EMOJI = "emoji"
CHAT = "chat"
+ PBUILDER = "promptbuilder"
class LogModule:
logger = loguru.logger.opt()
@@ -32,6 +33,10 @@ class LogModule:
# 表情包系统日志格式
emoji_format = "{time:HH:mm} | {level: <8} | 表情包 | {function}:{line} - {message}"
+
+ promptbuilder_format = "{time:HH:mm} | {level: <8} | Prompt | {function}:{line} - {message}"
+
+
# 根据日志类型选择日志格式和输出
if log_type == LogClassification.CHAT:
self.logger.add(
@@ -39,6 +44,12 @@ class LogModule:
format=chat_format,
# level="INFO"
)
+ elif log_type == LogClassification.PBUILDER:
+ self.logger.add(
+ sys.stderr,
+ format=promptbuilder_format,
+ # level="INFO"
+ )
elif log_type == LogClassification.MEMORY:
# 同时输出到控制台和文件
diff --git a/src/plugins/willing/mode_classical.py b/src/plugins/willing/mode_classical.py
new file mode 100644
index 000000000..14ae81c7a
--- /dev/null
+++ b/src/plugins/willing/mode_classical.py
@@ -0,0 +1,98 @@
+import asyncio
+from typing import Dict
+from ..chat.chat_stream import ChatStream
+
+class WillingManager:
+ def __init__(self):
+ self.chat_reply_willing: Dict[str, float] = {} # 存储每个聊天流的回复意愿
+ self._decay_task = None
+ self._started = False
+
+ async def _decay_reply_willing(self):
+ """定期衰减回复意愿"""
+ while True:
+ await asyncio.sleep(1)
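+            # 每秒将所有聊天流的回复意愿乘以 0.9 进行衰减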
+ for chat_id in self.chat_reply_willing:
+ self.chat_reply_willing[chat_id] = max(0, self.chat_reply_willing[chat_id] * 0.9)
+
+ def get_willing(self, chat_stream: ChatStream) -> float:
+ """获取指定聊天流的回复意愿"""
+ if chat_stream:
+ return self.chat_reply_willing.get(chat_stream.stream_id, 0)
+ return 0
+
+ def set_willing(self, chat_id: str, willing: float):
+ """设置指定聊天流的回复意愿"""
+ self.chat_reply_willing[chat_id] = willing
+
+ async def change_reply_willing_received(self,
+ chat_stream: ChatStream,
+ is_mentioned_bot: bool = False,
+ config = None,
+ is_emoji: bool = False,
+ interested_rate: float = 0,
+ sender_id: str = None) -> float:
+ """改变指定聊天流的回复意愿并返回回复概率"""
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+
+ interested_rate = interested_rate * config.response_interested_rate_amplifier
+
+ if interested_rate > 0.5:
+ current_willing += (interested_rate - 0.5)
+
+ if is_mentioned_bot and current_willing < 1.0:
+ current_willing += 1
+ elif is_mentioned_bot:
+ current_willing += 0.05
+
+ if is_emoji:
+ current_willing *= 0.2
+
+ self.chat_reply_willing[chat_id] = min(current_willing, 3.0)
+
+        # 将当前意愿映射为回复概率,受配置的意愿放大系数影响
+        reply_probability = min(max(current_willing - 0.5, 0.03) * config.response_willing_amplifier * 2, 1)
+
+ # 检查群组权限(如果是群聊)
+ if chat_stream.group_info and config:
+ if chat_stream.group_info.group_id not in config.talk_allowed_groups:
+ current_willing = 0
+ reply_probability = 0
+
+ if chat_stream.group_info.group_id in config.talk_frequency_down_groups:
+ reply_probability = reply_probability / 3.5
+
+ return reply_probability
+
+ def change_reply_willing_sent(self, chat_stream: ChatStream):
+ """发送消息后降低聊天流的回复意愿"""
+ if chat_stream:
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+ self.chat_reply_willing[chat_id] = max(0, current_willing - 1.8)
+
+ def change_reply_willing_not_sent(self, chat_stream: ChatStream):
+ """未发送消息后降低聊天流的回复意愿"""
+ if chat_stream:
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+ self.chat_reply_willing[chat_id] = max(0, current_willing - 0)
+
+ def change_reply_willing_after_sent(self, chat_stream: ChatStream):
+ """发送消息后提高聊天流的回复意愿"""
+ if chat_stream:
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+ if current_willing < 1:
+ self.chat_reply_willing[chat_id] = min(1, current_willing + 0.4)
+
+ async def ensure_started(self):
+ """确保衰减任务已启动"""
+ if not self._started:
+ if self._decay_task is None:
+ self._decay_task = asyncio.create_task(self._decay_reply_willing())
+ self._started = True
+
+# 创建全局实例
+willing_manager = WillingManager()
\ No newline at end of file
diff --git a/src/plugins/willing/mode_custom.py b/src/plugins/willing/mode_custom.py
new file mode 100644
index 000000000..1e17130be
--- /dev/null
+++ b/src/plugins/willing/mode_custom.py
@@ -0,0 +1,102 @@
+import asyncio
+from typing import Dict
+from ..chat.chat_stream import ChatStream
+
+class WillingManager:
+ def __init__(self):
+ self.chat_reply_willing: Dict[str, float] = {} # 存储每个聊天流的回复意愿
+ self._decay_task = None
+ self._started = False
+
+ async def _decay_reply_willing(self):
+ """定期衰减回复意愿"""
+ while True:
+ await asyncio.sleep(3)
+ for chat_id in self.chat_reply_willing:
+                # 每3秒将回复意愿衰减40%(乘以0.6)
+                self.chat_reply_willing[chat_id] = max(0, self.chat_reply_willing[chat_id] * 0.6)
+
+ def get_willing(self, chat_stream: ChatStream) -> float:
+ """获取指定聊天流的回复意愿"""
+ if chat_stream:
+ return self.chat_reply_willing.get(chat_stream.stream_id, 0)
+ return 0
+
+ def set_willing(self, chat_id: str, willing: float):
+ """设置指定聊天流的回复意愿"""
+ self.chat_reply_willing[chat_id] = willing
+
+ async def change_reply_willing_received(self,
+ chat_stream: ChatStream,
+ topic: str = None,
+ is_mentioned_bot: bool = False,
+ config = None,
+ is_emoji: bool = False,
+ interested_rate: float = 0,
+ sender_id: str = None) -> float:
+ """改变指定聊天流的回复意愿并返回回复概率"""
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+
+ if topic and current_willing < 1:
+ current_willing += 0.2
+ elif topic:
+ current_willing += 0.05
+
+ if is_mentioned_bot and current_willing < 1.0:
+ current_willing += 0.9
+ elif is_mentioned_bot:
+ current_willing += 0.05
+
+ if is_emoji:
+ current_willing *= 0.2
+
+ self.chat_reply_willing[chat_id] = min(current_willing, 3.0)
+
+ reply_probability = (current_willing - 0.5) * 2
+
+ # 检查群组权限(如果是群聊)
+ if chat_stream.group_info and config:
+ if chat_stream.group_info.group_id not in config.talk_allowed_groups:
+ current_willing = 0
+ reply_probability = 0
+
+ if chat_stream.group_info.group_id in config.talk_frequency_down_groups:
+ reply_probability = reply_probability / 3.5
+
+ if is_mentioned_bot and sender_id == "1026294844":
+ reply_probability = 1
+
+ return reply_probability
+
+ def change_reply_willing_sent(self, chat_stream: ChatStream):
+ """发送消息后降低聊天流的回复意愿"""
+ if chat_stream:
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+ self.chat_reply_willing[chat_id] = max(0, current_willing - 1.8)
+
+ def change_reply_willing_not_sent(self, chat_stream: ChatStream):
+ """未发送消息后降低聊天流的回复意愿"""
+ if chat_stream:
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+ self.chat_reply_willing[chat_id] = max(0, current_willing - 0)
+
+ def change_reply_willing_after_sent(self, chat_stream: ChatStream):
+ """发送消息后提高聊天流的回复意愿"""
+ if chat_stream:
+ chat_id = chat_stream.stream_id
+ current_willing = self.chat_reply_willing.get(chat_id, 0)
+ if current_willing < 1:
+ self.chat_reply_willing[chat_id] = min(1, current_willing + 0.4)
+
+ async def ensure_started(self):
+ """确保衰减任务已启动"""
+ if not self._started:
+ if self._decay_task is None:
+ self._decay_task = asyncio.create_task(self._decay_reply_willing())
+ self._started = True
+
+# 创建全局实例
+willing_manager = WillingManager()
\ No newline at end of file
diff --git a/src/plugins/chat/willing_manager.py b/src/plugins/willing/mode_dynamic.py
similarity index 99%
rename from src/plugins/chat/willing_manager.py
rename to src/plugins/willing/mode_dynamic.py
index 6df27f3a4..bab9a0d08 100644
--- a/src/plugins/chat/willing_manager.py
+++ b/src/plugins/willing/mode_dynamic.py
@@ -5,9 +5,8 @@ from typing import Dict
from loguru import logger
-from .config import global_config
-from .chat_stream import ChatStream
-
+from ..chat.config import global_config
+from ..chat.chat_stream import ChatStream
class WillingManager:
def __init__(self):
diff --git a/src/plugins/willing/willing_manager.py b/src/plugins/willing/willing_manager.py
new file mode 100644
index 000000000..1da3705ca
--- /dev/null
+++ b/src/plugins/willing/willing_manager.py
@@ -0,0 +1,32 @@
+from typing import Optional
+from loguru import logger
+
+from ..chat.config import global_config
+from .mode_classical import WillingManager as ClassicalWillingManager
+from .mode_dynamic import WillingManager as DynamicWillingManager
+from .mode_custom import WillingManager as CustomWillingManager
+
+def init_willing_manager() -> Optional[object]:
+ """
+ 根据配置初始化并返回对应的WillingManager实例
+
+ Returns:
+ 对应mode的WillingManager实例
+ """
+ mode = global_config.willing_mode.lower()
+
+ if mode == "classical":
+ logger.info("使用经典回复意愿管理器")
+ return ClassicalWillingManager()
+ elif mode == "dynamic":
+ logger.info("使用动态回复意愿管理器")
+ return DynamicWillingManager()
+ elif mode == "custom":
+ logger.warning(f"自定义的回复意愿管理器模式: {mode}")
+ return CustomWillingManager()
+ else:
+ logger.warning(f"未知的回复意愿管理器模式: {mode}, 将使用经典模式")
+ return ClassicalWillingManager()
+
+# 全局willing_manager对象
+willing_manager = init_willing_manager()
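+# 供其他插件通过 from ..willing.willing_manager import willing_manager 引用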
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 089be69b0..89ebbe162 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,6 +1,7 @@
[inner]
-version = "0.0.8"
+version = "0.0.9"
+#以下是给开发人员阅读的,一般用户不需要阅读
#如果你想要修改配置文件,请在修改后将version的值进行变更
#如果新增项目,请在BotConfig类下新增相应的变量
#1.如果你修改的是[]层级项目,例如你新增了 [memory],那么请在config.py的 load_config函数中的include_configs字典中新增"内容":{
@@ -19,14 +20,14 @@ alias_names = ["小麦", "阿麦"]
[personality]
prompt_personality = [
- "曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧", # 贴吧人格
- "是一个女大学生,你有黑色头发,你会刷小红书", # 小红书人格
- "是一个女大学生,你会刷b站,对ACG文化感兴趣" # b站人格
+ "用一句话或几句话描述性格特点和其他特征",
+ "用一句话或几句话描述性格特点和其他特征",
+ "例如,是一个热爱国家热爱党的新时代好青年"
]
personality_1_probability = 0.6 # 第一种人格出现概率
personality_2_probability = 0.3 # 第二种人格出现概率
personality_3_probability = 0.1 # 第三种人格出现概率,请确保三个概率相加等于1
-prompt_schedule = "一个曾经学习地质,现在学习心理学和脑科学的女大学生,喜欢刷qq,贴吧,知乎和小红书"
+prompt_schedule = "用一句话或几句话描述性格特点和其他特征"
[message]
min_text_length = 2 # 与麦麦聊天时麦麦只会回答文本大于等于此数的消息
@@ -64,11 +65,16 @@ model_v3_probability = 0.1 # 麦麦回答时选择次要回复模型2 模型的
model_r1_distill_probability = 0.1 # 麦麦回答时选择次要回复模型3 模型的概率
max_response_length = 1024 # 麦麦回答的最大token数
+[willing]
+willing_mode = "classical"
+# willing_mode = "dynamic"
+# willing_mode = "custom"
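+# 三种模式分别对应 src/plugins/willing/ 下的 mode_classical、mode_dynamic、mode_custom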
+
[memory]
-build_memory_interval = 600 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多
+build_memory_interval = 2000 # 记忆构建间隔 单位秒 间隔越低,麦麦学习越多,但是冗余信息也会增多
memory_compress_rate = 0.1 # 记忆压缩率 控制记忆精简程度 建议保持默认,调高可以获得更多信息,但是冗余信息也会增多
-forget_memory_interval = 600 # 记忆遗忘间隔 单位秒 间隔越低,麦麦遗忘越频繁,记忆更精简,但更难学习
+forget_memory_interval = 1000 # 记忆遗忘间隔 单位秒 间隔越低,麦麦遗忘越频繁,记忆更精简,但更难学习
memory_forget_time = 24 #多长时间后的记忆会被遗忘 单位小时
memory_forget_percentage = 0.01 # 记忆遗忘比例 控制记忆遗忘程度 越大遗忘越多 建议保持默认
@@ -116,6 +122,9 @@ talk_allowed = [
talk_frequency_down = [] #降低回复频率的群
ban_user_id = [] #禁止回复消息的QQ号
+[remote] #测试功能,发送统计信息,主要是看全球有多少只麦麦
+enable = false #默认关闭
+
#V3
#name = "deepseek-chat"
@@ -178,8 +187,6 @@ pri_out = 0
name = "Pro/Qwen/Qwen2-VL-7B-Instruct"
provider = "SILICONFLOW"
-
-
#嵌入模型
[model.embedding] #嵌入
diff --git a/webui.py b/webui.py
new file mode 100644
index 000000000..a9041749b
--- /dev/null
+++ b/webui.py
@@ -0,0 +1,1157 @@
+import gradio as gr
+import os
+import sys
+import toml
+from loguru import logger
+import shutil
+import ast
+import json
+
+
+is_share = False
+debug = True
+config_data = toml.load("config/bot_config.toml")
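+# 运行前需存在 config/bot_config.toml 与 .env.prod,保存时会自动生成 .bak 备份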
+
+#==============================================
+#env环境配置文件读取部分
+def parse_env_config(config_file):
+ """
+ 解析配置文件并将配置项存储到相应的变量中(变量名以env_为前缀)。
+ """
+ env_variables = {}
+
+ # 读取配置文件
+ with open(config_file, "r", encoding="utf-8") as f:
+ lines = f.readlines()
+
+ # 逐行处理配置
+ for line in lines:
+ line = line.strip()
+ # 忽略空行和注释
+ if not line or line.startswith("#"):
+ continue
+
+ # 拆分键值对
+ key, value = line.split("=", 1)
+
+ # 去掉空格并去除两端引号(如果有的话)
+ key = key.strip()
+ value = value.strip().strip('"').strip("'")
+
+ # 将配置项存入以env_为前缀的变量
+ env_variable = f"env_{key}"
+ env_variables[env_variable] = value
+
+ # 动态创建环境变量
+ os.environ[env_variable] = value
+
+ return env_variables
+
+#env环境配置文件保存函数
+def save_to_env_file(env_variables, filename=".env.prod"):
+ """
+ 将修改后的变量保存到指定的.env文件中,并在第一次保存前备份文件(如果备份文件不存在)。
+ """
+ backup_filename = f"{filename}.bak"
+
+ # 如果备份文件不存在,则备份原文件
+ if not os.path.exists(backup_filename):
+ if os.path.exists(filename):
+ logger.info(f"{filename} 已存在,正在备份到 {backup_filename}...")
+ shutil.copy(filename, backup_filename) # 备份文件
+ logger.success(f"文件已备份到 {backup_filename}")
+ else:
+ logger.warning(f"{filename} 不存在,无法进行备份。")
+
+ # 保存新配置
+    with open(filename, "w", encoding="utf-8") as f:
+ for var, value in env_variables.items():
+ f.write(f"{var[4:]}={value}\n") # 移除env_前缀
+ logger.info(f"配置已保存到 {filename}")
+
+env_config_file = ".env.prod" # 配置文件路径
+env_config_data = parse_env_config(env_config_file)
+#env读取保存结束
+#==============================================
+
+#==============================================
+#env环境文件中插件修改更新函数
+def add_item(new_item, current_list):
+ updated_list = current_list.copy()
+ if new_item.strip():
+ updated_list.append(new_item.strip())
+ return [
+ updated_list, # 更新State
+ "\n".join(updated_list), # 更新TextArea
+ gr.update(choices=updated_list), # 更新Dropdown
+ ", ".join(updated_list) # 更新最终结果
+ ]
+
+def delete_item(selected_item, current_list):
+ updated_list = current_list.copy()
+ if selected_item in updated_list:
+ updated_list.remove(selected_item)
+ return [
+ updated_list,
+ "\n".join(updated_list),
+ gr.update(choices=updated_list),
+ ", ".join(updated_list)
+ ]
+
+def add_int_item(new_item, current_list):
+ updated_list = current_list.copy()
+ stripped_item = new_item.strip()
+ if stripped_item:
+ try:
+ item = int(stripped_item)
+ updated_list.append(item)
+ except ValueError:
+ pass
+ return [
+ updated_list, # 更新State
+ "\n".join(map(str, updated_list)), # 更新TextArea
+ gr.update(choices=updated_list), # 更新Dropdown
+ ", ".join(map(str, updated_list)) # 更新最终结果
+ ]
+
+def delete_int_item(selected_item, current_list):
+ updated_list = current_list.copy()
+ if selected_item in updated_list:
+ updated_list.remove(selected_item)
+ return [
+ updated_list,
+ "\n".join(map(str, updated_list)),
+ gr.update(choices=updated_list),
+ ", ".join(map(str, updated_list))
+ ]
+#env文件中插件值处理函数
+def parse_list_str(input_str):
+ """
+ 将形如["src2.plugins.chat"]的字符串解析为Python列表
+ parse_list_str('["src2.plugins.chat"]')
+ ['src2.plugins.chat']
+ parse_list_str("['plugin1', 'plugin2']")
+ ['plugin1', 'plugin2']
+ """
+ try:
+ return ast.literal_eval(input_str.strip())
+ except (ValueError, SyntaxError):
+ # 处理不符合Python列表格式的字符串
+ cleaned = input_str.strip(" []") # 去除方括号
+ return [item.strip(" '\"") for item in cleaned.split(",") if item.strip()]
+
+def format_list_to_str(lst):
+ """
+    将形如 "src2.plugins.chat, src2.plugins.remote" 的逗号分隔字符串
+    转换为形如 '["src2.plugins.chat","src2.plugins.remote"]' 的列表字符串
+    format_list_to_str('plugin1, plugin2')
+    '["plugin1","plugin2"]'
+ """
+ resarr = lst.split(", ")
+ res = ""
+ for items in resarr:
+ temp = '"' + str(items) + '"'
+ res += temp + ","
+
+ res = res[:-1]
+ return "[" + res + "]"
+
+def format_list_to_str_alias(lst):
+ """
+    将形如 "小麦, 阿麦" 的逗号分隔字符串拆分为字符串列表
+    format_list_to_str_alias('小麦, 阿麦')
+    ['小麦', '阿麦']
+ """
+ resarr = []
+ if len(lst) != 0:
+ resarr = lst.split(", ")
+
+ return resarr
+
+def format_list_to_int(lst):
+    """将形如 "123, 456" 的逗号分隔字符串转换为整数列表"""
+ resarr = []
+ if len(lst) != 0:
+ resarr = lst.split(", ")
+ # print(resarr)
+ # print(type(resarr))
+ ans = []
+ if len(resarr) != 0:
+ for lsts in resarr:
+ temp = int(lsts)
+ ans.append(temp)
+ # print(ans)
+ # print(type(ans))
+ return ans
+
+#env保存函数
+def save_trigger(server_address, server_port, final_result_list,
+                 t_mongodb_host, t_mongodb_port, t_mongodb_database_name,
+                 t_chatanywhere_base_url, t_chatanywhere_key,
+                 t_siliconflow_base_url, t_siliconflow_key,
+                 t_deepseek_base_url, t_deepseek_key):
+ final_result_lists = format_list_to_str(final_result_list)
+ env_config_data["env_HOST"] = server_address
+ env_config_data["env_PORT"] = server_port
+ env_config_data["env_PLUGINS"] = final_result_lists
+ env_config_data["env_MONGODB_HOST"] = t_mongodb_host
+ env_config_data["env_MONGODB_PORT"] = t_mongodb_port
+ env_config_data["env_DATABASE_NAME"] = t_mongodb_database_name
+ env_config_data["env_CHAT_ANY_WHERE_BASE_URL"] = t_chatanywhere_base_url
+ env_config_data["env_CHAT_ANY_WHERE_KEY"] = t_chatanywhere_key
+ env_config_data["env_SILICONFLOW_BASE_URL"] = t_siliconflow_base_url
+ env_config_data["env_SILICONFLOW_KEY"] = t_siliconflow_key
+ env_config_data["env_DEEP_SEEK_BASE_URL"] = t_deepseek_base_url
+ env_config_data["env_DEEP_SEEK_KEY"] = t_deepseek_key
+ save_to_env_file(env_config_data)
+ logger.success("配置已保存到 .env.prod 文件中")
+ return "配置已保存"
+
+#==============================================
+
+
+#==============================================
+#主要配置文件保存函数
+def save_config_to_file(t_config_data):
+ filename = "config/bot_config.toml"
+ backup_filename = f"{filename}.bak"
+ if not os.path.exists(backup_filename):
+ if os.path.exists(filename):
+ logger.info(f"{filename} 已存在,正在备份到 {backup_filename}...")
+ shutil.copy(filename, backup_filename) # 备份文件
+ logger.success(f"文件已备份到 {backup_filename}")
+ else:
+ logger.warning(f"{filename} 不存在,无法进行备份。")
+
+
+ with open(filename, "w", encoding="utf-8") as f:
+ toml.dump(t_config_data, f)
+ logger.success("配置已保存到 bot_config.toml 文件中")
+def save_bot_config(t_qqbot_qq, t_nickname,t_nickname_final_result):
+ config_data["bot"]["qq"] = int(t_qqbot_qq)
+ config_data["bot"]["nickname"] = t_nickname
+ config_data["bot"]["alias_names"] = format_list_to_str_alias(t_nickname_final_result)
+ save_config_to_file(config_data)
+ logger.info("Bot配置已保存")
+ return "Bot配置已保存"
+
+# 监听滑块的值变化,确保总和不超过 1,并显示警告
+def adjust_greater_probabilities(t_personality_1, t_personality_2, t_personality_3):
+ total = t_personality_1 + t_personality_2 + t_personality_3
+ if total > 1.0:
+ warning_message = f"警告: 人格1、人格2和人格3的概率总和为 {total:.2f},超过了 1.0!请调整滑块使总和等于 1.0。"
+ return warning_message
+ else:
+ return "" # 没有警告时返回空字符串
+
+def adjust_less_probabilities(t_personality_1, t_personality_2, t_personality_3):
+ total = t_personality_1 + t_personality_2 + t_personality_3
+ if total < 1.0:
+ warning_message = f"警告: 人格1、人格2和人格3的概率总和为 {total:.2f},小于 1.0!请调整滑块使总和等于 1.0。"
+ return warning_message
+ else:
+ return "" # 没有警告时返回空字符串
+
+def adjust_model_greater_probabilities(t_personality_1, t_personality_2, t_personality_3):
+ total = t_personality_1 + t_personality_2 + t_personality_3
+ if total > 1.0:
+ warning_message = f"警告: 选择模型1、模型2和模型3的概率总和为 {total:.2f},超过了 1.0!请调整滑块使总和等于 1.0。"
+ return warning_message
+ else:
+ return "" # 没有警告时返回空字符串
+
+def adjust_model_less_probabilities(t_personality_1, t_personality_2, t_personality_3):
+ total = t_personality_1 + t_personality_2 + t_personality_3
+    if total < 1.0:
+        warning_message = f"警告: 选择模型1、模型2和模型3的概率总和为 {total:.2f},小于 1.0!请调整滑块使总和等于 1.0。"
+ return warning_message
+ else:
+ return "" # 没有警告时返回空字符串
+
+#==============================================
+#人格保存函数
+def save_personality_config(t_personality_1, t_personality_2, t_personality_3, t_prompt_schedule):
+ config_data["personality"]["personality_1_probability"] = t_personality_1
+ config_data["personality"]["personality_2_probability"] = t_personality_2
+ config_data["personality"]["personality_3_probability"] = t_personality_3
+ config_data["personality"]["prompt_schedule"] = t_prompt_schedule
+ save_config_to_file(config_data)
+ logger.info("人格配置已保存到 bot_config.toml 文件中")
+ return "人格配置已保存"
+
+def save_message_and_emoji_config(t_min_text_length,
+ t_max_context_size,
+ t_emoji_chance,
+ t_thinking_timeout,
+ t_response_willing_amplifier,
+ t_response_interested_rate_amplifier,
+ t_down_frequency_rate,
+ t_ban_words_final_result,
+ t_ban_msgs_regex_final_result,
+ t_check_interval,
+ t_register_interval,
+ t_auto_save,
+ t_enable_check,
+ t_check_prompt):
+ config_data["message"]["min_text_length"] = t_min_text_length
+ config_data["message"]["max_context_size"] = t_max_context_size
+ config_data["message"]["emoji_chance"] = t_emoji_chance
+ config_data["message"]["thinking_timeout"] = t_thinking_timeout
+ config_data["message"]["response_willing_amplifier"] = t_response_willing_amplifier
+ config_data["message"]["response_interested_rate_amplifier"] = t_response_interested_rate_amplifier
+ config_data["message"]["down_frequency_rate"] = t_down_frequency_rate
+ config_data["message"]["ban_words"] = format_list_to_str_alias(t_ban_words_final_result)
+ config_data["message"]["ban_msgs_regex"] = format_list_to_str_alias(t_ban_msgs_regex_final_result)
+ config_data["emoji"]["check_interval"] = t_check_interval
+ config_data["emoji"]["register_interval"] = t_register_interval
+ config_data["emoji"]["auto_save"] = t_auto_save
+ config_data["emoji"]["enable_check"] = t_enable_check
+ config_data["emoji"]["check_prompt"] = t_check_prompt
+ save_config_to_file(config_data)
+ logger.info("消息和表情配置已保存到 bot_config.toml 文件中")
+ return "消息和表情配置已保存"
+
+def save_response_model_config(t_model_r1_probability,
+ t_model_r2_probability,
+ t_model_r3_probability,
+ t_max_response_length,
+ t_model1_name,
+ t_model1_provider,
+ t_model1_pri_in,
+ t_model1_pri_out,
+ t_model2_name,
+ t_model2_provider,
+ t_model3_name,
+ t_model3_provider,
+ t_emotion_model_name,
+ t_emotion_model_provider,
+ t_topic_judge_model_name,
+ t_topic_judge_model_provider,
+ t_summary_by_topic_model_name,
+ t_summary_by_topic_model_provider,
+ t_vlm_model_name,
+ t_vlm_model_provider):
+ config_data["response"]["model_r1_probability"] = t_model_r1_probability
+ config_data["response"]["model_v3_probability"] = t_model_r2_probability
+ config_data["response"]["model_r1_distill_probability"] = t_model_r3_probability
+ config_data["response"]["max_response_length"] = t_max_response_length
+ config_data['model']['llm_reasoning']['name'] = t_model1_name
+ config_data['model']['llm_reasoning']['provider'] = t_model1_provider
+ config_data['model']['llm_reasoning']['pri_in'] = t_model1_pri_in
+ config_data['model']['llm_reasoning']['pri_out'] = t_model1_pri_out
+ config_data['model']['llm_normal']['name'] = t_model2_name
+ config_data['model']['llm_normal']['provider'] = t_model2_provider
+ config_data['model']['llm_reasoning_minor']['name'] = t_model3_name
+    config_data['model']['llm_reasoning_minor']['provider'] = t_model3_provider
+ config_data['model']['llm_emotion_judge']['name'] = t_emotion_model_name
+ config_data['model']['llm_emotion_judge']['provider'] = t_emotion_model_provider
+ config_data['model']['llm_topic_judge']['name'] = t_topic_judge_model_name
+ config_data['model']['llm_topic_judge']['provider'] = t_topic_judge_model_provider
+ config_data['model']['llm_summary_by_topic']['name'] = t_summary_by_topic_model_name
+ config_data['model']['llm_summary_by_topic']['provider'] = t_summary_by_topic_model_provider
+ config_data['model']['vlm']['name'] = t_vlm_model_name
+ config_data['model']['vlm']['provider'] = t_vlm_model_provider
+ save_config_to_file(config_data)
+ logger.info("回复&模型设置已保存到 bot_config.toml 文件中")
+ return "回复&模型设置已保存"
+def save_memory_mood_config(t_build_memory_interval, t_memory_compress_rate, t_forget_memory_interval, t_memory_forget_time, t_memory_forget_percentage, t_memory_ban_words_final_result, t_mood_update_interval, t_mood_decay_rate, t_mood_intensity_factor):
+ config_data["memory"]["build_memory_interval"] = t_build_memory_interval
+ config_data["memory"]["memory_compress_rate"] = t_memory_compress_rate
+ config_data["memory"]["forget_memory_interval"] = t_forget_memory_interval
+ config_data["memory"]["memory_forget_time"] = t_memory_forget_time
+ config_data["memory"]["memory_forget_percentage"] = t_memory_forget_percentage
+ config_data["memory"]["memory_ban_words"] = format_list_to_str_alias(t_memory_ban_words_final_result)
+ config_data["mood"]["update_interval"] = t_mood_update_interval
+ config_data["mood"]["decay_rate"] = t_mood_decay_rate
+ config_data["mood"]["intensity_factor"] = t_mood_intensity_factor
+ save_config_to_file(config_data)
+ logger.info("记忆和心情设置已保存到 bot_config.toml 文件中")
+ return "记忆和心情设置已保存"
+
+def save_other_config(t_keywords_reaction_enabled, t_enable_advance_output, t_enable_kuuki_read, t_enable_debug_output, t_enable_friend_chat, t_chinese_typo_enabled, t_error_rate, t_min_freq, t_tone_error_rate, t_word_replace_rate):
+ config_data['keywords_reaction']['enable'] = t_keywords_reaction_enabled
+ config_data['others']['enable_advance_output'] = t_enable_advance_output
+ config_data['others']['enable_kuuki_read'] = t_enable_kuuki_read
+ config_data['others']['enable_debug_output'] = t_enable_debug_output
+ config_data["chinese_typo"]["enable"] = t_chinese_typo_enabled
+ config_data["chinese_typo"]["error_rate"] = t_error_rate
+ config_data["chinese_typo"]["min_freq"] = t_min_freq
+ config_data["chinese_typo"]["tone_error_rate"] = t_tone_error_rate
+ config_data["chinese_typo"]["word_replace_rate"] = t_word_replace_rate
+ save_config_to_file(config_data)
+ logger.info("其他设置已保存到 bot_config.toml 文件中")
+ return "其他设置已保存"
+
+def save_group_config(t_talk_allowed_final_result,
+ t_talk_frequency_down_final_result,
+ t_ban_user_id_final_result,):
+ config_data["groups"]["talk_allowed"] = format_list_to_int(t_talk_allowed_final_result)
+ config_data["groups"]["talk_frequency_down"] = format_list_to_int(t_talk_frequency_down_final_result)
+ config_data["groups"]["ban_user_id"] = format_list_to_int(t_ban_user_id_final_result)
+ save_config_to_file(config_data)
+ logger.info("群聊设置已保存到 bot_config.toml 文件中")
+ return "群聊设置已保存"
+
+with gr.Blocks(title="MaimBot配置文件编辑") as app:
+ gr.Markdown(
+ value="""
+ 欢迎使用由墨梓柒MotricSeven编写的MaimBot配置文件编辑器\n
+ """
+ )
+ gr.Markdown(
+ value="配置文件版本:" + config_data["inner"]["version"]
+ )
+ with gr.Tabs():
+ with gr.TabItem("0-环境设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ gr.Markdown(
+ value="""
+ MaimBot服务器地址,默认127.0.0.1\n
+ 不熟悉配置的不要轻易改动此项!!\n
+ """
+ )
+ with gr.Row():
+ server_address = gr.Textbox(
+ label="服务器地址",
+ value=env_config_data["env_HOST"],
+ interactive=True
+ )
+ with gr.Row():
+ server_port = gr.Textbox(
+ label="服务器端口",
+ value=env_config_data["env_PORT"],
+ interactive=True
+ )
+ with gr.Row():
+ plugin_list = parse_list_str(env_config_data['env_PLUGINS'])
+ with gr.Blocks():
+ list_state = gr.State(value=plugin_list.copy())
+
+ with gr.Row():
+ list_display = gr.TextArea(
+ value="\n".join(plugin_list),
+ label="插件列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ new_item_input = gr.Textbox(label="添加新插件")
+ add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ item_to_delete = gr.Dropdown(
+ choices=plugin_list,
+ label="选择要删除的插件"
+ )
+ delete_btn = gr.Button("删除", scale=1)
+
+ final_result = gr.Text(label="修改后的列表")
+ add_btn.click(
+ add_item,
+ inputs=[new_item_input, list_state],
+ outputs=[list_state, list_display, item_to_delete, final_result]
+ )
+
+ delete_btn.click(
+ delete_item,
+ inputs=[item_to_delete, list_state],
+ outputs=[list_state, list_display, item_to_delete, final_result]
+ )
+ with gr.Row():
+ gr.Markdown(
+ '''MongoDB设置项\n
+ 保持默认即可,如果你有能力承担修改过后的后果(简称能改回来(笑))\n
+ 可以对以下配置项进行修改\n
+ '''
+ )
+ with gr.Row():
+ mongodb_host = gr.Textbox(
+ label="MongoDB服务器地址",
+ value=env_config_data["env_MONGODB_HOST"],
+ interactive=True
+ )
+ with gr.Row():
+ mongodb_port = gr.Textbox(
+ label="MongoDB服务器端口",
+ value=env_config_data["env_MONGODB_PORT"],
+ interactive=True
+ )
+ with gr.Row():
+ mongodb_database_name = gr.Textbox(
+ label="MongoDB数据库名称",
+ value=env_config_data["env_DATABASE_NAME"],
+ interactive=True
+ )
+ with gr.Row():
+ gr.Markdown(
+                        '''ChatAnyWhere的baseURL和APIkey\n
+ 改完了记得保存!!!
+ '''
+ )
+ with gr.Row():
+ chatanywhere_base_url = gr.Textbox(
+                        label="ChatAnyWhere的BaseURL",
+ value=env_config_data["env_CHAT_ANY_WHERE_BASE_URL"],
+ interactive=True
+ )
+ with gr.Row():
+ chatanywhere_key = gr.Textbox(
+                        label="ChatAnyWhere的key",
+ value=env_config_data["env_CHAT_ANY_WHERE_KEY"],
+ interactive=True
+ )
+ with gr.Row():
+ gr.Markdown(
+ '''SiliconFlow的baseURL和APIkey\n
+ 改完了记得保存!!!
+ '''
+ )
+ with gr.Row():
+ siliconflow_base_url = gr.Textbox(
+ label="SiliconFlow的BaseURL",
+ value=env_config_data["env_SILICONFLOW_BASE_URL"],
+ interactive=True
+ )
+ with gr.Row():
+ siliconflow_key = gr.Textbox(
+ label="SiliconFlow的key",
+ value=env_config_data["env_SILICONFLOW_KEY"],
+ interactive=True
+ )
+ with gr.Row():
+ gr.Markdown(
+ '''DeepSeek的baseURL和APIkey\n
+ 改完了记得保存!!!
+ '''
+ )
+ with gr.Row():
+ deepseek_base_url = gr.Textbox(
+ label="DeepSeek的BaseURL",
+ value=env_config_data["env_DEEP_SEEK_BASE_URL"],
+ interactive=True
+ )
+ with gr.Row():
+ deepseek_key = gr.Textbox(
+ label="DeepSeek的key",
+ value=env_config_data["env_DEEP_SEEK_KEY"],
+ interactive=True
+ )
+ with gr.Row():
+ save_env_btn = gr.Button("保存环境配置")
+ with gr.Row():
+ save_env_btn.click(
+ save_trigger,
+ inputs=[server_address,server_port,final_result,mongodb_host,mongodb_port,mongodb_database_name,chatanywhere_base_url,chatanywhere_key,siliconflow_base_url,siliconflow_key,deepseek_base_url,deepseek_key],
+ outputs=[gr.Textbox(
+ label="保存结果",
+ interactive=False
+ )]
+ )
+ with gr.TabItem("1-Bot基础设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ qqbot_qq = gr.Textbox(
+ label="QQ机器人QQ号",
+ value=config_data["bot"]["qq"],
+ interactive=True
+ )
+ with gr.Row():
+ nickname = gr.Textbox(
+ label="昵称",
+ value=config_data["bot"]["nickname"],
+ interactive=True
+ )
+ with gr.Row():
+ nickname_list = config_data['bot']['alias_names']
+ with gr.Blocks():
+ nickname_list_state = gr.State(value=nickname_list.copy())
+
+ with gr.Row():
+ nickname_list_display = gr.TextArea(
+ value="\n".join(nickname_list),
+ label="别名列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ nickname_new_item_input = gr.Textbox(label="添加新别名")
+ nickname_add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ nickname_item_to_delete = gr.Dropdown(
+ choices=nickname_list,
+ label="选择要删除的别名"
+ )
+ nickname_delete_btn = gr.Button("删除", scale=1)
+
+ nickname_final_result = gr.Text(label="修改后的列表")
+ nickname_add_btn.click(
+ add_item,
+ inputs=[nickname_new_item_input, nickname_list_state],
+ outputs=[nickname_list_state, nickname_list_display, nickname_item_to_delete, nickname_final_result]
+ )
+
+ nickname_delete_btn.click(
+ delete_item,
+ inputs=[nickname_item_to_delete, nickname_list_state],
+ outputs=[nickname_list_state, nickname_list_display, nickname_item_to_delete, nickname_final_result]
+ )
+ gr.Button(
+ "保存Bot配置",
+ variant="primary",
+ elem_id="save_bot_btn",
+ elem_classes="save_bot_btn"
+ ).click(
+ save_bot_config,
+ inputs=[qqbot_qq, nickname,nickname_final_result],
+ outputs=[gr.Textbox(
+ label="保存Bot结果"
+ )]
+ )
+ with gr.TabItem("2-人格设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ prompt_personality_1 = gr.Textbox(
+ label="人格1提示词",
+ value=config_data['personality']['prompt_personality'][0],
+ interactive=True
+ )
+ with gr.Row():
+ prompt_personality_2 = gr.Textbox(
+ label="人格2提示词",
+ value=config_data['personality']['prompt_personality'][1],
+ interactive=True
+ )
+ with gr.Row():
+ prompt_personality_3 = gr.Textbox(
+ label="人格3提示词",
+ value=config_data['personality']['prompt_personality'][2],
+ interactive=True
+ )
+ with gr.Column(scale=3):
+ # 创建三个滑块
+ personality_1 = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data["personality"]["personality_1_probability"], label="人格1概率")
+ personality_2 = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data["personality"]["personality_2_probability"], label="人格2概率")
+ personality_3 = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data["personality"]["personality_3_probability"], label="人格3概率")
+
+ # 用于显示警告消息
+ warning_greater_text = gr.Markdown()
+ warning_less_text = gr.Markdown()
+
+ # 绑定滑块的值变化事件,确保总和必须等于 1.0
+ personality_1.change(adjust_greater_probabilities, inputs=[personality_1, personality_2, personality_3], outputs=[warning_greater_text])
+ personality_2.change(adjust_greater_probabilities, inputs=[personality_1, personality_2, personality_3], outputs=[warning_greater_text])
+ personality_3.change(adjust_greater_probabilities, inputs=[personality_1, personality_2, personality_3], outputs=[warning_greater_text])
+ personality_1.change(adjust_less_probabilities, inputs=[personality_1, personality_2, personality_3], outputs=[warning_less_text])
+ personality_2.change(adjust_less_probabilities, inputs=[personality_1, personality_2, personality_3], outputs=[warning_less_text])
+ personality_3.change(adjust_less_probabilities, inputs=[personality_1, personality_2, personality_3], outputs=[warning_less_text])
+ with gr.Row():
+ prompt_schedule = gr.Textbox(
+ label="日程生成提示词",
+ value=config_data["personality"]["prompt_schedule"],
+ interactive=True
+ )
+ with gr.Row():
+ gr.Button(
+ "保存人格配置",
+ variant="primary",
+ elem_id="save_personality_btn",
+ elem_classes="save_personality_btn"
+ ).click(
+ save_personality_config,
+ inputs=[personality_1, personality_2, personality_3, prompt_schedule],
+ outputs=[gr.Textbox(
+ label="保存人格结果"
+ )]
+ )
+ with gr.TabItem("3-消息&表情包设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ min_text_length = gr.Number(value=config_data['message']['min_text_length'], label="与麦麦聊天时麦麦只会回答文本大于等于此数的消息")
+ with gr.Row():
+ max_context_size = gr.Number(value=config_data['message']['max_context_size'], label="麦麦获得的上文数量")
+ with gr.Row():
+ emoji_chance = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data['message']['emoji_chance'], label="麦麦使用表情包的概率")
+ with gr.Row():
+ thinking_timeout = gr.Number(value=config_data['message']['thinking_timeout'], label="麦麦正在思考时,如果超过此秒数,则停止思考")
+ with gr.Row():
+ response_willing_amplifier = gr.Number(value=config_data['message']['response_willing_amplifier'], label="麦麦回复意愿放大系数,一般为1")
+ with gr.Row():
+ response_interested_rate_amplifier = gr.Number(value=config_data['message']['response_interested_rate_amplifier'], label="麦麦回复兴趣度放大系数,听到记忆里的内容时放大系数")
+ with gr.Row():
+ down_frequency_rate = gr.Number(value=config_data['message']['down_frequency_rate'], label="降低回复频率的群组回复意愿降低系数")
+ with gr.Row():
+ gr.Markdown("### 违禁词列表")
+ with gr.Row():
+ ban_words_list = config_data['message']['ban_words']
+ with gr.Blocks():
+ ban_words_list_state = gr.State(value=ban_words_list.copy())
+ with gr.Row():
+ ban_words_list_display = gr.TextArea(
+ value="\n".join(ban_words_list),
+ label="违禁词列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ ban_words_new_item_input = gr.Textbox(label="添加新违禁词")
+ ban_words_add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ ban_words_item_to_delete = gr.Dropdown(
+ choices=ban_words_list,
+ label="选择要删除的违禁词"
+ )
+ ban_words_delete_btn = gr.Button("删除", scale=1)
+
+ ban_words_final_result = gr.Text(label="修改后的违禁词")
+ ban_words_add_btn.click(
+ add_item,
+ inputs=[ban_words_new_item_input, ban_words_list_state],
+ outputs=[ban_words_list_state, ban_words_list_display, ban_words_item_to_delete, ban_words_final_result]
+ )
+
+ ban_words_delete_btn.click(
+ delete_item,
+ inputs=[ban_words_item_to_delete, ban_words_list_state],
+ outputs=[ban_words_list_state, ban_words_list_display, ban_words_item_to_delete, ban_words_final_result]
+ )
+ with gr.Row():
+ gr.Markdown("### 检测违禁消息正则表达式列表")
+ with gr.Row():
+ gr.Markdown(
+ """
+ 需要过滤的消息(原始消息)匹配的正则表达式,匹配到的消息将被过滤(支持CQ码),若不了解正则表达式请勿修改\n
+ "https?://[^\\s]+", # 匹配https链接\n
+ "\\d{4}-\\d{2}-\\d{2}", # 匹配日期\n
+ "\\[CQ:at,qq=\\d+\\]" # 匹配@\n
+ """
+ )
+ with gr.Row():
+ ban_msgs_regex_list = config_data['message']['ban_msgs_regex']
+ with gr.Blocks():
+ ban_msgs_regex_list_state = gr.State(value=ban_msgs_regex_list.copy())
+ with gr.Row():
+ ban_msgs_regex_list_display = gr.TextArea(
+ value="\n".join(ban_msgs_regex_list),
+ label="违禁消息正则列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ ban_msgs_regex_new_item_input = gr.Textbox(label="添加新违禁消息正则")
+ ban_msgs_regex_add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ ban_msgs_regex_item_to_delete = gr.Dropdown(
+ choices=ban_msgs_regex_list,
+ label="选择要删除的违禁消息正则"
+ )
+ ban_msgs_regex_delete_btn = gr.Button("删除", scale=1)
+
+ ban_msgs_regex_final_result = gr.Text(label="修改后的违禁消息正则")
+ ban_msgs_regex_add_btn.click(
+ add_item,
+ inputs=[ban_msgs_regex_new_item_input, ban_msgs_regex_list_state],
+ outputs=[ban_msgs_regex_list_state, ban_msgs_regex_list_display, ban_msgs_regex_item_to_delete, ban_msgs_regex_final_result]
+ )
+
+ ban_msgs_regex_delete_btn.click(
+ delete_item,
+ inputs=[ban_msgs_regex_item_to_delete, ban_msgs_regex_list_state],
+ outputs=[ban_msgs_regex_list_state, ban_msgs_regex_list_display, ban_msgs_regex_item_to_delete, ban_msgs_regex_final_result]
+ )
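+                        # The fields below are read from config_data['emoji']; everything above in this tab comes from config_data['message'].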
+ with gr.Row():
+ check_interval = gr.Number(value=config_data['emoji']['check_interval'], label="检查表情包的时间间隔")
+ with gr.Row():
+ register_interval = gr.Number(value=config_data['emoji']['register_interval'], label="注册表情包的时间间隔")
+ with gr.Row():
+ auto_save = gr.Checkbox(value=config_data['emoji']['auto_save'], label="自动保存表情包")
+ with gr.Row():
+ enable_check = gr.Checkbox(value=config_data['emoji']['enable_check'], label="启用表情包检查")
+ with gr.Row():
+ check_prompt = gr.Textbox(value=config_data['emoji']['check_prompt'], label="表情包过滤要求")
+ with gr.Row():
+ gr.Button(
+ "保存消息&表情包设置",
+ variant="primary",
+                                elem_id="save_message_emoji_btn",
+ elem_classes="save_personality_btn"
+ ).click(
+ save_message_and_emoji_config,
+ inputs=[
+ min_text_length,
+ max_context_size,
+ emoji_chance,
+ thinking_timeout,
+ response_willing_amplifier,
+ response_interested_rate_amplifier,
+ down_frequency_rate,
+ ban_words_final_result,
+ ban_msgs_regex_final_result,
+ check_interval,
+ register_interval,
+ auto_save,
+ enable_check,
+ check_prompt
+ ],
+ outputs=[gr.Textbox(
+ label="消息&表情包设置保存结果"
+ )]
+ )
+ with gr.TabItem("4-回复&模型设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ gr.Markdown(
+ """### 回复设置"""
+ )
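+                        # Despite the r1/r2/r3 variable names, the three sliders map to the
+                        # model_r1_probability, model_v3_probability and model_r1_distill_probability keys of config_data['response'].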
+ with gr.Row():
+                            model_r1_probability = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data['response']['model_r1_probability'], label="麦麦回答时选择主要回复模型1的概率")
+                        with gr.Row():
+                            model_r2_probability = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data['response']['model_v3_probability'], label="麦麦回答时选择主要回复模型2的概率")
+                        with gr.Row():
+                            model_r3_probability = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data['response']['model_r1_distill_probability'], label="麦麦回答时选择主要回复模型3的概率")
+                        # Markdown components used to display warning messages
+ with gr.Row():
+ model_warning_greater_text = gr.Markdown()
+ model_warning_less_text = gr.Markdown()
+
+                        # Bind the sliders' change events so the three probabilities are kept summing to 1.0
+ model_r1_probability.change(adjust_model_greater_probabilities, inputs=[model_r1_probability, model_r2_probability, model_r3_probability], outputs=[model_warning_greater_text])
+ model_r2_probability.change(adjust_model_greater_probabilities, inputs=[model_r1_probability, model_r2_probability, model_r3_probability], outputs=[model_warning_greater_text])
+ model_r3_probability.change(adjust_model_greater_probabilities, inputs=[model_r1_probability, model_r2_probability, model_r3_probability], outputs=[model_warning_greater_text])
+ model_r1_probability.change(adjust_model_less_probabilities, inputs=[model_r1_probability, model_r2_probability, model_r3_probability], outputs=[model_warning_less_text])
+ model_r2_probability.change(adjust_model_less_probabilities, inputs=[model_r1_probability, model_r2_probability, model_r3_probability], outputs=[model_warning_less_text])
+ model_r3_probability.change(adjust_model_less_probabilities, inputs=[model_r1_probability, model_r2_probability, model_r3_probability], outputs=[model_warning_less_text])
+ with gr.Row():
+ max_response_length = gr.Number(value=config_data['response']['max_response_length'], label="麦麦回答的最大token数")
+ with gr.Row():
+ gr.Markdown(
+ """### 模型设置"""
+ )
+ with gr.Tabs():
+ with gr.TabItem("1-主要回复模型"):
+ with gr.Row():
+ model1_name = gr.Textbox(value=config_data['model']['llm_reasoning']['name'], label="模型1的名称")
+ with gr.Row():
+ model1_provider = gr.Dropdown(choices=["SILICONFLOW","DEEP_SEEK", "CHAT_ANY_WHERE"], value=config_data['model']['llm_reasoning']['provider'], label="模型1(主要回复模型)提供商")
+ with gr.Row():
+ model1_pri_in = gr.Number(value=config_data['model']['llm_reasoning']['pri_in'], label="模型1(主要回复模型)的输入价格(非必填,可以记录消耗)")
+ with gr.Row():
+ model1_pri_out = gr.Number(value=config_data['model']['llm_reasoning']['pri_out'], label="模型1(主要回复模型)的输出价格(非必填,可以记录消耗)")
+ with gr.TabItem("2-次要回复模型"):
+ with gr.Row():
+ model2_name = gr.Textbox(value=config_data['model']['llm_normal']['name'], label="模型2的名称")
+ with gr.Row():
+ model2_provider = gr.Dropdown(choices=["SILICONFLOW","DEEP_SEEK", "CHAT_ANY_WHERE"], value=config_data['model']['llm_normal']['provider'], label="模型2提供商")
+ with gr.TabItem("3-次要模型"):
+ with gr.Row():
+ model3_name = gr.Textbox(value=config_data['model']['llm_reasoning_minor']['name'], label="模型3的名称")
+ with gr.Row():
+ model3_provider = gr.Dropdown(choices=["SILICONFLOW","DEEP_SEEK", "CHAT_ANY_WHERE"], value=config_data['model']['llm_reasoning_minor']['provider'], label="模型3提供商")
+ with gr.TabItem("4-情感&主题模型"):
+ with gr.Row():
+ gr.Markdown(
+ """### 情感模型设置"""
+ )
+ with gr.Row():
+ emotion_model_name = gr.Textbox(value=config_data['model']['llm_emotion_judge']['name'], label="情感模型名称")
+ with gr.Row():
+ emotion_model_provider = gr.Dropdown(choices=["SILICONFLOW","DEEP_SEEK", "CHAT_ANY_WHERE"], value=config_data['model']['llm_emotion_judge']['provider'], label="情感模型提供商")
+ with gr.Row():
+ gr.Markdown(
+ """### 主题模型设置"""
+ )
+ with gr.Row():
+ topic_judge_model_name = gr.Textbox(value=config_data['model']['llm_topic_judge']['name'], label="主题判断模型名称")
+ with gr.Row():
+ topic_judge_model_provider = gr.Dropdown(choices=["SILICONFLOW","DEEP_SEEK", "CHAT_ANY_WHERE"], value=config_data['model']['llm_topic_judge']['provider'], label="主题判断模型提供商")
+ with gr.Row():
+ summary_by_topic_model_name = gr.Textbox(value=config_data['model']['llm_summary_by_topic']['name'], label="主题总结模型名称")
+ with gr.Row():
+ summary_by_topic_model_provider = gr.Dropdown(choices=["SILICONFLOW","DEEP_SEEK", "CHAT_ANY_WHERE"], value=config_data['model']['llm_summary_by_topic']['provider'], label="主题总结模型提供商")
+ with gr.TabItem("5-识图模型"):
+ with gr.Row():
+ gr.Markdown(
+ """### 识图模型设置"""
+ )
+ with gr.Row():
+ vlm_model_name = gr.Textbox(value=config_data['model']['vlm']['name'], label="识图模型名称")
+ with gr.Row():
+ vlm_model_provider = gr.Dropdown(choices=["SILICONFLOW","DEEP_SEEK", "CHAT_ANY_WHERE"], value=config_data['model']['vlm']['provider'], label="识图模型提供商")
+ with gr.Row():
+ save_model_btn = gr.Button("保存回复&模型设置")
+ with gr.Row():
+ save_btn_message = gr.Textbox()
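+                    # Gradio passes input values positionally, so the order below must match save_response_model_config's parameter order.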
+ save_model_btn.click(
+ save_response_model_config,
+ inputs=[model_r1_probability,model_r2_probability,model_r3_probability,max_response_length,model1_name, model1_provider, model1_pri_in, model1_pri_out, model2_name, model2_provider, model3_name, model3_provider, emotion_model_name, emotion_model_provider, topic_judge_model_name, topic_judge_model_provider, summary_by_topic_model_name,summary_by_topic_model_provider,vlm_model_name, vlm_model_provider],
+ outputs=[save_btn_message]
+ )
+ with gr.TabItem("5-记忆&心情设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ gr.Markdown(
+ """### 记忆设置"""
+ )
+ with gr.Row():
+ build_memory_interval = gr.Number(value=config_data['memory']['build_memory_interval'], label="记忆构建间隔 单位秒,间隔越低,麦麦学习越多,但是冗余信息也会增多")
+ with gr.Row():
+ memory_compress_rate = gr.Number(value=config_data['memory']['memory_compress_rate'], label="记忆压缩率 控制记忆精简程度 建议保持默认,调高可以获得更多信息,但是冗余信息也会增多")
+ with gr.Row():
+ forget_memory_interval = gr.Number(value=config_data['memory']['forget_memory_interval'], label="记忆遗忘间隔 单位秒 间隔越低,麦麦遗忘越频繁,记忆更精简,但更难学习")
+ with gr.Row():
+ memory_forget_time = gr.Number(value=config_data['memory']['memory_forget_time'], label="多长时间后的记忆会被遗忘 单位小时 ")
+ with gr.Row():
+ memory_forget_percentage = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data['memory']['memory_forget_percentage'], label="记忆遗忘比例 控制记忆遗忘程度 越大遗忘越多 建议保持默认")
+ with gr.Row():
+ memory_ban_words_list = config_data['memory']['memory_ban_words']
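+                        # Same list-editor pattern as the ban-word editors in the message tab.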
+ with gr.Blocks():
+ memory_ban_words_list_state = gr.State(value=memory_ban_words_list.copy())
+
+ with gr.Row():
+ memory_ban_words_list_display = gr.TextArea(
+ value="\n".join(memory_ban_words_list),
+ label="不希望记忆词列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ memory_ban_words_new_item_input = gr.Textbox(label="添加不希望记忆词")
+ memory_ban_words_add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ memory_ban_words_item_to_delete = gr.Dropdown(
+ choices=memory_ban_words_list,
+ label="选择要删除的不希望记忆词"
+ )
+ memory_ban_words_delete_btn = gr.Button("删除", scale=1)
+
+ memory_ban_words_final_result = gr.Text(label="修改后的不希望记忆词列表")
+ memory_ban_words_add_btn.click(
+ add_item,
+ inputs=[memory_ban_words_new_item_input, memory_ban_words_list_state],
+ outputs=[memory_ban_words_list_state, memory_ban_words_list_display, memory_ban_words_item_to_delete, memory_ban_words_final_result]
+ )
+
+ memory_ban_words_delete_btn.click(
+ delete_item,
+ inputs=[memory_ban_words_item_to_delete, memory_ban_words_list_state],
+ outputs=[memory_ban_words_list_state, memory_ban_words_list_display, memory_ban_words_item_to_delete, memory_ban_words_final_result]
+ )
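+                        # Mood parameters come from config_data['mood'] and are saved together with the memory settings by save_memory_mood_config.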
+ with gr.Row():
+ mood_update_interval = gr.Number(value=config_data['mood']['mood_update_interval'], label="心情更新间隔 单位秒")
+ with gr.Row():
+ mood_decay_rate = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data['mood']['mood_decay_rate'], label="心情衰减率")
+ with gr.Row():
+ mood_intensity_factor = gr.Number(value=config_data['mood']['mood_intensity_factor'], label="心情强度因子")
+ with gr.Row():
+                            save_memory_mood_btn = gr.Button("保存记忆&心情设置")
+ with gr.Row():
+ save_memory_mood_message = gr.Textbox()
+ with gr.Row():
+ save_memory_mood_btn.click(
+ save_memory_mood_config,
+ inputs=[build_memory_interval, memory_compress_rate, forget_memory_interval, memory_forget_time, memory_forget_percentage, memory_ban_words_final_result, mood_update_interval, mood_decay_rate, mood_intensity_factor],
+ outputs=[save_memory_mood_message]
+ )
+ with gr.TabItem("6-群组设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ gr.Markdown(
+ """## 群组设置"""
+ )
+ with gr.Row():
+ gr.Markdown(
+ """### 可以回复消息的群"""
+ )
+ with gr.Row():
+ talk_allowed_list = config_data['groups']['talk_allowed']
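+                        # The three ID lists in this tab store integers, so they use the add_int_item/delete_int_item
+                        # helpers and join the values with str() for display.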
+ with gr.Blocks():
+ talk_allowed_list_state = gr.State(value=talk_allowed_list.copy())
+
+ with gr.Row():
+ talk_allowed_list_display = gr.TextArea(
+ value="\n".join(map(str, talk_allowed_list)),
+ label="可以回复消息的群列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ talk_allowed_new_item_input = gr.Textbox(label="添加新群")
+ talk_allowed_add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ talk_allowed_item_to_delete = gr.Dropdown(
+ choices=talk_allowed_list,
+ label="选择要删除的群"
+ )
+ talk_allowed_delete_btn = gr.Button("删除", scale=1)
+
+ talk_allowed_final_result = gr.Text(label="修改后的可以回复消息的群列表")
+ talk_allowed_add_btn.click(
+ add_int_item,
+ inputs=[talk_allowed_new_item_input, talk_allowed_list_state],
+ outputs=[talk_allowed_list_state, talk_allowed_list_display, talk_allowed_item_to_delete, talk_allowed_final_result]
+ )
+
+ talk_allowed_delete_btn.click(
+ delete_int_item,
+ inputs=[talk_allowed_item_to_delete, talk_allowed_list_state],
+ outputs=[talk_allowed_list_state, talk_allowed_list_display, talk_allowed_item_to_delete, talk_allowed_final_result]
+ )
+ with gr.Row():
+ talk_frequency_down_list = config_data['groups']['talk_frequency_down']
+ with gr.Blocks():
+ talk_frequency_down_list_state = gr.State(value=talk_frequency_down_list.copy())
+
+ with gr.Row():
+ talk_frequency_down_list_display = gr.TextArea(
+ value="\n".join(map(str, talk_frequency_down_list)),
+ label="降低回复频率的群列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ talk_frequency_down_new_item_input = gr.Textbox(label="添加新群")
+ talk_frequency_down_add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ talk_frequency_down_item_to_delete = gr.Dropdown(
+ choices=talk_frequency_down_list,
+ label="选择要删除的群"
+ )
+ talk_frequency_down_delete_btn = gr.Button("删除", scale=1)
+
+ talk_frequency_down_final_result = gr.Text(label="修改后的降低回复频率的群列表")
+ talk_frequency_down_add_btn.click(
+ add_int_item,
+ inputs=[talk_frequency_down_new_item_input, talk_frequency_down_list_state],
+ outputs=[talk_frequency_down_list_state, talk_frequency_down_list_display, talk_frequency_down_item_to_delete, talk_frequency_down_final_result]
+ )
+
+ talk_frequency_down_delete_btn.click(
+ delete_int_item,
+ inputs=[talk_frequency_down_item_to_delete, talk_frequency_down_list_state],
+ outputs=[talk_frequency_down_list_state, talk_frequency_down_list_display, talk_frequency_down_item_to_delete, talk_frequency_down_final_result]
+ )
+ with gr.Row():
+ ban_user_id_list = config_data['groups']['ban_user_id']
+ with gr.Blocks():
+ ban_user_id_list_state = gr.State(value=ban_user_id_list.copy())
+
+ with gr.Row():
+ ban_user_id_list_display = gr.TextArea(
+ value="\n".join(map(str, ban_user_id_list)),
+ label="禁止回复消息的QQ号列表",
+ interactive=False,
+ lines=5
+ )
+ with gr.Row():
+ with gr.Column(scale=3):
+ ban_user_id_new_item_input = gr.Textbox(label="添加新QQ号")
+ ban_user_id_add_btn = gr.Button("添加", scale=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ ban_user_id_item_to_delete = gr.Dropdown(
+ choices=ban_user_id_list,
+ label="选择要删除的QQ号"
+ )
+ ban_user_id_delete_btn = gr.Button("删除", scale=1)
+
+ ban_user_id_final_result = gr.Text(label="修改后的禁止回复消息的QQ号列表")
+ ban_user_id_add_btn.click(
+ add_int_item,
+ inputs=[ban_user_id_new_item_input, ban_user_id_list_state],
+ outputs=[ban_user_id_list_state, ban_user_id_list_display, ban_user_id_item_to_delete, ban_user_id_final_result]
+ )
+
+ ban_user_id_delete_btn.click(
+ delete_int_item,
+ inputs=[ban_user_id_item_to_delete, ban_user_id_list_state],
+ outputs=[ban_user_id_list_state, ban_user_id_list_display, ban_user_id_item_to_delete, ban_user_id_final_result]
+ )
+ with gr.Row():
+ save_group_btn = gr.Button("保存群组设置")
+ with gr.Row():
+ save_group_btn_message = gr.Textbox()
+ with gr.Row():
+ save_group_btn.click(
+ save_group_config,
+ inputs=[
+ talk_allowed_final_result,
+ talk_frequency_down_final_result,
+ ban_user_id_final_result,
+ ],
+ outputs=[save_group_btn_message]
+ )
+ with gr.TabItem("7-其他设置"):
+ with gr.Row():
+ with gr.Column(scale=3):
+ with gr.Row():
+ gr.Markdown(
+ """### 其他设置"""
+ )
+ with gr.Row():
+ keywords_reaction_enabled = gr.Checkbox(value=config_data['keywords_reaction']['enable'], label="是否针对某个关键词作出反应")
+ with gr.Row():
+ enable_advance_output = gr.Checkbox(value=config_data['others']['enable_advance_output'], label="是否开启高级输出")
+ with gr.Row():
+ enable_kuuki_read = gr.Checkbox(value=config_data['others']['enable_kuuki_read'], label="是否启用读空气功能")
+ with gr.Row():
+ enable_debug_output = gr.Checkbox(value=config_data['others']['enable_debug_output'], label="是否开启调试输出")
+ with gr.Row():
+ enable_friend_chat = gr.Checkbox(value=config_data['others']['enable_friend_chat'], label="是否开启好友聊天")
+ with gr.Row():
+ gr.Markdown(
+ """### 中文错别字设置"""
+ )
+ with gr.Row():
+ chinese_typo_enabled = gr.Checkbox(value=config_data['chinese_typo']['enable'], label="是否开启中文错别字")
+ with gr.Row():
+ error_rate = gr.Slider(minimum=0, maximum=1, step=0.001, value=config_data['chinese_typo']['error_rate'], label="单字替换概率")
+ with gr.Row():
+ min_freq = gr.Number(value=config_data['chinese_typo']['min_freq'], label="最小字频阈值")
+ with gr.Row():
+ tone_error_rate = gr.Slider(minimum=0, maximum=1, step=0.01, value=config_data['chinese_typo']['tone_error_rate'], label="声调错误概率")
+ with gr.Row():
+ word_replace_rate = gr.Slider(minimum=0, maximum=1, step=0.001, value=config_data['chinese_typo']['word_replace_rate'], label="整词替换概率")
+ with gr.Row():
+ save_other_config_btn = gr.Button("保存其他配置")
+ with gr.Row():
+ save_other_config_message = gr.Textbox()
+ with gr.Row():
+ save_other_config_btn.click(
+ save_other_config,
+ inputs=[keywords_reaction_enabled,enable_advance_output, enable_kuuki_read, enable_debug_output, enable_friend_chat, chinese_typo_enabled, error_rate, min_freq, tone_error_rate, word_replace_rate],
+ outputs=[save_other_config_message]
+ )
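+    # Serve the UI on all interfaces at port 7000; is_share and debug are set earlier in this script.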
+    app.queue().launch(  # concurrency_count=511, max_size=1022
+ server_name="0.0.0.0",
+ inbrowser=True,
+ share=is_share,
+ server_port=7000,
+ debug=debug,
+ quiet=True,
+ )
\ No newline at end of file