Merge branch 'SengokuCola:debug' into debug
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -21,6 +21,7 @@ __pycache__/
|
||||
llm_statistics.txt
|
||||
mongodb
|
||||
napcat
|
||||
run_dev.bat
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
@@ -36,7 +36,8 @@
|
||||
> - 由于持续迭代,可能存在一些已知或未知的bug
|
||||
> - 由于开发中,可能消耗较多token
|
||||
|
||||
**交流群**: 766798517(仅用于开发和建议相关讨论)不一定有空回复,但大家可以自行交流部署问题,我会优先写文档和代码
|
||||
**交流群**: 766798517 一群人较多,建议加下面的(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
|
||||
**交流群**: 571780722 另一个群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
|
||||
|
||||
##
|
||||
<div align="left">
|
||||
@@ -148,4 +149,4 @@ SengokuCola纯编程外行,面向cursor编程,很多代码史一样多多包
|
||||
|
||||
|
||||
## Stargazers over time
|
||||
[](https://starchart.cc/SengokuCola/MaiMBot)
|
||||
[](https://starchart.cc/SengokuCola/MaiMBot)
|
||||
|
||||
5
bot.py
5
bot.py
@@ -1,11 +1,12 @@
|
||||
import os
|
||||
|
||||
import nonebot
|
||||
from nonebot.adapters.onebot.v11 import Adapter
|
||||
from dotenv import load_dotenv
|
||||
from loguru import logger
|
||||
from nonebot.adapters.onebot.v11 import Adapter
|
||||
|
||||
'''彩蛋'''
|
||||
from colorama import init, Fore
|
||||
from colorama import Fore, init
|
||||
|
||||
init()
|
||||
text = "多年以后,面对AI行刑队,张三将会回想起他2023年在会议上讨论人工智能的那个下午"
|
||||
|
||||
@@ -5,4 +5,19 @@ description = "New Bot Project"
|
||||
|
||||
[tool.nonebot]
|
||||
plugins = ["src.plugins.chat"]
|
||||
plugin_dirs = ["src/plugins"]
|
||||
plugin_dirs = ["src/plugins"]
|
||||
|
||||
[tool.ruff]
|
||||
# 设置 Python 版本
|
||||
target-version = "py39"
|
||||
|
||||
# 启用的规则
|
||||
select = [
|
||||
"E", # pycodestyle 错误
|
||||
"F", # pyflakes
|
||||
"I", # isort
|
||||
"B", # flake8-bugbear
|
||||
]
|
||||
|
||||
# 行长度设置
|
||||
line-length = 88
|
||||
4
run.py
4
run.py
@@ -1,7 +1,7 @@
|
||||
import subprocess
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import zipfile
|
||||
|
||||
import requests
|
||||
from tqdm import tqdm
|
||||
|
||||
|
||||
2
setup.py
2
setup.py
@@ -1,4 +1,4 @@
|
||||
from setuptools import setup, find_packages
|
||||
from setuptools import find_packages, setup
|
||||
|
||||
setup(
|
||||
name="maimai-bot",
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
from pymongo import MongoClient
|
||||
from typing import Optional
|
||||
|
||||
from pymongo import MongoClient
|
||||
|
||||
|
||||
class Database:
|
||||
_instance: Optional["Database"] = None
|
||||
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import customtkinter as ctk
|
||||
from typing import Dict, List
|
||||
import json
|
||||
from datetime import datetime
|
||||
import time
|
||||
import threading
|
||||
import os
|
||||
import queue
|
||||
import sys
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
from datetime import datetime
|
||||
from typing import Dict, List
|
||||
|
||||
import customtkinter as ctk
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# 获取当前文件的目录
|
||||
@@ -25,9 +25,11 @@ else:
|
||||
print("未找到环境配置文件")
|
||||
sys.exit(1)
|
||||
|
||||
from pymongo import MongoClient
|
||||
from typing import Optional
|
||||
|
||||
from pymongo import MongoClient
|
||||
|
||||
|
||||
class Database:
|
||||
_instance: Optional["Database"] = None
|
||||
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
from typing import Dict, List, Union, Optional, Any
|
||||
import base64
|
||||
import os
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
"""
|
||||
OneBot v11 Message Segment Builder
|
||||
|
||||
@@ -1,21 +1,23 @@
|
||||
from loguru import logger
|
||||
from nonebot import on_message, on_command, require, get_driver
|
||||
from nonebot.adapters.onebot.v11 import Bot, GroupMessageEvent, Message, MessageSegment
|
||||
from nonebot.typing import T_State
|
||||
from ...common.database import Database
|
||||
from .config import global_config
|
||||
import os
|
||||
import asyncio
|
||||
import os
|
||||
import random
|
||||
from .relationship_manager import relationship_manager
|
||||
from ..schedule.schedule_generator import bot_schedule
|
||||
from .willing_manager import willing_manager
|
||||
from nonebot.rule import to_me
|
||||
from .bot import chat_bot
|
||||
from .emoji_manager import emoji_manager
|
||||
from ..moods.moods import MoodManager # 导入情绪管理器
|
||||
import time
|
||||
|
||||
from loguru import logger
|
||||
from nonebot import get_driver, on_command, on_message, require
|
||||
from nonebot.adapters.onebot.v11 import Bot, GroupMessageEvent, Message, MessageSegment
|
||||
from nonebot.rule import to_me
|
||||
from nonebot.typing import T_State
|
||||
|
||||
from ...common.database import Database
|
||||
from ..moods.moods import MoodManager # 导入情绪管理器
|
||||
from ..schedule.schedule_generator import bot_schedule
|
||||
from ..utils.statistic import LLMStatistics
|
||||
from .bot import chat_bot
|
||||
from .config import global_config
|
||||
from .emoji_manager import emoji_manager
|
||||
from .relationship_manager import relationship_manager
|
||||
from .willing_manager import willing_manager
|
||||
|
||||
# 创建LLM统计实例
|
||||
llm_stats = LLMStatistics("llm_statistics.txt")
|
||||
@@ -39,12 +41,11 @@ print("\033[1;32m[初始化数据库完成]\033[0m")
|
||||
|
||||
|
||||
# 导入其他模块
|
||||
from ..memory_system.memory import hippocampus, memory_graph
|
||||
from .bot import ChatBot
|
||||
from .emoji_manager import emoji_manager
|
||||
|
||||
# from .message_send_control import message_sender
|
||||
from .relationship_manager import relationship_manager
|
||||
from .message_sender import message_manager,message_sender
|
||||
from ..memory_system.memory import memory_graph,hippocampus
|
||||
from .message_sender import message_manager, message_sender
|
||||
|
||||
# 初始化表情管理器
|
||||
emoji_manager.initialize()
|
||||
|
||||
@@ -1,23 +1,27 @@
|
||||
from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message as EventMessage, Bot
|
||||
from .message import Message, MessageSet, Message_Sending
|
||||
from .config import BotConfig, global_config
|
||||
from .storage import MessageStorage
|
||||
from .llm_generator import ResponseGenerator
|
||||
# from .message_stream import MessageStream, MessageStreamContainer
|
||||
from .topic_identifier import topic_identifier
|
||||
from random import random, choice
|
||||
from .emoji_manager import emoji_manager # 导入表情包管理器
|
||||
from ..moods.moods import MoodManager # 导入情绪管理器
|
||||
import time
|
||||
import os
|
||||
from .cq_code import CQCode # 导入CQCode模块
|
||||
from .message_sender import message_manager # 导入新的消息管理器
|
||||
from .message import Message_Thinking # 导入 Message_Thinking 类
|
||||
from .relationship_manager import relationship_manager
|
||||
from .willing_manager import willing_manager # 导入意愿管理器
|
||||
from .utils import is_mentioned_bot_in_txt, calculate_typing_time
|
||||
from ..memory_system.memory import memory_graph,hippocampus
|
||||
from random import random
|
||||
|
||||
from loguru import logger
|
||||
from nonebot.adapters.onebot.v11 import Bot, GroupMessageEvent
|
||||
|
||||
from ..memory_system.memory import hippocampus
|
||||
from ..moods.moods import MoodManager # 导入情绪管理器
|
||||
from .config import global_config
|
||||
from .cq_code import CQCode # 导入CQCode模块
|
||||
from .emoji_manager import emoji_manager # 导入表情包管理器
|
||||
from .llm_generator import ResponseGenerator
|
||||
from .message import (
|
||||
Message,
|
||||
Message_Sending,
|
||||
Message_Thinking, # 导入 Message_Thinking 类
|
||||
MessageSet,
|
||||
)
|
||||
from .message_sender import message_manager # 导入新的消息管理器
|
||||
from .relationship_manager import relationship_manager
|
||||
from .storage import MessageStorage
|
||||
from .utils import calculate_typing_time, is_mentioned_bot_in_txt
|
||||
from .willing_manager import willing_manager # 导入意愿管理器
|
||||
|
||||
|
||||
class ChatBot:
|
||||
def __init__(self):
|
||||
@@ -123,6 +127,11 @@ class ChatBot:
|
||||
container.messages.remove(msg)
|
||||
# print(f"\033[1;32m[思考消息删除]\033[0m 已找到思考消息对象,开始删除")
|
||||
break
|
||||
|
||||
# 如果找不到思考消息,直接返回
|
||||
if not thinking_message:
|
||||
print(f"\033[1;33m[警告]\033[0m 未找到对应的思考消息,可能已超时被移除")
|
||||
return
|
||||
|
||||
#记录开始思考的时间,避免从思考到回复的时间太久
|
||||
thinking_start_time = thinking_message.thinking_start_time
|
||||
|
||||
@@ -1,12 +1,9 @@
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, Any, Optional, Set
|
||||
import os
|
||||
import configparser
|
||||
import tomli
|
||||
import sys
|
||||
from loguru import logger
|
||||
from nonebot import get_driver
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, Optional
|
||||
|
||||
import tomli
|
||||
from loguru import logger
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -24,6 +21,12 @@ class BotConfig:
|
||||
|
||||
talk_allowed_groups = set()
|
||||
talk_frequency_down_groups = set()
|
||||
thinking_timeout: int = 100 # 思考时间
|
||||
|
||||
response_willing_amplifier: float = 1.0 # 回复意愿放大系数
|
||||
response_interested_rate_amplifier: float = 1.0 # 回复兴趣度放大系数
|
||||
down_frequency_rate: float = 3.5 # 降低回复频率的群组回复意愿降低系数
|
||||
|
||||
ban_user_id = set()
|
||||
|
||||
build_memory_interval: int = 30 # 记忆构建间隔(秒)
|
||||
@@ -61,6 +64,8 @@ class BotConfig:
|
||||
mood_decay_rate: float = 0.95 # 情绪衰减率
|
||||
mood_intensity_factor: float = 0.7 # 情绪强度因子
|
||||
|
||||
keywords_reaction_rules = [] # 关键词回复规则
|
||||
|
||||
# 默认人设
|
||||
PROMPT_PERSONALITY=[
|
||||
"曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧",
|
||||
@@ -175,6 +180,10 @@ class BotConfig:
|
||||
config.MAX_CONTEXT_SIZE = msg_config.get("max_context_size", config.MAX_CONTEXT_SIZE)
|
||||
config.emoji_chance = msg_config.get("emoji_chance", config.emoji_chance)
|
||||
config.ban_words=msg_config.get("ban_words",config.ban_words)
|
||||
config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
|
||||
config.response_willing_amplifier = msg_config.get("response_willing_amplifier", config.response_willing_amplifier)
|
||||
config.response_interested_rate_amplifier = msg_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
|
||||
config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
|
||||
|
||||
if "memory" in toml_dict:
|
||||
memory_config = toml_dict["memory"]
|
||||
@@ -187,6 +196,13 @@ class BotConfig:
|
||||
config.mood_decay_rate = mood_config.get("mood_decay_rate", config.mood_decay_rate)
|
||||
config.mood_intensity_factor = mood_config.get("mood_intensity_factor", config.mood_intensity_factor)
|
||||
|
||||
# print(toml_dict)
|
||||
if "keywords_reaction" in toml_dict:
|
||||
# 读取关键词回复配置
|
||||
keywords_reaction_config = toml_dict["keywords_reaction"]
|
||||
if keywords_reaction_config.get("enable", False):
|
||||
config.keywords_reaction_rules = keywords_reaction_config.get("rules", config.keywords_reaction_rules)
|
||||
|
||||
# 群组配置
|
||||
if "groups" in toml_dict:
|
||||
groups_config = toml_dict["groups"]
|
||||
|
||||
@@ -1,24 +1,23 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, Optional, List, Union
|
||||
import html
|
||||
import requests
|
||||
import base64
|
||||
from PIL import Image
|
||||
import html
|
||||
import os
|
||||
from random import random
|
||||
from nonebot.adapters.onebot.v11 import Bot
|
||||
from .config import global_config
|
||||
import time
|
||||
import asyncio
|
||||
from .utils_image import storage_image, storage_emoji
|
||||
from .utils_user import get_user_nickname
|
||||
from ..models.utils_model import LLM_request
|
||||
from .mapper import emojimapper
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, Optional
|
||||
|
||||
import requests
|
||||
|
||||
# 解析各种CQ码
|
||||
# 包含CQ码类
|
||||
import urllib3
|
||||
from urllib3.util import create_urllib3_context
|
||||
from nonebot import get_driver
|
||||
from urllib3.util import create_urllib3_context
|
||||
|
||||
from ..models.utils_model import LLM_request
|
||||
from .config import global_config
|
||||
from .mapper import emojimapper
|
||||
from .utils_image import storage_emoji, storage_image
|
||||
from .utils_user import get_user_nickname
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
@@ -81,7 +80,7 @@ class CQCode:
|
||||
if user_nickname:
|
||||
self.translated_plain_text = f"[@{user_nickname}]"
|
||||
else:
|
||||
self.translated_plain_text = f"@某人"
|
||||
self.translated_plain_text = "@某人"
|
||||
elif self.type == 'reply':
|
||||
self.translated_plain_text = await self.translate_reply()
|
||||
elif self.type == 'face':
|
||||
@@ -308,7 +307,7 @@ class CQCode:
|
||||
return f"[回复 {self.reply_message.sender.nickname} 的消息: {message_obj.processed_plain_text}]"
|
||||
|
||||
else:
|
||||
print(f"\033[1;31m[错误]\033[0m 回复消息的sender.user_id为空")
|
||||
print("\033[1;31m[错误]\033[0m 回复消息的sender.user_id为空")
|
||||
return '[回复某人消息]'
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -1,27 +1,18 @@
|
||||
from typing import List, Dict, Optional
|
||||
import random
|
||||
from ...common.database import Database
|
||||
import os
|
||||
import json
|
||||
from dataclasses import dataclass
|
||||
import jieba.analyse as jieba_analyse
|
||||
import aiohttp
|
||||
import hashlib
|
||||
from datetime import datetime
|
||||
import base64
|
||||
import shutil
|
||||
import asyncio
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from PIL import Image
|
||||
import io
|
||||
from loguru import logger
|
||||
import traceback
|
||||
from typing import Optional
|
||||
|
||||
from loguru import logger
|
||||
from nonebot import get_driver
|
||||
|
||||
from ...common.database import Database
|
||||
from ..chat.config import global_config
|
||||
from ..models.utils_model import LLM_request
|
||||
from ..chat.utils_image import image_path_to_base64
|
||||
from ..chat.utils import get_embedding
|
||||
from ..chat.utils_image import image_path_to_base64
|
||||
from ..models.utils_model import LLM_request
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
@@ -275,7 +266,7 @@ class EmojiManager:
|
||||
async def _periodic_scan(self, interval_MINS: int = 10):
|
||||
"""定期扫描新表情包"""
|
||||
while True:
|
||||
print(f"\033[1;36m[表情包]\033[0m 开始扫描新表情包...")
|
||||
print("\033[1;36m[表情包]\033[0m 开始扫描新表情包...")
|
||||
await self.scan_new_emojis()
|
||||
await asyncio.sleep(interval_MINS * 60) # 每600秒扫描一次
|
||||
|
||||
|
||||
@@ -1,19 +1,16 @@
|
||||
from typing import Dict, Any, List, Optional, Union, Tuple
|
||||
from openai import OpenAI
|
||||
import asyncio
|
||||
from functools import partial
|
||||
from .message import Message
|
||||
from .config import global_config
|
||||
from ...common.database import Database
|
||||
import random
|
||||
import time
|
||||
import numpy as np
|
||||
from .relationship_manager import relationship_manager
|
||||
from .prompt_builder import prompt_builder
|
||||
from .config import global_config
|
||||
from .utils import process_llm_response
|
||||
from typing import List, Optional, Tuple, Union
|
||||
|
||||
from nonebot import get_driver
|
||||
|
||||
from ...common.database import Database
|
||||
from ..models.utils_model import LLM_request
|
||||
from .config import global_config
|
||||
from .message import Message
|
||||
from .prompt_builder import prompt_builder
|
||||
from .relationship_manager import relationship_manager
|
||||
from .utils import process_llm_response
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
|
||||
@@ -1,16 +1,12 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional, Dict, Tuple, ForwardRef
|
||||
import time
|
||||
import jieba.analyse as jieba_analyse
|
||||
import os
|
||||
from datetime import datetime
|
||||
from ...common.database import Database
|
||||
from PIL import Image
|
||||
from .config import global_config
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, ForwardRef, List, Optional
|
||||
|
||||
import urllib3
|
||||
from .utils_user import get_user_nickname,get_user_cardname,get_groupname
|
||||
|
||||
from .cq_code import CQCode, cq_code_tool
|
||||
from .utils_cq import parse_cq_code
|
||||
from .cq_code import cq_code_tool,CQCode
|
||||
from .utils_user import get_groupname, get_user_cardname, get_user_nickname
|
||||
|
||||
Message = ForwardRef('Message') # 添加这行
|
||||
# 禁用SSL警告
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
from typing import Union, List, Optional, Dict
|
||||
from collections import deque
|
||||
from .message import Message, Message_Thinking, MessageSet, Message_Sending
|
||||
import time
|
||||
import asyncio
|
||||
import time
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
from nonebot.adapters.onebot.v11 import Bot
|
||||
from .config import global_config
|
||||
from .storage import MessageStorage
|
||||
|
||||
from .cq_code import cq_code_tool
|
||||
import random
|
||||
from .message import Message, Message_Sending, Message_Thinking, MessageSet
|
||||
from .storage import MessageStorage
|
||||
from .utils import calculate_typing_time
|
||||
from .config import global_config
|
||||
|
||||
|
||||
class Message_Sender:
|
||||
"""发送器"""
|
||||
@@ -162,6 +163,11 @@ class MessageManager:
|
||||
message_earliest.update_thinking_time()
|
||||
thinking_time = message_earliest.thinking_time
|
||||
print(f"\033[1;34m[调试]\033[0m 消息正在思考中,已思考{int(thinking_time)}秒\033[K\r", end='', flush=True)
|
||||
|
||||
# 检查是否超时
|
||||
if thinking_time > global_config.thinking_timeout:
|
||||
print(f"\033[1;33m[警告]\033[0m 消息思考超时({thinking_time}秒),移除该消息")
|
||||
container.remove_message(message_earliest)
|
||||
else:# 如果不是message_thinking就只能是message_sending
|
||||
print(f"\033[1;34m[调试]\033[0m 消息'{message_earliest.processed_plain_text}'正在发送中")
|
||||
#直接发,等什么呢
|
||||
@@ -199,7 +205,7 @@ class MessageManager:
|
||||
|
||||
# 安全地移除消息
|
||||
if not container.remove_message(msg):
|
||||
print(f"\033[1;33m[警告]\033[0m 尝试删除不存在的消息")
|
||||
print("\033[1;33m[警告]\033[0m 尝试删除不存在的消息")
|
||||
except Exception as e:
|
||||
print(f"\033[1;31m[错误]\033[0m 处理超时消息时发生错误: {e}")
|
||||
continue
|
||||
|
||||
@@ -1,17 +1,14 @@
|
||||
import time
|
||||
import random
|
||||
from ..schedule.schedule_generator import bot_schedule
|
||||
import os
|
||||
from .utils import get_embedding, combine_messages, get_recent_group_detailed_plain_text
|
||||
import time
|
||||
from typing import Optional
|
||||
|
||||
from ...common.database import Database
|
||||
from .config import global_config
|
||||
from .topic_identifier import topic_identifier
|
||||
from ..memory_system.memory import memory_graph,hippocampus
|
||||
from random import choice
|
||||
import numpy as np
|
||||
import jieba
|
||||
from collections import Counter
|
||||
from ..memory_system.memory import hippocampus, memory_graph
|
||||
from ..moods.moods import MoodManager
|
||||
from ..schedule.schedule_generator import bot_schedule
|
||||
from .config import global_config
|
||||
from .utils import get_embedding, get_recent_group_detailed_plain_text
|
||||
|
||||
|
||||
class PromptBuilder:
|
||||
def __init__(self):
|
||||
@@ -25,7 +22,7 @@ class PromptBuilder:
|
||||
message_txt: str,
|
||||
sender_name: str = "某人",
|
||||
relationship_value: float = 0.0,
|
||||
group_id: int = None) -> str:
|
||||
group_id: Optional[int] = None) -> tuple[str, str]:
|
||||
"""构建prompt
|
||||
|
||||
Args:
|
||||
@@ -101,7 +98,7 @@ class PromptBuilder:
|
||||
for memory in relevant_memories:
|
||||
memory_items.append(f"关于「{memory['topic']}」的记忆:{memory['content']}")
|
||||
|
||||
memory_prompt = f"看到这些聊天,你想起来:\n" + "\n".join(memory_items) + "\n"
|
||||
memory_prompt = "看到这些聊天,你想起来:\n" + "\n".join(memory_items) + "\n"
|
||||
|
||||
# 打印调试信息
|
||||
print("\n\033[1;32m[记忆检索]\033[0m 找到以下相关记忆:")
|
||||
@@ -117,13 +114,22 @@ class PromptBuilder:
|
||||
activate_prompt = ''
|
||||
activate_prompt = f"以上是群里正在进行的聊天,{memory_prompt} 现在昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},{mood_prompt},你想要{relation_prompt_2}。"
|
||||
|
||||
#检测机器人相关词汇
|
||||
bot_keywords = ['人机', 'bot', '机器', '入机', 'robot', '机器人']
|
||||
is_bot = any(keyword in message_txt.lower() for keyword in bot_keywords)
|
||||
if is_bot:
|
||||
is_bot_prompt = '有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认'
|
||||
else:
|
||||
is_bot_prompt = ''
|
||||
#检测机器人相关词汇,改为关键词检测与反应功能了,提取到全局配置中
|
||||
# bot_keywords = ['人机', 'bot', '机器', '入机', 'robot', '机器人']
|
||||
# is_bot = any(keyword in message_txt.lower() for keyword in bot_keywords)
|
||||
# if is_bot:
|
||||
# is_bot_prompt = '有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认'
|
||||
# else:
|
||||
# is_bot_prompt = ''
|
||||
|
||||
# 关键词检测与反应
|
||||
keywords_reaction_prompt = ''
|
||||
for rule in global_config.keywords_reaction_rules:
|
||||
if rule.get("enable", False):
|
||||
if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
|
||||
print(f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}")
|
||||
keywords_reaction_prompt += rule.get("reaction", "") + ','
|
||||
|
||||
|
||||
#人格选择
|
||||
personality=global_config.PROMPT_PERSONALITY
|
||||
@@ -134,15 +140,15 @@ class PromptBuilder:
|
||||
personality_choice = random.random()
|
||||
if personality_choice < probability_1: # 第一种人格
|
||||
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[0]}, 你正在浏览qq群,{promt_info_prompt},
|
||||
现在请你给出日常且口语化的回复,平淡一些,尽量简短一些。{is_bot_prompt}
|
||||
现在请你给出日常且口语化的回复,平淡一些,尽量简短一些。{keywords_reaction_prompt}
|
||||
请注意把握群里的聊天内容,不要刻意突出自身学科背景,不要回复的太有条理,可以有个性。'''
|
||||
elif personality_choice < probability_1 + probability_2: # 第二种人格
|
||||
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[1]}, 你正在浏览qq群,{promt_info_prompt},
|
||||
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{is_bot_prompt}
|
||||
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
|
||||
请你表达自己的见解和观点。可以有个性。'''
|
||||
else: # 第三种人格
|
||||
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[2]}, 你正在浏览qq群,{promt_info_prompt},
|
||||
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{is_bot_prompt}
|
||||
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
|
||||
请你表达自己的见解和观点。可以有个性。'''
|
||||
|
||||
#中文高手(新加的好玩功能)
|
||||
@@ -203,7 +209,7 @@ class PromptBuilder:
|
||||
|
||||
#激活prompt构建
|
||||
activate_prompt = ''
|
||||
activate_prompt = f"以上是群里正在进行的聊天。"
|
||||
activate_prompt = "以上是群里正在进行的聊天。"
|
||||
personality=global_config.PROMPT_PERSONALITY
|
||||
prompt_personality = ''
|
||||
personality_choice = random.random()
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import time
|
||||
from ...common.database import Database
|
||||
from nonebot.adapters.onebot.v11 import Bot
|
||||
from typing import Optional, Tuple
|
||||
import asyncio
|
||||
from typing import Optional
|
||||
|
||||
from ...common.database import Database
|
||||
|
||||
|
||||
class Impression:
|
||||
traits: str = None
|
||||
@@ -123,7 +123,7 @@ class RelationshipManager:
|
||||
print(f"\033[1;32m[关系管理]\033[0m 已加载 {len(self.relationships)} 条关系记录")
|
||||
|
||||
while True:
|
||||
print(f"\033[1;32m[关系管理]\033[0m 正在自动保存关系")
|
||||
print("\033[1;32m[关系管理]\033[0m 正在自动保存关系")
|
||||
await asyncio.sleep(300) # 等待300秒(5分钟)
|
||||
await self._save_all_relationships()
|
||||
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
from typing import Dict, List, Any, Optional
|
||||
import time
|
||||
import threading
|
||||
from collections import defaultdict
|
||||
import asyncio
|
||||
from .message import Message
|
||||
from typing import Optional
|
||||
|
||||
from ...common.database import Database
|
||||
from .message import Message
|
||||
|
||||
|
||||
class MessageStorage:
|
||||
def __init__(self):
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
from typing import Optional, Dict, List
|
||||
from openai import OpenAI
|
||||
from .message import Message
|
||||
import jieba
|
||||
from typing import List, Optional
|
||||
|
||||
from nonebot import get_driver
|
||||
from .config import global_config
|
||||
|
||||
from ..models.utils_model import LLM_request
|
||||
from .config import global_config
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
@@ -26,7 +25,7 @@ class TopicIdentifier:
|
||||
topic, _ = await self.llm_topic_judge.generate_response(prompt)
|
||||
|
||||
if not topic:
|
||||
print(f"\033[1;31m[错误]\033[0m LLM API 返回为空")
|
||||
print("\033[1;31m[错误]\033[0m LLM API 返回为空")
|
||||
return None
|
||||
|
||||
# 直接在这里处理主题解析
|
||||
|
||||
@@ -1,19 +1,17 @@
|
||||
import time
|
||||
import random
|
||||
from typing import List
|
||||
from .message import Message
|
||||
import requests
|
||||
import numpy as np
|
||||
from .config import global_config
|
||||
import re
|
||||
from typing import Dict
|
||||
from collections import Counter
|
||||
import math
|
||||
from nonebot import get_driver
|
||||
from ..models.utils_model import LLM_request
|
||||
import aiohttp
|
||||
import random
|
||||
import time
|
||||
from collections import Counter
|
||||
from typing import Dict, List
|
||||
|
||||
import jieba
|
||||
import numpy as np
|
||||
from nonebot import get_driver
|
||||
|
||||
from ..models.utils_model import LLM_request
|
||||
from ..utils.typo_generator import ChineseTypoGenerator
|
||||
from .config import global_config
|
||||
from .message import Message
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
import base64
|
||||
import io
|
||||
from PIL import Image
|
||||
import hashlib
|
||||
import time
|
||||
import os
|
||||
import time
|
||||
import zlib # 用于 CRC32
|
||||
|
||||
from loguru import logger
|
||||
from nonebot import get_driver
|
||||
from PIL import Image
|
||||
|
||||
from ...common.database import Database
|
||||
from ..chat.config import global_config
|
||||
import zlib # 用于 CRC32
|
||||
import base64
|
||||
from nonebot import get_driver
|
||||
from loguru import logger
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
@@ -119,7 +120,7 @@ def storage_compress_image(base64_data: str, max_size: int = 200) -> str:
|
||||
|
||||
# 保存记录
|
||||
collection.insert_one(image_record)
|
||||
print(f"\033[1;32m[成功]\033[0m 保存图片记录到数据库")
|
||||
print("\033[1;32m[成功]\033[0m 保存图片记录到数据库")
|
||||
|
||||
except Exception as db_error:
|
||||
print(f"\033[1;31m[错误]\033[0m 数据库操作失败: {str(db_error)}")
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from .relationship_manager import relationship_manager
|
||||
from .config import global_config
|
||||
from .relationship_manager import relationship_manager
|
||||
|
||||
|
||||
def get_user_nickname(user_id: int) -> str:
|
||||
if int(user_id) == int(global_config.BOT_QQ):
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import asyncio
|
||||
from .config import global_config
|
||||
|
||||
|
||||
class WillingManager:
|
||||
def __init__(self):
|
||||
@@ -37,11 +39,14 @@ class WillingManager:
|
||||
current_willing *= 0.1
|
||||
print(f"表情包, 当前意愿: {current_willing}")
|
||||
|
||||
print(f"放大系数_interested_rate: {global_config.response_interested_rate_amplifier}")
|
||||
interested_rate *= global_config.response_interested_rate_amplifier #放大回复兴趣度
|
||||
if interested_rate > 0.4:
|
||||
print(f"兴趣度: {interested_rate}, 当前意愿: {current_willing}")
|
||||
current_willing += interested_rate-0.1
|
||||
# print(f"兴趣度: {interested_rate}, 当前意愿: {current_willing}")
|
||||
current_willing += interested_rate-0.4
|
||||
|
||||
self.group_reply_willing[group_id] = min(current_willing, 3.0)
|
||||
current_willing *= global_config.response_willing_amplifier #放大回复意愿
|
||||
# print(f"放大系数_willing: {global_config.response_willing_amplifier}, 当前意愿: {current_willing}")
|
||||
|
||||
reply_probability = max((current_willing - 0.45) * 2, 0)
|
||||
if group_id not in config.talk_allowed_groups:
|
||||
@@ -49,11 +54,14 @@ class WillingManager:
|
||||
reply_probability = 0
|
||||
|
||||
if group_id in config.talk_frequency_down_groups:
|
||||
reply_probability = reply_probability / 3.5
|
||||
reply_probability = reply_probability / global_config.down_frequency_rate
|
||||
|
||||
reply_probability = min(reply_probability, 1)
|
||||
if reply_probability < 0:
|
||||
reply_probability = 0
|
||||
|
||||
|
||||
self.group_reply_willing[group_id] = min(current_willing, 3.0)
|
||||
return reply_probability
|
||||
|
||||
def change_reply_willing_sent(self, group_id: int):
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import os
|
||||
import sys
|
||||
import numpy as np
|
||||
import requests
|
||||
import time
|
||||
|
||||
import requests
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# 添加项目根目录到 Python 路径
|
||||
|
||||
@@ -1,19 +1,12 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
import sys
|
||||
import jieba
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
import math
|
||||
from collections import Counter
|
||||
import datetime
|
||||
import random
|
||||
import time
|
||||
|
||||
import jieba
|
||||
import matplotlib.pyplot as plt
|
||||
import networkx as nx
|
||||
from dotenv import load_dotenv
|
||||
import sys
|
||||
import asyncio
|
||||
import aiohttp
|
||||
from typing import Tuple
|
||||
|
||||
sys.path.append("C:/GitHub/MaiMBot") # 添加项目根目录到 Python 路径
|
||||
from src.common.database import Database # 使用正确的导入语法
|
||||
|
||||
@@ -1,20 +1,21 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import os
|
||||
import jieba
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
from collections import Counter
|
||||
import datetime
|
||||
import math
|
||||
import random
|
||||
import time
|
||||
|
||||
import jieba
|
||||
import networkx as nx
|
||||
|
||||
from ...common.database import Database # 使用正确的导入语法
|
||||
from ..chat.config import global_config
|
||||
from ...common.database import Database # 使用正确的导入语法
|
||||
from ..chat.utils import (
|
||||
calculate_information_content,
|
||||
cosine_similarity,
|
||||
get_cloest_chat_from_db,
|
||||
text_to_vector,
|
||||
)
|
||||
from ..models.utils_model import LLM_request
|
||||
import math
|
||||
from ..chat.utils import calculate_information_content, get_cloest_chat_from_db ,text_to_vector,cosine_similarity
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class Memory_graph:
|
||||
@@ -530,7 +531,8 @@ class Hippocampus:
|
||||
# 计算每个识别出的主题与记忆主题的相似度
|
||||
for topic in topics:
|
||||
if debug_info:
|
||||
print(f"\033[1;32m[{debug_info}]\033[0m 正在思考有没有见过: {topic}")
|
||||
# print(f"\033[1;32m[{debug_info}]\033[0m 正在思考有没有见过: {topic}")
|
||||
pass
|
||||
|
||||
topic_vector = text_to_vector(topic)
|
||||
has_similar_topic = False
|
||||
@@ -548,11 +550,13 @@ class Hippocampus:
|
||||
if similarity >= similarity_threshold:
|
||||
has_similar_topic = True
|
||||
if debug_info:
|
||||
print(f"\033[1;32m[{debug_info}]\033[0m 找到相似主题: {topic} -> {memory_topic} (相似度: {similarity:.2f})")
|
||||
# print(f"\033[1;32m[{debug_info}]\033[0m 找到相似主题: {topic} -> {memory_topic} (相似度: {similarity:.2f})")
|
||||
pass
|
||||
all_similar_topics.append((memory_topic, similarity))
|
||||
|
||||
if not has_similar_topic and debug_info:
|
||||
print(f"\033[1;31m[{debug_info}]\033[0m 没有见过: {topic} ,呃呃")
|
||||
# print(f"\033[1;31m[{debug_info}]\033[0m 没有见过: {topic} ,呃呃")
|
||||
pass
|
||||
|
||||
return all_similar_topics
|
||||
|
||||
@@ -696,6 +700,7 @@ def segment_text(text):
|
||||
|
||||
|
||||
from nonebot import get_driver
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import sys
|
||||
import jieba
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
import math
|
||||
from collections import Counter
|
||||
import datetime
|
||||
import random
|
||||
import time
|
||||
import math
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
import pymongo
|
||||
from loguru import logger
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
from collections import Counter
|
||||
from pathlib import Path
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import networkx as nx
|
||||
import pymongo
|
||||
from dotenv import load_dotenv
|
||||
from loguru import logger
|
||||
|
||||
# from chat.config import global_config
|
||||
sys.path.append("C:/GitHub/MaiMBot") # 添加项目根目录到 Python 路径
|
||||
from src.common.database import Database
|
||||
from src.common.database import Database
|
||||
from src.plugins.memory_system.offline_llm import LLMModel
|
||||
|
||||
# 获取当前文件的目录
|
||||
@@ -102,7 +103,7 @@ def get_cloest_chat_from_db(db, length: int, timestamp: str):
|
||||
# 检查当前记录的memorized值
|
||||
current_memorized = record.get('memorized', 0)
|
||||
if current_memorized > 3:
|
||||
print(f"消息已读取3次,跳过")
|
||||
print("消息已读取3次,跳过")
|
||||
return ''
|
||||
|
||||
# 更新memorized值
|
||||
@@ -114,7 +115,7 @@ def get_cloest_chat_from_db(db, length: int, timestamp: str):
|
||||
chat_text += record["detailed_plain_text"]
|
||||
|
||||
return chat_text
|
||||
print(f"消息已读取3次,跳过")
|
||||
print("消息已读取3次,跳过")
|
||||
return ''
|
||||
|
||||
class Memory_graph:
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
import os
|
||||
import requests
|
||||
from typing import Tuple, Union
|
||||
import time
|
||||
import aiohttp
|
||||
import asyncio
|
||||
import os
|
||||
import time
|
||||
from typing import Tuple, Union
|
||||
|
||||
import aiohttp
|
||||
import requests
|
||||
from loguru import logger
|
||||
|
||||
|
||||
class LLMModel:
|
||||
def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
|
||||
self.model_name = model_name
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
import aiohttp
|
||||
import asyncio
|
||||
import json
|
||||
import requests
|
||||
import time
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Tuple, Union
|
||||
from nonebot import get_driver
|
||||
|
||||
import aiohttp
|
||||
from loguru import logger
|
||||
from nonebot import get_driver
|
||||
|
||||
from ...common.database import Database
|
||||
from ..chat.config import global_config
|
||||
from ..chat.utils_image import compress_base64_image_by_scale
|
||||
from datetime import datetime
|
||||
from ...common.database import Database
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
@@ -181,6 +181,13 @@ class LLM_request:
|
||||
continue
|
||||
elif response.status in policy["abort_codes"]:
|
||||
logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
|
||||
if response.status == 403 :
|
||||
if global_config.llm_normal == "Pro/deepseek-ai/DeepSeek-V3":
|
||||
logger.error("可能是没有给硅基流动充钱,普通模型自动退化至非Pro模型,反应速度可能会变慢")
|
||||
global_config.llm_normal = "deepseek-ai/DeepSeek-V3"
|
||||
if global_config.llm_reasoning == "Pro/deepseek-ai/DeepSeek-R1":
|
||||
logger.error("可能是没有给硅基流动充钱,推理模型自动退化至非Pro模型,反应速度可能会变慢")
|
||||
global_config.llm_reasoning = "deepseek-ai/DeepSeek-R1"
|
||||
raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
|
||||
|
||||
response.raise_for_status()
|
||||
@@ -226,7 +233,7 @@ class LLM_request:
|
||||
await asyncio.sleep(wait_time)
|
||||
else:
|
||||
logger.critical(f"请求失败: {str(e)}")
|
||||
logger.critical(f"请求头: {await self._build_headers()} 请求体: {payload}")
|
||||
logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {payload}")
|
||||
raise RuntimeError(f"API请求失败: {str(e)}")
|
||||
|
||||
logger.error("达到最大重试次数,请求仍然失败")
|
||||
@@ -324,12 +331,19 @@ class LLM_request:
|
||||
reasoning = ""
|
||||
return content, reasoning
|
||||
|
||||
async def _build_headers(self) -> dict:
|
||||
async def _build_headers(self, no_key: bool = False) -> dict:
|
||||
"""构建请求头"""
|
||||
return {
|
||||
"Authorization": f"Bearer {self.api_key}",
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
if no_key:
|
||||
return {
|
||||
"Authorization": f"Bearer **********",
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"Authorization": f"Bearer {self.api_key}",
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
# 防止小朋友们截图自己的key
|
||||
|
||||
async def generate_response(self, prompt: str) -> Tuple[str, str]:
|
||||
"""根据输入的提示生成模型的异步响应"""
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
import math
|
||||
import time
|
||||
import threading
|
||||
from typing import Dict, Tuple, Optional
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
|
||||
from ..chat.config import global_config
|
||||
|
||||
|
||||
@dataclass
|
||||
class MoodState:
|
||||
valence: float # 愉悦度 (-1 到 1)
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
import datetime
|
||||
import os
|
||||
from typing import List, Dict, Union
|
||||
from ...common.database import Database # 使用正确的导入语法
|
||||
from src.plugins.chat.config import global_config
|
||||
from nonebot import get_driver
|
||||
from ..models.utils_model import LLM_request
|
||||
from loguru import logger
|
||||
import json
|
||||
from typing import Dict, Union
|
||||
|
||||
from loguru import logger
|
||||
from nonebot import get_driver
|
||||
|
||||
from src.plugins.chat.config import global_config
|
||||
|
||||
from ...common.database import Database # 使用正确的导入语法
|
||||
from ..models.utils_model import LLM_request
|
||||
|
||||
driver = get_driver()
|
||||
config = driver.config
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
from typing import Dict, List, Any
|
||||
import time
|
||||
import threading
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any, Dict
|
||||
|
||||
from ...common.database import Database
|
||||
|
||||
|
||||
class LLMStatistics:
|
||||
def __init__(self, output_file: str = "llm_statistics.txt"):
|
||||
"""初始化LLM统计类
|
||||
|
||||
@@ -2,15 +2,17 @@
|
||||
错别字生成器 - 基于拼音和字频的中文错别字生成工具
|
||||
"""
|
||||
|
||||
from pypinyin import pinyin, Style
|
||||
from collections import defaultdict
|
||||
import json
|
||||
import os
|
||||
import jieba
|
||||
from pathlib import Path
|
||||
import random
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
|
||||
import jieba
|
||||
from pypinyin import Style, pinyin
|
||||
|
||||
|
||||
class ChineseTypoGenerator:
|
||||
def __init__(self,
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import tomli
|
||||
import tomli_w
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
import os
|
||||
|
||||
import tomli
|
||||
import tomli_w
|
||||
|
||||
|
||||
def sync_configs():
|
||||
# 读取两个配置文件
|
||||
|
||||
@@ -17,6 +17,11 @@ prompt_schedule = "一个曾经学习地质,现在学习心理学和脑科学的
|
||||
min_text_length = 2 # 与麦麦聊天时麦麦只会回答文本大于等于此数的消息
|
||||
max_context_size = 15 # 麦麦获得的上文数量
|
||||
emoji_chance = 0.2 # 麦麦使用表情包的概率
|
||||
thinking_timeout = 120 # 麦麦思考时间
|
||||
|
||||
response_willing_amplifier = 1 # 麦麦回复意愿放大系数,一般为1
|
||||
response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数,听到记忆里的内容时放大系数
|
||||
down_frequency_rate = 3.5 # 降低回复频率的群组回复意愿降低系数
|
||||
ban_words = [
|
||||
# "403","张三"
|
||||
]
|
||||
@@ -46,6 +51,19 @@ mood_update_interval = 1.0 # 情绪更新间隔 单位秒
|
||||
mood_decay_rate = 0.95 # 情绪衰减率
|
||||
mood_intensity_factor = 1.0 # 情绪强度因子
|
||||
|
||||
[keywords_reaction] # 针对某个关键词作出反应
|
||||
enable = true # 关键词反应功能的总开关
|
||||
|
||||
[[keywords_reaction.rules]] # 如果想要新增多个关键词,直接复制本条,修改keywords和reaction即可
|
||||
enable = true # 是否启用此条(为了人类在未来AI战争能更好地识别AI(bushi),默认开启)
|
||||
keywords = ["人机", "bot", "机器", "入机", "robot", "机器人"] # 会触发反应的关键词
|
||||
reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" # 触发之后添加的提示词
|
||||
|
||||
[[keywords_reaction.rules]] # 就像这样复制
|
||||
enable = false # 仅作示例,不会触发
|
||||
keywords = ["测试关键词回复","test",""]
|
||||
reaction = "回答“测试成功”"
|
||||
|
||||
[others]
|
||||
enable_advance_output = true # 是否启用高级输出
|
||||
enable_kuuki_read = true # 是否启用读空气功能
|
||||
|
||||
Reference in New Issue
Block a user