Merge remote-tracking branch 'origin/debug'
@@ -3,7 +3,7 @@

 <div align="center">




@@ -51,9 +51,11 @@

 - [🐳 Docker部署指南](docs/docker_deploy.md)

-- [📦 手动部署指南(Windows)](docs/manual_deploy.md)
+- [📦 手动部署指南 Windows](docs/manual_deploy_windows.md)

-- [📦 手动部署指南(Linux)](docs/manual_deploy_linux.md)
+- [📦 手动部署指南 Linux](docs/manual_deploy_linux.md)

+- 📦 Windows 一键傻瓜式部署,请运行项目根目录中的 ```run.bat```,部署完成后请参照后续配置指南进行配置

 ### 配置说明

 - [🎀 新手配置指南](docs/installation_cute.md) - 通俗易懂的配置教程,适合初次使用的猫娘

bot.py | 99
@@ -1,12 +1,17 @@
 import os
+import shutil
 import nonebot
+import time
 from dotenv import load_dotenv
 from loguru import logger
 from nonebot.adapters.onebot.v11 import Adapter

-'''彩蛋'''
-from colorama import Fore, init
+# 获取没有加载env时的环境变量
+env_mask = {key: os.getenv(key) for key in os.environ}


+def easter_egg():
+    # 彩蛋
+    from colorama import init, Fore

     init()
     text = "多年以后,面对AI行刑队,张三将会回想起他2023年在会议上讨论人工智能的那个下午"
@@ -15,12 +20,12 @@ rainbow_text = ""
     for i, char in enumerate(text):
         rainbow_text += rainbow_colors[i % len(rainbow_colors)] + char
     print(rainbow_text)
-'''彩蛋'''


+def init_config():
     # 初次启动检测
     if not os.path.exists("config/bot_config.toml"):
         logger.warning("检测到bot_config.toml不存在,正在从模板复制")
-        import shutil
         # 检查config目录是否存在
         if not os.path.exists("config"):
             os.makedirs("config")
@@ -29,6 +34,7 @@ if not os.path.exists("config/bot_config.toml"):
         shutil.copy("template/bot_config_template.toml", "config/bot_config.toml")
         logger.info("复制完成,请修改config/bot_config.toml和.env.prod中的配置后重新启动")

+def init_env():
     # 初始化.env 默认ENVIRONMENT=prod
     if not os.path.exists(".env"):
         with open(".env", "w") as f:
@@ -44,28 +50,82 @@ if os.path.exists(".env"):
     load_dotenv(".env")
     logger.success("成功加载基础环境变量配置")

-# 根据 ENVIRONMENT 加载对应的环境配置
-if os.getenv("ENVIRONMENT") == "prod":
+def load_env():
+    # 使用闭包实现对加载器的横向扩展,避免大量重复判断
+    def prod():
         logger.success("加载生产环境变量配置")
         load_dotenv(".env.prod", override=True) # override=True 允许覆盖已存在的环境变量
-elif os.getenv("ENVIRONMENT") == "dev":
+
+    def dev():
         logger.success("加载开发环境变量配置")
         load_dotenv(".env.dev", override=True) # override=True 允许覆盖已存在的环境变量
-elif os.path.exists(f".env.{os.getenv('ENVIRONMENT')}"):
-    logger.success(f"加载{os.getenv('ENVIRONMENT')}环境变量配置")
-    load_dotenv(f".env.{os.getenv('ENVIRONMENT')}", override=True) # override=True 允许覆盖已存在的环境变量
+
+    fn_map = {
+        "prod": prod,
+        "dev": dev
+    }
+
+    env = os.getenv("ENVIRONMENT")
+    logger.info(f"[load_env] 当前的 ENVIRONMENT 变量值:{env}")
+
+    if env in fn_map:
+        fn_map[env]() # 根据映射执行闭包函数
+
+    elif os.path.exists(f".env.{env}"):
+        logger.success(f"加载{env}环境变量配置")
+        load_dotenv(f".env.{env}", override=True) # override=True 允许覆盖已存在的环境变量
+
     else:
-        logger.error(f"ENVIRONMENT配置错误,请检查.env文件中的ENVIRONMENT变量对应的.env.{os.getenv('ENVIRONMENT')}是否存在")
-        exit(1)
+        logger.error(f"ENVIRONMENT 配置错误,请检查 .env 文件中的 ENVIRONMENT 变量及对应 .env.{env} 是否存在")
+        RuntimeError(f"ENVIRONMENT 配置错误,请检查 .env 文件中的 ENVIRONMENT 变量及对应 .env.{env} 是否存在")

-# 检测Key是否存在
-if not os.getenv("SILICONFLOW_KEY"):
-    logger.error("缺失必要的API KEY")
-    logger.error(f"请至少在.env.{os.getenv('ENVIRONMENT')}文件中填写SILICONFLOW_KEY后重新启动")
-    exit(1)

-# 获取所有环境变量
+def scan_provider(env_config: dict):
+    provider = {}
+
+    # 利用未初始化 env 时获取的 env_mask 来对新的环境变量集去重
+    # 避免 GPG_KEY 这样的变量干扰检查
+    env_config = dict(filter(lambda item: item[0] not in env_mask, env_config.items()))
+
+    # 遍历 env_config 的所有键
+    for key in env_config:
+        # 检查键是否符合 {provider}_BASE_URL 或 {provider}_KEY 的格式
+        if key.endswith("_BASE_URL") or key.endswith("_KEY"):
+            # 提取 provider 名称
+            provider_name = key.split("_", 1)[0] # 从左分割一次,取第一部分
+
+            # 初始化 provider 的字典(如果尚未初始化)
+            if provider_name not in provider:
+                provider[provider_name] = {"url": None, "key": None}
+
+            # 根据键的类型填充 url 或 key
+            if key.endswith("_BASE_URL"):
+                provider[provider_name]["url"] = env_config[key]
+            elif key.endswith("_KEY"):
+                provider[provider_name]["key"] = env_config[key]
+
+    # 检查每个 provider 是否同时存在 url 和 key
+    for provider_name, config in provider.items():
+        if config["url"] is None or config["key"] is None:
+            logger.error(
+                f"provider 内容:{config}\n"
+                f"env_config 内容:{env_config}"
+            )
+            raise ValueError(f"请检查 '{provider_name}' 提供商配置是否丢失 BASE_URL 或 KEY 环境变量")
+
+
+if __name__ == "__main__":
+    # 利用 TZ 环境变量设定程序工作的时区
+    # 仅保证行为一致,不依赖 localtime(),实际对生产环境几乎没有作用
+    time.tzset()
+
+    easter_egg()
+    init_config()
+    init_env()
+    load_env()
+
     env_config = {key: os.getenv(key) for key in os.environ}
+    scan_provider(env_config)

     # 设置基础配置
     base_config = {
@@ -84,5 +144,4 @@ driver.register_adapter(Adapter)
     # 加载插件
     nonebot.load_plugins("src/plugins")

-if __name__ == "__main__":
     nonebot.run()

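Note on the convention above: `scan_provider()` only accepts a provider whose `{PROVIDER}_BASE_URL` and `{PROVIDER}_KEY` are both set. A minimal, illustrative sketch of that contract (placeholder values, not part of the commit):

```python
# Illustrative only: the env-var pairing that scan_provider() enforces.
import os

os.environ.setdefault("SILICONFLOW_BASE_URL", "https://example.invalid/v1")  # placeholder
os.environ.setdefault("SILICONFLOW_KEY", "sk-placeholder")                   # placeholder

provider = "SILICONFLOW"
missing = [name for name in (f"{provider}_BASE_URL", f"{provider}_KEY") if not os.getenv(name)]
print("ok" if not missing else f"{provider} is missing {missing}")
```
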
@@ -2,18 +2,18 @@ services:
   napcat:
     container_name: napcat
     environment:
-      - tz=Asia/Shanghai
+      - TZ=Asia/Shanghai
       - NAPCAT_UID=${NAPCAT_UID}
-      - NAPCAT_GID=${NAPCAT_GID}
+      - NAPCAT_GID=${NAPCAT_GID} # 让 NapCat 获取当前用户 GID,UID,防止权限问题
     ports:
       - 3000:3000
       - 3001:3001
       - 6099:6099
     restart: unless-stopped
     volumes:
-      - napcatQQ:/app/.config/QQ
-      - napcatCONFIG:/app/napcat/config
-      - maimbotDATA:/MaiMBot/data # 麦麦的图片等要给napcat不然发送图片会有问题
+      - napcatQQ:/app/.config/QQ # 持久化 QQ 本体
+      - napcatCONFIG:/app/napcat/config # 持久化 NapCat 配置文件
+      - maimbotDATA:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
     image: mlikiowa/napcat-docker:latest

   mongodb:
@@ -26,14 +26,14 @@ services:
       - "27017"
     restart: unless-stopped
     volumes:
-      - mongodb:/data/db
-      - mongodbCONFIG:/data/configdb
+      - mongodb:/data/db # 持久化 MongoDB 数据库
+      - mongodbCONFIG:/data/configdb # 持久化 MongoDB 配置文件
     image: mongo:latest

   maimbot:
     container_name: maimbot
     environment:
-      - tz=Asia/Shanghai
+      - TZ=Asia/Shanghai
     expose:
       - "8080"
     restart: unless-stopped
@@ -41,10 +41,10 @@ services:
       - mongodb
       - napcat
     volumes:
-      - napcatCONFIG:/MaiMBot/napcat # 自动根据配置中的qq号创建ws反向客户端配置
-      - ./bot_config.toml:/MaiMBot/config/bot_config.toml
-      - maimbotDATA:/MaiMBot/data
-      - ./.env.prod:/MaiMBot/.env.prod
+      - napcatCONFIG:/MaiMBot/napcat # 自动根据配置中的 QQ 号创建 ws 反向客户端配置
+      - ./bot_config.toml:/MaiMBot/config/bot_config.toml # Toml 配置文件映射
+      - maimbotDATA:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
+      - ./.env.prod:/MaiMBot/.env.prod # Toml 配置文件映射
     image: sengokucola/maimbot:latest

 volumes:

run.bat | 4
@@ -1,6 +1,6 @@
 @ECHO OFF
 chcp 65001
-REM python -m venv venv
+python -m venv venv
 call venv\Scripts\activate.bat
-REM pip install -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple --upgrade -r requirements.txt
+pip install -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple --upgrade -r requirements.txt
 python run.py

run.py | 7
@@ -1,7 +1,7 @@
 import os
 import subprocess
 import zipfile
+import sys
 import requests
 from tqdm import tqdm

@@ -105,6 +105,11 @@ def install_napcat():

 if __name__ == "__main__":
     os.system("cls")
+    if sys.version_info < (3, 9):
+        print("当前 Python 版本过低,最低版本为 3.9,请更新 Python 版本")
+        print("按任意键退出")
+        input()
+        exit(1)
     choice = input(
         "请输入要进行的操作:\n"
         "1.首次安装\n"

@@ -4,11 +4,15 @@ from typing import Dict, Optional

 import tomli
 from loguru import logger
+from packaging import version
+from packaging.version import Version, InvalidVersion
+from packaging.specifiers import SpecifierSet, InvalidSpecifier


 @dataclass
 class BotConfig:
     """机器人配置类"""
+    INNER_VERSION: Version = None

     BOT_QQ: Optional[int] = 1
     BOT_NICKNAME: Optional[str] = None
@@ -64,6 +68,14 @@ class BotConfig:
     mood_decay_rate: float = 0.95 # 情绪衰减率
     mood_intensity_factor: float = 0.7 # 情绪强度因子

+    keywords_reaction_rules = [] # 关键词回复规则
+
+    chinese_typo_enable=True # 是否启用中文错别字生成器
+    chinese_typo_error_rate=0.03 # 单字替换概率
+    chinese_typo_min_freq=7 # 最小字频阈值
+    chinese_typo_tone_error_rate=0.2 # 声调错误概率
+    chinese_typo_word_replace_rate=0.02 # 整词替换概率
+
     # 默认人设
     PROMPT_PERSONALITY=[
         "曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧",
@@ -86,11 +98,283 @@ class BotConfig:
             os.makedirs(config_dir)
         return config_dir

+    @classmethod
+    def convert_to_specifierset(cls, value: str) -> SpecifierSet:
+        """将 字符串 版本表达式转换成 SpecifierSet
+        Args:
+            value[str]: 版本表达式(字符串)
+        Returns:
+            SpecifierSet
+        """
+
+        try:
+            converted = SpecifierSet(value)
+        except InvalidSpecifier as e:
+            logger.error(
+                f"{value} 分类使用了错误的版本约束表达式\n",
+                "请阅读 https://semver.org/lang/zh-CN/ 修改代码"
+            )
+            exit(1)
+
+        return converted
+
+    @classmethod
+    def get_config_version(cls, toml: dict) -> Version:
+        """提取配置文件的 SpecifierSet 版本数据
+        Args:
+            toml[dict]: 输入的配置文件字典
+        Returns:
+            Version
+        """
+
+        if 'inner' in toml:
+            try:
+                config_version : str = toml["inner"]["version"]
+            except KeyError as e:
+                logger.error(f"配置文件中 inner 段 不存在 {e}, 这是错误的配置文件")
+                raise KeyError(f"配置文件中 inner 段 不存在 {e}, 这是错误的配置文件")
+        else:
+            toml["inner"] = { "version": "0.0.0" }
+            config_version = toml["inner"]["version"]
+
+        try:
+            ver = version.parse(config_version)
+        except InvalidVersion as e:
+            logger.error(
+                "配置文件中 inner段 的 version 键是错误的版本描述\n"
+                "请阅读 https://semver.org/lang/zh-CN/ 修改配置,并参考本项目指定的模板进行修改\n"
+                "本项目在不同的版本下有不同的模板,请注意识别"
+            )
+            raise InvalidVersion("配置文件中 inner段 的 version 键是错误的版本描述\n")
+
+        return ver
+
     @classmethod
     def load_config(cls, config_path: str = None) -> "BotConfig":
         """从TOML配置文件加载配置"""
         config = cls()

+        def personality(parent: dict):
+            personality_config=parent['personality']
+            personality=personality_config.get('prompt_personality')
+            if len(personality) >= 2:
+                logger.info(f"载入自定义人格:{personality}")
+                config.PROMPT_PERSONALITY=personality_config.get('prompt_personality',config.PROMPT_PERSONALITY)
+            logger.info(f"载入自定义日程prompt:{personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)}")
+            config.PROMPT_SCHEDULE_GEN=personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)
+
+            if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
+                config.PERSONALITY_1=personality_config.get('personality_1_probability',config.PERSONALITY_1)
+                config.PERSONALITY_2=personality_config.get('personality_2_probability',config.PERSONALITY_2)
+                config.PERSONALITY_3=personality_config.get('personality_3_probability',config.PERSONALITY_3)
+
+        def emoji(parent: dict):
+            emoji_config = parent["emoji"]
+            config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", config.EMOJI_CHECK_INTERVAL)
+            config.EMOJI_REGISTER_INTERVAL = emoji_config.get("register_interval", config.EMOJI_REGISTER_INTERVAL)
+            config.EMOJI_CHECK_PROMPT = emoji_config.get('check_prompt',config.EMOJI_CHECK_PROMPT)
+            config.EMOJI_SAVE = emoji_config.get('auto_save',config.EMOJI_SAVE)
+            config.EMOJI_CHECK = emoji_config.get('enable_check',config.EMOJI_CHECK)
+
+        def cq_code(parent: dict):
+            cq_code_config = parent["cq_code"]
+            config.ENABLE_PIC_TRANSLATE = cq_code_config.get("enable_pic_translate", config.ENABLE_PIC_TRANSLATE)
+
+        def bot(parent: dict):
+            # 机器人基础配置
+            bot_config = parent["bot"]
+            bot_qq = bot_config.get("qq")
+            config.BOT_QQ = int(bot_qq)
+            config.BOT_NICKNAME = bot_config.get("nickname", config.BOT_NICKNAME)
+
+        def response(parent: dict):
+            response_config = parent["response"]
+            config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
+            config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
+            config.MODEL_R1_DISTILL_PROBABILITY = response_config.get("model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY)
+            config.max_response_length = response_config.get("max_response_length", config.max_response_length)
+
+        def model(parent: dict):
+            # 加载模型配置
+            model_config:dict = parent["model"]
+
+            config_list = [
+                "llm_reasoning", "llm_reasoning_minor", "llm_normal", "llm_normal_minor",
+                "llm_topic_judge", "llm_summary_by_topic", "llm_emotion_judge",
+                "vlm", "embedding", "moderation"
+            ]
+
+            for item in config_list:
+                if item in model_config:
+                    cfg_item:dict = model_config[item]
+
+                    # base_url 的例子: SILICONFLOW_BASE_URL
+                    # key 的例子: SILICONFLOW_KEY
+                    cfg_target = {"name": "", "base_url": "", "key": "", "pri_in": 0, "pri_out": 0}
+
+                    if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
+                        cfg_target = cfg_item
+
+                    elif config.INNER_VERSION in SpecifierSet(">=0.0.1"):
+                        stable_item = ["name","pri_in","pri_out"]
+                        pricing_item = ["pri_in","pri_out"]
+                        # 从配置中原始拷贝稳定字段
+                        for i in stable_item:
+                            # 如果 字段 属于计费项 且获取不到,那默认值是 0
+                            if i in pricing_item and i not in cfg_item:
+                                cfg_target[i] = 0
+                            else:
+                                # 没有特殊情况则原样复制
+                                try:
+                                    cfg_target[i] = cfg_item[i]
+                                except KeyError as e:
+                                    logger.error(f"{item} 中的必要字段 {e} 不存在,请检查")
+                                    raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查")
+
+                        provider = cfg_item.get("provider")
+                        if provider == None:
+                            logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查")
+                            raise KeyError(f"provider 字段在模型配置 {item} 中不存在,请检查")
+
+                        cfg_target["base_url"] = f"{provider}_BASE_URL"
+                        cfg_target["key"] = f"{provider}_KEY"
+
+                    # 如果 列表中的项目在 model_config 中,利用反射来设置对应项目
+                    setattr(config,item,cfg_target)
+                else:
+                    logger.error(f"模型 {item} 在config中不存在,请检查")
+                    raise KeyError(f"模型 {item} 在config中不存在,请检查")
+
+        def message(parent: dict):
+            msg_config = parent["message"]
+            config.MIN_TEXT_LENGTH = msg_config.get("min_text_length", config.MIN_TEXT_LENGTH)
+            config.MAX_CONTEXT_SIZE = msg_config.get("max_context_size", config.MAX_CONTEXT_SIZE)
+            config.emoji_chance = msg_config.get("emoji_chance", config.emoji_chance)
+            config.ban_words=msg_config.get("ban_words",config.ban_words)
+
+            if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
+                config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
+                config.response_willing_amplifier = msg_config.get("response_willing_amplifier", config.response_willing_amplifier)
+                config.response_interested_rate_amplifier = msg_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
+                config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
+
+        def memory(parent: dict):
+            memory_config = parent["memory"]
+            config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
+            config.forget_memory_interval = memory_config.get("forget_memory_interval", config.forget_memory_interval)
+
+        def mood(parent: dict):
+            mood_config = parent["mood"]
+            config.mood_update_interval = mood_config.get("mood_update_interval", config.mood_update_interval)
+            config.mood_decay_rate = mood_config.get("mood_decay_rate", config.mood_decay_rate)
+            config.mood_intensity_factor = mood_config.get("mood_intensity_factor", config.mood_intensity_factor)
+
+        def keywords_reaction(parent: dict):
+            keywords_reaction_config = parent["keywords_reaction"]
+            if keywords_reaction_config.get("enable", False):
+                config.keywords_reaction_rules = keywords_reaction_config.get("rules", config.keywords_reaction_rules)
+
+        def chinese_typo(parent: dict):
+            chinese_typo_config = parent["chinese_typo"]
+            config.chinese_typo_enable = chinese_typo_config.get("enable", config.chinese_typo_enable)
+            config.chinese_typo_error_rate = chinese_typo_config.get("error_rate", config.chinese_typo_error_rate)
+            config.chinese_typo_min_freq = chinese_typo_config.get("min_freq", config.chinese_typo_min_freq)
+            config.chinese_typo_tone_error_rate = chinese_typo_config.get("tone_error_rate", config.chinese_typo_tone_error_rate)
+            config.chinese_typo_word_replace_rate = chinese_typo_config.get("word_replace_rate", config.chinese_typo_word_replace_rate)
+
+        def groups(parent: dict):
+            groups_config = parent["groups"]
+            config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
+            config.talk_frequency_down_groups = set(groups_config.get("talk_frequency_down", []))
+            config.ban_user_id = set(groups_config.get("ban_user_id", []))
+
+        def others(parent: dict):
+            others_config = parent["others"]
+            config.enable_advance_output = others_config.get("enable_advance_output", config.enable_advance_output)
+            config.enable_kuuki_read = others_config.get("enable_kuuki_read", config.enable_kuuki_read)
+
+        # 版本表达式:>=1.0.0,<2.0.0
+        # 允许字段:func: method, support: str, notice: str, necessary: bool
+        # 如果使用 notice 字段,在该组配置加载时,会展示该字段对用户的警示
+        # 例如:"notice": "personality 将在 1.3.2 后被移除",那么在有效版本中的用户就会虽然可以
+        # 正常执行程序,但是会看到这条自定义提示
+        include_configs = {
+            "personality": {"func": personality, "support": ">=0.0.0"},
+            "emoji": {"func": emoji, "support": ">=0.0.0"},
+            "cq_code": {"func": cq_code, "support": ">=0.0.0"},
+            "bot": {"func": bot, "support": ">=0.0.0"},
+            "response": {"func": response, "support": ">=0.0.0"},
+            "model": {"func": model, "support": ">=0.0.0"},
+            "message": {"func": message, "support": ">=0.0.0"},
+            "memory": {"func": memory, "support": ">=0.0.0"},
+            "mood": {"func": mood, "support": ">=0.0.0"},
+            "keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False},
+            "chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False},
+            "groups": {"func": groups, "support": ">=0.0.0"},
+            "others": {"func": others, "support": ">=0.0.0"}
+        }
+
+        # 原地修改,将 字符串版本表达式 转换成 版本对象
+        for key in include_configs:
+            item_support = include_configs[key]["support"]
+            include_configs[key]["support"] = cls.convert_to_specifierset(item_support)
+
         if os.path.exists(config_path):
             with open(config_path, "rb") as f:
                 try:
@@ -99,128 +383,59 @@ class BotConfig:
                     logger.critical(f"配置文件bot_config.toml填写有误,请检查第{e.lineno}行第{e.colno}处:{e.msg}")
                     exit(1)

-                if 'personality' in toml_dict:
-                    personality_config=toml_dict['personality']
-                    personality=personality_config.get('prompt_personality')
-                    if len(personality) >= 2:
-                        logger.info(f"载入自定义人格:{personality}")
-                        config.PROMPT_PERSONALITY=personality_config.get('prompt_personality',config.PROMPT_PERSONALITY)
-                    logger.info(f"载入自定义日程prompt:{personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)}")
-                    config.PROMPT_SCHEDULE_GEN=personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)
-                    config.PERSONALITY_1=personality_config.get('personality_1_probability',config.PERSONALITY_1)
-                    config.PERSONALITY_2=personality_config.get('personality_2_probability',config.PERSONALITY_2)
-                    config.PERSONALITY_3=personality_config.get('personality_3_probability',config.PERSONALITY_3)
-
-                if "emoji" in toml_dict:
-                    emoji_config = toml_dict["emoji"]
-                    config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", config.EMOJI_CHECK_INTERVAL)
-                    config.EMOJI_REGISTER_INTERVAL = emoji_config.get("register_interval", config.EMOJI_REGISTER_INTERVAL)
-                    config.EMOJI_CHECK_PROMPT = emoji_config.get('check_prompt',config.EMOJI_CHECK_PROMPT)
-                    config.EMOJI_SAVE = emoji_config.get('auto_save',config.EMOJI_SAVE)
-                    config.EMOJI_CHECK = emoji_config.get('enable_check',config.EMOJI_CHECK)
-
-                if "cq_code" in toml_dict:
-                    cq_code_config = toml_dict["cq_code"]
-                    config.ENABLE_PIC_TRANSLATE = cq_code_config.get("enable_pic_translate", config.ENABLE_PIC_TRANSLATE)
-
-                # 机器人基础配置
-                if "bot" in toml_dict:
-                    bot_config = toml_dict["bot"]
-                    bot_qq = bot_config.get("qq")
-                    config.BOT_QQ = int(bot_qq)
-                    config.BOT_NICKNAME = bot_config.get("nickname", config.BOT_NICKNAME)
-
-                if "response" in toml_dict:
-                    response_config = toml_dict["response"]
-                    config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
-                    config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
-                    config.MODEL_R1_DISTILL_PROBABILITY = response_config.get("model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY)
-                    config.max_response_length = response_config.get("max_response_length", config.max_response_length)
-
-                # 加载模型配置
-                if "model" in toml_dict:
-                    model_config = toml_dict["model"]
-                    if "llm_reasoning" in model_config:
-                        config.llm_reasoning = model_config["llm_reasoning"]
-                    if "llm_reasoning_minor" in model_config:
-                        config.llm_reasoning_minor = model_config["llm_reasoning_minor"]
-                    if "llm_normal" in model_config:
-                        config.llm_normal = model_config["llm_normal"]
-                    if "llm_normal_minor" in model_config:
-                        config.llm_normal_minor = model_config["llm_normal_minor"]
-                    if "llm_topic_judge" in model_config:
-                        config.llm_topic_judge = model_config["llm_topic_judge"]
-                    if "llm_summary_by_topic" in model_config:
-                        config.llm_summary_by_topic = model_config["llm_summary_by_topic"]
-                    if "llm_emotion_judge" in model_config:
-                        config.llm_emotion_judge = model_config["llm_emotion_judge"]
-                    if "vlm" in model_config:
-                        config.vlm = model_config["vlm"]
-                    if "embedding" in model_config:
-                        config.embedding = model_config["embedding"]
-                    if "moderation" in model_config:
-                        config.moderation = model_config["moderation"]
-
-                # 消息配置
-                if "message" in toml_dict:
-                    msg_config = toml_dict["message"]
-                    config.MIN_TEXT_LENGTH = msg_config.get("min_text_length", config.MIN_TEXT_LENGTH)
-                    config.MAX_CONTEXT_SIZE = msg_config.get("max_context_size", config.MAX_CONTEXT_SIZE)
-                    config.emoji_chance = msg_config.get("emoji_chance", config.emoji_chance)
-                    config.ban_words=msg_config.get("ban_words",config.ban_words)
-                    config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
-                    config.response_willing_amplifier = msg_config.get("response_willing_amplifier", config.response_willing_amplifier)
-                    config.response_interested_rate_amplifier = msg_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
-                    config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
-
-                if "memory" in toml_dict:
-                    memory_config = toml_dict["memory"]
-                    config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
-                    config.forget_memory_interval = memory_config.get("forget_memory_interval", config.forget_memory_interval)
-
-                if "mood" in toml_dict:
-                    mood_config = toml_dict["mood"]
-                    config.mood_update_interval = mood_config.get("mood_update_interval", config.mood_update_interval)
-                    config.mood_decay_rate = mood_config.get("mood_decay_rate", config.mood_decay_rate)
-                    config.mood_intensity_factor = mood_config.get("mood_intensity_factor", config.mood_intensity_factor)
-
-                # 群组配置
-                if "groups" in toml_dict:
-                    groups_config = toml_dict["groups"]
-                    config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
-                    config.talk_frequency_down_groups = set(groups_config.get("talk_frequency_down", []))
-                    config.ban_user_id = set(groups_config.get("ban_user_id", []))
-
-                if "others" in toml_dict:
-                    others_config = toml_dict["others"]
-                    config.enable_advance_output = others_config.get("enable_advance_output", config.enable_advance_output)
-                    config.enable_kuuki_read = others_config.get("enable_kuuki_read", config.enable_kuuki_read)
+                # 获取配置文件版本
+                config.INNER_VERSION = cls.get_config_version(toml_dict)
+
+                # 如果在配置中找到了需要的项,调用对应项的闭包函数处理
+                for key in include_configs:
+                    if key in toml_dict:
+                        group_specifierset: SpecifierSet = include_configs[key]["support"]
+
+                        # 检查配置文件版本是否在支持范围内
+                        if config.INNER_VERSION in group_specifierset:
+                            # 如果版本在支持范围内,检查是否存在通知
+                            if 'notice' in include_configs[key]:
+                                logger.warning(include_configs[key]["notice"])
+
+                            include_configs[key]["func"](toml_dict)
+
+                        else:
+                            # 如果版本不在支持范围内,崩溃并提示用户
+                            logger.error(
+                                f"配置文件中的 '{key}' 字段的版本 ({config.INNER_VERSION}) 不在支持范围内。\n"
+                                f"当前程序仅支持以下版本范围: {group_specifierset}"
+                            )
+                            raise InvalidVersion(f"当前程序仅支持以下版本范围: {group_specifierset}")
+
+                    # 如果 necessary 项目存在,而且显式声明是 False,进入特殊处理
+                    elif "necessary" in include_configs[key] and include_configs[key].get("necessary") == False:
+                        # 通过 pass 处理的项虽然直接忽略也是可以的,但是为了不增加理解困难,依然需要在这里显式处理
+                        if key == "keywords_reaction":
+                            pass
+
+                    else:
+                        # 如果用户根本没有需要的配置项,提示缺少配置
+                        logger.error(f"配置文件中缺少必需的字段: '{key}'")
+                        raise KeyError(f"配置文件中缺少必需的字段: '{key}'")

                 logger.success(f"成功加载配置文件: {config_path}")

         return config

 # 获取配置文件路径

 bot_config_floder_path = BotConfig.get_config_dir()
 print(f"正在品鉴配置文件目录: {bot_config_floder_path}")

 bot_config_path = os.path.join(bot_config_floder_path, "bot_config.toml")

 if os.path.exists(bot_config_path):
     # 如果开发环境配置文件不存在,则使用默认配置文件
     print(f"异常的新鲜,异常的美味: {bot_config_path}")
     logger.info("使用bot配置文件")
 else:
-    logger.info("没有找到美味")
+    # 配置文件不存在
+    logger.error("配置文件不存在,请检查路径: {bot_config_path}")
+    raise FileNotFoundError(f"配置文件不存在: {bot_config_path}")

 global_config = BotConfig.load_config(config_path=bot_config_path)

|
|||||||
activate_prompt = ''
|
activate_prompt = ''
|
||||||
activate_prompt = f"以上是群里正在进行的聊天,{memory_prompt} 现在昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},{mood_prompt},你想要{relation_prompt_2}。"
|
activate_prompt = f"以上是群里正在进行的聊天,{memory_prompt} 现在昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},{mood_prompt},你想要{relation_prompt_2}。"
|
||||||
|
|
||||||
#检测机器人相关词汇
|
#检测机器人相关词汇,改为关键词检测与反应功能了,提取到全局配置中
|
||||||
bot_keywords = ['人机', 'bot', '机器', '入机', 'robot', '机器人']
|
# bot_keywords = ['人机', 'bot', '机器', '入机', 'robot', '机器人']
|
||||||
is_bot = any(keyword in message_txt.lower() for keyword in bot_keywords)
|
# is_bot = any(keyword in message_txt.lower() for keyword in bot_keywords)
|
||||||
if is_bot:
|
# if is_bot:
|
||||||
is_bot_prompt = '有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认'
|
# is_bot_prompt = '有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认'
|
||||||
else:
|
# else:
|
||||||
is_bot_prompt = ''
|
# is_bot_prompt = ''
|
||||||
|
|
||||||
|
# 关键词检测与反应
|
||||||
|
keywords_reaction_prompt = ''
|
||||||
|
for rule in global_config.keywords_reaction_rules:
|
||||||
|
if rule.get("enable", False):
|
||||||
|
if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
|
||||||
|
print(f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}")
|
||||||
|
keywords_reaction_prompt += rule.get("reaction", "") + ','
|
||||||
|
|
||||||
|
|
||||||
#人格选择
|
#人格选择
|
||||||
personality=global_config.PROMPT_PERSONALITY
|
personality=global_config.PROMPT_PERSONALITY
|
||||||
@@ -131,15 +140,15 @@ class PromptBuilder:
|
|||||||
personality_choice = random.random()
|
personality_choice = random.random()
|
||||||
if personality_choice < probability_1: # 第一种人格
|
if personality_choice < probability_1: # 第一种人格
|
||||||
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[0]}, 你正在浏览qq群,{promt_info_prompt},
|
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[0]}, 你正在浏览qq群,{promt_info_prompt},
|
||||||
现在请你给出日常且口语化的回复,平淡一些,尽量简短一些。{is_bot_prompt}
|
现在请你给出日常且口语化的回复,平淡一些,尽量简短一些。{keywords_reaction_prompt}
|
||||||
请注意把握群里的聊天内容,不要刻意突出自身学科背景,不要回复的太有条理,可以有个性。'''
|
请注意把握群里的聊天内容,不要刻意突出自身学科背景,不要回复的太有条理,可以有个性。'''
|
||||||
elif personality_choice < probability_1 + probability_2: # 第二种人格
|
elif personality_choice < probability_1 + probability_2: # 第二种人格
|
||||||
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[1]}, 你正在浏览qq群,{promt_info_prompt},
|
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[1]}, 你正在浏览qq群,{promt_info_prompt},
|
||||||
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{is_bot_prompt}
|
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
|
||||||
请你表达自己的见解和观点。可以有个性。'''
|
请你表达自己的见解和观点。可以有个性。'''
|
||||||
else: # 第三种人格
|
else: # 第三种人格
|
||||||
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[2]}, 你正在浏览qq群,{promt_info_prompt},
|
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[2]}, 你正在浏览qq群,{promt_info_prompt},
|
||||||
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{is_bot_prompt}
|
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
|
||||||
请你表达自己的见解和观点。可以有个性。'''
|
请你表达自己的见解和观点。可以有个性。'''
|
||||||
|
|
||||||
#中文高手(新加的好玩功能)
|
#中文高手(新加的好玩功能)
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ from ..models.utils_model import LLM_request
 from ..utils.typo_generator import ChineseTypoGenerator
 from .config import global_config
 from .message import Message
+from ..moods.moods import MoodManager

 driver = get_driver()
 config = driver.config
@@ -326,40 +327,68 @@ def random_remove_punctuation(text: str) -> str:

 def process_llm_response(text: str) -> List[str]:
     # processed_response = process_text_with_typos(content)
-    if len(text) > 300:
+    if len(text) > 200:
         print(f"回复过长 ({len(text)} 字符),返回默认回复")
         return ['懒得说']
     # 处理长消息
     typo_generator = ChineseTypoGenerator(
-        error_rate=0.03,
-        min_freq=7,
-        tone_error_rate=0.2,
-        word_replace_rate=0.02
+        error_rate=global_config.chinese_typo_error_rate,
+        min_freq=global_config.chinese_typo_min_freq,
+        tone_error_rate=global_config.chinese_typo_tone_error_rate,
+        word_replace_rate=global_config.chinese_typo_word_replace_rate
     )
-    typoed_text = typo_generator.create_typo_sentence(text)[0]
-    sentences = split_into_sentences_w_remove_punctuation(typoed_text)
+    split_sentences = split_into_sentences_w_remove_punctuation(text)
+    sentences = []
+    for sentence in split_sentences:
+        if global_config.chinese_typo_enable:
+            typoed_text, typo_corrections = typo_generator.create_typo_sentence(sentence)
+            sentences.append(typoed_text)
+            if typo_corrections:
+                sentences.append(typo_corrections)
+        else:
+            sentences.append(sentence)
     # 检查分割后的消息数量是否过多(超过3条)
-    if len(sentences) > 4:
+    if len(sentences) > 5:
         print(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
         return [f'{global_config.BOT_NICKNAME}不知道哦']

     return sentences


-def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_time: float = 0.1) -> float:
+def calculate_typing_time(input_string: str, chinese_time: float = 0.4, english_time: float = 0.2) -> float:
     """
     计算输入字符串所需的时间,中文和英文字符有不同的输入时间
     input_string (str): 输入的字符串
-    chinese_time (float): 中文字符的输入时间,默认为0.3秒
-    english_time (float): 英文字符的输入时间,默认为0.15秒
+    chinese_time (float): 中文字符的输入时间,默认为0.2秒
+    english_time (float): 英文字符的输入时间,默认为0.1秒
+
+    特殊情况:
+    - 如果只有一个中文字符,将使用3倍的中文输入时间
+    - 在所有输入结束后,额外加上回车时间0.3秒
     """
+    mood_manager = MoodManager.get_instance()
+    # 将0-1的唤醒度映射到-1到1
+    mood_arousal = mood_manager.current_mood.arousal
+    # 映射到0.5到2倍的速度系数
+    typing_speed_multiplier = 1.5 ** mood_arousal # 唤醒度为1时速度翻倍,为-1时速度减半
+    chinese_time *= 1/typing_speed_multiplier
+    english_time *= 1/typing_speed_multiplier
+    # 计算中文字符数
+    chinese_chars = sum(1 for char in input_string if '\u4e00' <= char <= '\u9fff')
+
+    # 如果只有一个中文字符,使用3倍时间
+    if chinese_chars == 1 and len(input_string.strip()) == 1:
+        return chinese_time * 3 + 0.3 # 加上回车时间
+
+    # 正常计算所有字符的输入时间
     total_time = 0.0
     for char in input_string:
         if '\u4e00' <= char <= '\u9fff': # 判断是否为中文字符
             total_time += chinese_time
         else: # 其他字符(如英文)
             total_time += english_time
-    return total_time
+    return total_time + 0.3 # 加上回车时间


 def cosine_similarity(v1, v2):

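Rough arithmetic behind the new mood-scaled typing delay (constants from the diff above; the arousal value and sample text are assumptions for illustration):

```python
# Sketch: per-character times are divided by 1.5 ** arousal, plus 0.3 s for the final enter.
chinese_time, english_time, arousal = 0.4, 0.2, 1.0  # arousal assumed, nominally -1..1
multiplier = 1.5 ** arousal                          # 1.5 -> types faster when excited
chinese_time /= multiplier                           # ~0.267 s per Chinese character
english_time /= multiplier                           # ~0.133 s per other character
total = 2 * chinese_time + 2 * english_time + 0.3    # e.g. for the text "你好ok"
print(round(total, 2))                               # ~1.1 s
```
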
@@ -23,6 +23,7 @@ class LLM_request:
             self.api_key = getattr(config, model["key"])
             self.base_url = getattr(config, model["base_url"])
         except AttributeError as e:
+            logger.error(f"原始 model dict 信息:{model}")
             logger.error(f"配置错误:找不到对应的配置项 - {str(e)}")
             raise ValueError(f"配置错误:找不到对应的配置项 - {str(e)}") from e
         self.model_name = model["name"]
@@ -181,6 +182,13 @@ class LLM_request:
                     continue
                 elif response.status in policy["abort_codes"]:
                     logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
+                    if response.status == 403 :
+                        if global_config.llm_normal == "Pro/deepseek-ai/DeepSeek-V3":
+                            logger.error("可能是没有给硅基流动充钱,普通模型自动退化至非Pro模型,反应速度可能会变慢")
+                            global_config.llm_normal = "deepseek-ai/DeepSeek-V3"
+                        if global_config.llm_reasoning == "Pro/deepseek-ai/DeepSeek-R1":
+                            logger.error("可能是没有给硅基流动充钱,推理模型自动退化至非Pro模型,反应速度可能会变慢")
+                            global_config.llm_reasoning = "deepseek-ai/DeepSeek-R1"
                     raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")

                 response.raise_for_status()

@@ -284,10 +284,13 @@ class ChineseTypoGenerator:

         返回:
             typo_sentence: 包含错别字的句子
-            typo_info: 错别字信息列表
+            correction_suggestion: 随机选择的一个纠正建议,返回正确的字/词
         """
         result = []
         typo_info = []
+        word_typos = [] # 记录词语错误对(错词,正确词)
+        char_typos = [] # 记录单字错误对(错字,正确字)
+        current_pos = 0

         # 分词
         words = self._segment_sentence(sentence)
@@ -296,6 +299,7 @@ class ChineseTypoGenerator:
             # 如果是标点符号或空格,直接添加
             if all(not self._is_chinese_char(c) for c in word):
                 result.append(word)
+                current_pos += len(word)
                 continue

             # 获取词语的拼音
@@ -316,6 +320,8 @@ class ChineseTypoGenerator:
                         ' '.join(word_pinyin),
                         ' '.join(self._get_word_pinyin(typo_word)),
                         orig_freq, typo_freq))
+                    word_typos.append((typo_word, word)) # 记录(错词,正确词)对
+                    current_pos += len(typo_word)
                     continue

             # 如果不进行整词替换,则进行单字替换
@@ -333,11 +339,15 @@ class ChineseTypoGenerator:
                     result.append(typo_char)
                     typo_py = pinyin(typo_char, style=Style.TONE3)[0][0]
                     typo_info.append((char, typo_char, py, typo_py, orig_freq, typo_freq))
+                    char_typos.append((typo_char, char)) # 记录(错字,正确字)对
+                    current_pos += 1
                     continue
                 result.append(char)
+                current_pos += 1
             else:
                 # 处理多字词的单字替换
                 word_result = []
+                word_start_pos = current_pos
                 for i, (char, py) in enumerate(zip(word, word_pinyin)):
                     # 词中的字替换概率降低
                     word_error_rate = self.error_rate * (0.7 ** (len(word) - 1))
@@ -353,11 +363,24 @@ class ChineseTypoGenerator:
                         word_result.append(typo_char)
                         typo_py = pinyin(typo_char, style=Style.TONE3)[0][0]
                         typo_info.append((char, typo_char, py, typo_py, orig_freq, typo_freq))
+                        char_typos.append((typo_char, char)) # 记录(错字,正确字)对
                         continue
                     word_result.append(char)
                 result.append(''.join(word_result))
+                current_pos += len(word)

-        return ''.join(result), typo_info
+        # 优先从词语错误中选择,如果没有则从单字错误中选择
+        correction_suggestion = None
+        # 50%概率返回纠正建议
+        if random.random() < 0.5:
+            if word_typos:
+                wrong_word, correct_word = random.choice(word_typos)
+                correction_suggestion = correct_word
+            elif char_typos:
+                wrong_char, correct_char = random.choice(char_typos)
+                correction_suggestion = correct_char
+
+        return ''.join(result), correction_suggestion

     def format_typo_info(self, typo_info):
         """
@@ -419,16 +442,16 @@ def main():

     # 创建包含错别字的句子
     start_time = time.time()
-    typo_sentence, typo_info = typo_generator.create_typo_sentence(sentence)
+    typo_sentence, correction_suggestion = typo_generator.create_typo_sentence(sentence)

     # 打印结果
     print("\n原句:", sentence)
     print("错字版:", typo_sentence)

-    # 打印错别字信息
-    if typo_info:
-        print("\n错别字信息:")
-        print(typo_generator.format_typo_info(typo_info))
+    # 打印纠正建议
+    if correction_suggestion:
+        print("\n随机纠正建议:")
+        print(f"应该改为:{correction_suggestion}")

     # 计算并打印总耗时
     end_time = time.time()

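Usage sketch for the changed return value of `create_typo_sentence()`: it now returns `(typo_sentence, correction_suggestion)` instead of `(typo_sentence, typo_info)`. The import path below is hypothetical; in the repo the class is imported relatively as `..utils.typo_generator`:

```python
from typo_generator import ChineseTypoGenerator  # hypothetical import path

typo_generator = ChineseTypoGenerator(
    error_rate=0.03, min_freq=7, tone_error_rate=0.2, word_replace_rate=0.02
)
typoed_text, correction = typo_generator.create_typo_sentence("今天天气不错")
print(typoed_text)
if correction:  # roughly half of the calls also return the correct word/character
    print(correction)
```
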
@@ -1,3 +1,6 @@
+[inner]
+version = "0.0.3"
+
 [bot]
 qq = 123
 nickname = "麦麦"
@@ -51,6 +54,26 @@ mood_update_interval = 1.0 # 情绪更新间隔 单位秒
 mood_decay_rate = 0.95 # 情绪衰减率
 mood_intensity_factor = 1.0 # 情绪强度因子

+[keywords_reaction] # 针对某个关键词作出反应
+enable = true # 关键词反应功能的总开关
+
+[[keywords_reaction.rules]] # 如果想要新增多个关键词,直接复制本条,修改keywords和reaction即可
+enable = true # 是否启用此条(为了人类在未来AI战争能更好地识别AI(bushi),默认开启)
+keywords = ["人机", "bot", "机器", "入机", "robot", "机器人"] # 会触发反应的关键词
+reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" # 触发之后添加的提示词
+
+[[keywords_reaction.rules]] # 就像这样复制
+enable = false # 仅作示例,不会触发
+keywords = ["测试关键词回复","test",""]
+reaction = "回答“测试成功”"
+
+[chinese_typo]
+enable = true # 是否启用中文错别字生成器
+error_rate=0.03 # 单字替换概率
+min_freq=7 # 最小字频阈值
+tone_error_rate=0.2 # 声调错误概率
+word_replace_rate=0.02 # 整词替换概率
+
 [others]
 enable_advance_output = true # 是否启用高级输出
 enable_kuuki_read = true # 是否启用读空气功能
@@ -80,49 +103,42 @@ ban_user_id = [] #禁止回复消息的QQ号

 [model.llm_reasoning] #回复模型1 主要回复模型
 name = "Pro/deepseek-ai/DeepSeek-R1"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
 pri_in = 0 #模型的输入价格(非必填,可以记录消耗)
 pri_out = 0 #模型的输出价格(非必填,可以记录消耗)

 [model.llm_reasoning_minor] #回复模型3 次要回复模型
 name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"

 #非推理模型

 [model.llm_normal] #V3 回复模型2 次要回复模型
 name = "Pro/deepseek-ai/DeepSeek-V3"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"

 [model.llm_normal_minor] #V2.5
 name = "deepseek-ai/DeepSeek-V2.5"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"

 [model.llm_emotion_judge] #主题判断 0.7/m
 name = "Qwen/Qwen2.5-14B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"

 [model.llm_topic_judge] #主题判断:建议使用qwen2.5 7b
 name = "Pro/Qwen/Qwen2.5-7B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"

 [model.llm_summary_by_topic] #建议使用qwen2.5 32b 及以上
 name = "Qwen/Qwen2.5-32B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
 pri_in = 0
 pri_out = 0

 [model.moderation] #内容审核 未启用
 name = ""
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"
 pri_in = 0
 pri_out = 0

@@ -130,8 +146,7 @@ pri_out = 0

 [model.vlm] #图像识别 0.35/m
 name = "Pro/Qwen/Qwen2-VL-7B-Instruct"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"


@@ -139,5 +154,4 @@ key = "SILICONFLOW_KEY"

 [model.embedding] #嵌入
 name = "BAAI/bge-m3"
-base_url = "SILICONFLOW_BASE_URL"
-key = "SILICONFLOW_KEY"
+provider = "SILICONFLOW"

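A small sketch of how the new `[inner]` section and a `[[keywords_reaction.rules]]` entry can be read back with `tomli` (the template path is the one `init_config()` copies from; output values follow the template above):

```python
import tomli
from packaging.version import Version

with open("template/bot_config_template.toml", "rb") as f:  # tomli requires binary mode
    cfg = tomli.load(f)

print(Version(cfg["inner"]["version"]))  # 0.0.3
for rule in cfg["keywords_reaction"]["rules"]:
    if rule["enable"]:
        print(rule["keywords"], "->", rule["reaction"])
```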