diff --git a/.gitignore b/.gitignore
index 4e1606a54..e51abc5cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -193,9 +193,8 @@ cython_debug/
# jieba
jieba.cache
-
-# vscode
-/.vscode
+# .vscode
+.vscode/*
+!.vscode/settings.json
# direnv
/.direnv
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 000000000..23fd35f0e
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "editor.formatOnSave": true
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index 0c02d1cba..f4ebca07d 100644
--- a/README.md
+++ b/README.md
@@ -29,16 +29,17 @@
-> ⚠️ **注意事项**
+> [!WARNING]
> - 项目处于活跃开发阶段,代码可能随时更改
> - 文档未完善,有问题可以提交 Issue 或者 Discussion
> - QQ机器人存在被限制风险,请自行了解,谨慎使用
> - 由于持续迭代,可能存在一些已知或未知的bug
> - 由于开发中,可能消耗较多token
-**交流群**: 766798517 一群人较多,建议加下面的(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
-**交流群**: 571780722 另一个群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
-**交流群**: 1035228475 另一个群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
+## 💬交流群
- [一群](https://qm.qq.com/q/VQ3XZrWgMs) 766798517,一群人较多,建议加下面的群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
+- [二群](https://qm.qq.com/q/RzmCiRtHEW) 571780722 (开发和建议相关讨论)不一定有空回复,会优先写文档和代码
+- [三群](https://qm.qq.com/q/wlH5eT8OmQ) 1035228475(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
**其他平台版本**
@@ -51,18 +52,17 @@
### 部署方式
-如果你不知道Docker是什么,建议寻找相关教程或使用手动部署(现在不建议使用docker,更新慢,可能不适配)
+- 📦 **Windows 一键傻瓜式部署**:请运行项目根目录中的 `run.bat`,部署完成后请参照后续配置指南进行配置
+
- [📦 Windows 手动部署指南](docs/manual_deploy_windows.md)
+
- [📦 Linux 手动部署指南](docs/manual_deploy_linux.md)
+
+如果你不知道Docker是什么,建议寻找相关教程或使用手动部署 **(现在不建议使用docker,更新慢,可能不适配)**
- [🐳 Docker部署指南](docs/docker_deploy.md)
-- [📦 手动部署指南 Windows](docs/manual_deploy_windows.md)
-
-
-- [📦 手动部署指南 Linux](docs/manual_deploy_linux.md)
-
-- 📦 Windows 一键傻瓜式部署,请运行项目根目录中的 ```run.bat```,部署完成后请参照后续配置指南进行配置
-
### 配置说明
- [🎀 新手配置指南](docs/installation_cute.md) - 通俗易懂的配置教程,适合初次使用的猫娘
- [⚙️ 标准配置指南](docs/installation_standard.md) - 简明专业的配置说明,适合有经验的用户
@@ -140,9 +140,10 @@
## 📌 注意事项
-SengokuCola纯编程外行,面向cursor编程,很多代码史一样多多包涵
-> ⚠️ **警告**:本应用生成内容来自人工智能模型,由 AI 生成,请仔细甄别,请勿用于违反法律的用途,AI生成内容不代表本人观点和立场。
+SengokuCola纯编程外行,面向cursor编程,很多代码史一样多多包涵
+> [!WARNING]
+> 本应用生成内容来自人工智能模型,由 AI 生成,请仔细甄别,请勿用于违反法律的用途,AI生成内容不代表本人观点和立场。
## 致谢
[nonebot2](https://github.com/nonebot/nonebot2): 跨平台 Python 异步聊天机器人框架
diff --git a/bot.py b/bot.py
index c2ed3dfdf..471a98eaf 100644
--- a/bot.py
+++ b/bot.py
@@ -1,9 +1,12 @@
+import asyncio
import os
import shutil
import sys
import nonebot
import time
+
+import uvicorn
from dotenv import load_dotenv
from loguru import logger
from nonebot.adapters.onebot.v11 import Adapter
@@ -12,6 +15,8 @@ import platform
# 获取没有加载env时的环境变量
env_mask = {key: os.getenv(key) for key in os.environ}
+uvicorn_server = None
+
def easter_egg():
# 彩蛋
@@ -100,10 +105,12 @@ def load_logger():
"#777777>|> {name:.<8}:{function:.<8}:{line: >4} -> {message}",
colorize=True,
- level=os.getenv("LOG_LEVEL", "DEBUG") # 根据环境设置日志级别,默认为INFO
+ level=os.getenv("LOG_LEVEL", "INFO"), # 根据环境设置日志级别,默认为INFO
+ filter=lambda record: "nonebot" not in record["name"]
)
+
def scan_provider(env_config: dict):
provider = {}
@@ -138,7 +145,39 @@ def scan_provider(env_config: dict):
raise ValueError(f"请检查 '{provider_name}' 提供商配置是否丢失 BASE_URL 或 KEY 环境变量")
-if __name__ == "__main__":
+async def graceful_shutdown():
+ try:
+ global uvicorn_server
+ if uvicorn_server:
+ uvicorn_server.force_exit = True # 强制退出
+ await uvicorn_server.shutdown()
+
+ tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
+ for task in tasks:
+ task.cancel()
+ await asyncio.gather(*tasks, return_exceptions=True)
+
+ except Exception as e:
+ logger.error(f"麦麦关闭失败: {e}")
+
+
+async def uvicorn_main():
+ global uvicorn_server
+ config = uvicorn.Config(
+ app="__main__:app",
+ host=os.getenv("HOST", "127.0.0.1"),
+ port=int(os.getenv("PORT", 8080)),
+ reload=os.getenv("ENVIRONMENT") == "dev",
+ timeout_graceful_shutdown=5,
+ log_config=None,
+ access_log=False
+ )
+ server = uvicorn.Server(config)
+ uvicorn_server = server
+ await server.serve()
+
+
+def raw_main():
# 利用 TZ 环境变量设定程序工作的时区
# 仅保证行为一致,不依赖 localtime(),实际对生产环境几乎没有作用
if platform.system().lower() != 'windows':
@@ -165,10 +204,30 @@ if __name__ == "__main__":
nonebot.init(**base_config, **env_config)
# 注册适配器
+ global driver
driver = nonebot.get_driver()
driver.register_adapter(Adapter)
# 加载插件
nonebot.load_plugins("src/plugins")
- nonebot.run()
+
+if __name__ == "__main__":
+    # 先创建事件循环,保证 finally 中的清理逻辑始终可用
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+
+    try:
+        raw_main()
+
+        app = nonebot.get_asgi()
+
+        loop.run_until_complete(uvicorn_main())
+    except KeyboardInterrupt:
+        logger.warning("麦麦会努力做的更好的!正在停止中......")
+    except Exception as e:
+        logger.error(f"主程序异常: {e}")
+    finally:
+        loop.run_until_complete(graceful_shutdown())
+        loop.close()
+        logger.info("进程终止完毕,麦麦开始休眠......下次再见哦!")
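The new entrypoint owns the event loop so it can drain uvicorn and then cancel whatever tasks remain. As a self-contained illustration of the cancellation pattern `graceful_shutdown()` uses (a sketch with hypothetical worker names, not project code):

```python
import asyncio

async def worker(name: str):
    try:
        await asyncio.sleep(3600)  # stand-in for a long-running job
    except asyncio.CancelledError:
        print(f"{name} cancelled cleanly")
        raise

async def shutdown_all():
    # Same pattern as graceful_shutdown(): cancel every task except the
    # current one, then gather with return_exceptions=True so cancellation
    # errors are collected instead of propagating out of the shutdown path.
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    for t in tasks:
        t.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)

async def main():
    for i in range(3):
        asyncio.create_task(worker(f"task-{i}"))
    await asyncio.sleep(0)  # yield once so the workers actually start
    await shutdown_all()

asyncio.run(main())
```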
diff --git a/changelog_config.md b/changelog_config.md
index 7101fe828..c4c560644 100644
--- a/changelog_config.md
+++ b/changelog_config.md
@@ -1,6 +1,12 @@
# Changelog
+## [0.0.5] - 2025-3-11
+### Added
+- 新增了 `alias_names` 配置项,用于指定麦麦的别名。
+
## [0.0.4] - 2025-3-9
### Added
- 新增了 `memory_ban_words` 配置项,用于指定不希望记忆的词汇。
+
+
diff --git a/docker-compose.yml b/docker-compose.yml
index 512558558..227df606b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -6,8 +6,6 @@ services:
- NAPCAT_UID=${NAPCAT_UID}
- NAPCAT_GID=${NAPCAT_GID} # 让 NapCat 获取当前用户 GID,UID,防止权限问题
ports:
- - 3000:3000
- - 3001:3001
- 6099:6099
restart: unless-stopped
volumes:
@@ -19,7 +17,7 @@ services:
mongodb:
container_name: mongodb
environment:
- - tz=Asia/Shanghai
+ - TZ=Asia/Shanghai
# - MONGO_INITDB_ROOT_USERNAME=your_username
# - MONGO_INITDB_ROOT_PASSWORD=your_password
expose:
diff --git a/docs/Jonathan R.md b/docs/Jonathan R.md
new file mode 100644
index 000000000..660caaeec
--- /dev/null
+++ b/docs/Jonathan R.md
@@ -0,0 +1,20 @@
+Jonathan R. Wolpaw 在 “Memory in neuroscience: rhetoric versus reality” 一文中提到,从神经科学的感觉运动假设出发,整个神经系统的功能是将经验与适当的行为联系起来,而不是单纯的信息存储。
+Wolpaw, J. R. (2019). Memory in neuroscience: rhetoric versus reality. Behavioral and Cognitive Neuroscience Reviews, (2).
+
+1. **单一过程理论**
+ - 单一过程理论认为,识别记忆主要是基于熟悉性这一单一因素的影响。熟悉性是指对刺激的一种自动的、无意识的感知,它可以使我们在没有回忆起具体细节的情况下,判断一个刺激是否曾经出现过。
+   - 例如,在一些实验中,研究者发现被试可以在没有回忆起具体学习情境的情况下,对曾经出现过的刺激做出正确的判断,这被认为是熟悉性在起作用。
+2. **双重过程理论**
+ - 双重过程理论则认为,识别记忆是基于两个过程:回忆和熟悉性。回忆是指对过去经验的有意识的回忆,它可以使我们回忆起具体的细节和情境;熟悉性则是一种自动的、无意识的感知。
+   - 该理论认为,在识别记忆中,回忆和熟悉性共同作用,使我们能够判断一个刺激是否曾经出现过。例如,在 “记得 / 知道” 范式中,被试被要求判断他们对一个刺激的记忆是基于回忆还是熟悉性。研究发现,被试可以区分这两种不同的记忆过程,这为双重过程理论提供了支持。
+
+
+
+1. **神经元节点与连接**:借鉴神经网络原理,将每个记忆单元视为一个神经元节点。节点之间通过连接相互关联,连接的强度代表记忆之间的关联程度。在形态学联想记忆中,具有相似形态特征的记忆节点连接强度较高。例如,苹果和橘子的记忆节点,由于在形状、都是水果等形态语义特征上相似,它们之间的连接强度大于苹果与汽车记忆节点间的连接强度。
+2. **记忆聚类与层次结构**:依据形态特征的相似性对记忆进行聚类,形成不同的记忆簇。每个记忆簇内部的记忆具有较高的相似性,而不同记忆簇之间的记忆相似性较低。同时,构建记忆的层次结构,高层次的记忆节点代表更抽象、概括的概念,低层次的记忆节点对应具体的实例。比如,“水果” 作为高层次记忆节点,连接着 “苹果”“橘子”“香蕉” 等低层次具体水果的记忆节点。
+3. **网络的动态更新**:随着新记忆的不断加入,记忆网络动态调整。新记忆节点根据其形态特征与现有网络中的节点建立连接,同时影响相关连接的强度。若新记忆与某个记忆簇的特征高度相似,则被纳入该记忆簇;若具有独特特征,则可能引发新的记忆簇的形成。例如,当系统学习到一种新的水果 “番石榴”,它会根据番石榴的形态、语义等特征,在记忆网络中找到与之最相似的区域(如水果记忆簇),并建立相应连接,同时调整周围节点连接强度以适应这一新记忆。
+
+
+
+- **相似性联想**:该理论认为,当两个或多个事物在形态上具有相似性时,它们在记忆中会形成关联。例如,梨和苹果在形状和都是水果这一属性上有相似性,所以当我们看到梨时,很容易通过形态学联想记忆联想到苹果。这种相似性联想有助于我们对新事物进行分类和理解,当遇到一个新的类似水果时,我们可以通过与已有的水果记忆进行相似性匹配,来推测它的一些特征。
+- **时空关联性联想**:除了相似性联想,MAM 还强调时空关联性联想。如果两个事物在时间或空间上经常同时出现,它们也会在记忆中形成关联。比如,每次在公园里看到花的时候,都能听到鸟儿的叫声,那么花和鸟儿叫声的形态特征(花的视觉形态和鸟叫的听觉形态)就会在记忆中形成关联,以后听到鸟叫可能就会联想到公园里的花。
\ No newline at end of file
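A toy sketch of the node-and-edge model these notes describe (my illustration; it mirrors the semantics of `Memory_graph` in src/plugins/memory_system/memory.py rather than reproducing it): concepts are nodes holding memory items, associations are weighted edges, and stronger links are retrieved first.

```python
import networkx as nx

G = nx.Graph()

def add_memory(concept: str, memory: str):
    # each concept node keeps a list of concrete memory items
    if concept in G:
        G.nodes[concept]["memory_items"].append(memory)
    else:
        G.add_node(concept, memory_items=[memory])

def connect(c1: str, c2: str, strength: int = 1):
    # repeated co-occurrence strengthens an existing association
    if c1 == c2:
        return
    if G.has_edge(c1, c2):
        G[c1][c2]["strength"] += strength
    else:
        G.add_edge(c1, c2, strength=strength)

add_memory("苹果", "红色的水果")
add_memory("橘子", "橙色的水果")
add_memory("汽车", "有四个轮子")
connect("苹果", "橘子", strength=3)  # 形态相似,连接更强
connect("苹果", "汽车", strength=1)  # 偶然共现,连接较弱
neighbors = sorted(G["苹果"].items(), key=lambda kv: -kv[1]["strength"])
print([(n, d["strength"]) for n, d in neighbors])  # [('橘子', 3), ('汽车', 1)]
```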
diff --git a/docs/docker_deploy.md b/docs/docker_deploy.md
index 3958d2fc4..db759dfd0 100644
--- a/docs/docker_deploy.md
+++ b/docs/docker_deploy.md
@@ -1,67 +1,97 @@
# 🐳 Docker 部署指南
-## 部署步骤(推荐,但不一定是最新)
+## 部署步骤 (推荐,但不一定是最新)
+
+**"更新镜像与容器"部分在本文档 [Part 6](#6-更新镜像与容器)**
+
+### 0. 前提说明
+
+**本文假设读者已具备一定的 Docker 基础知识。若您对 Docker 不熟悉,建议先参考相关教程或文档进行学习,或选择使用 [📦Linux手动部署指南](./manual_deploy_linux.md) 或 [📦Windows手动部署指南](./manual_deploy_windows.md) 。**
-### 1. 获取Docker配置文件:
+### 1. 获取Docker配置文件
+
+- 建议先单独创建好一个文件夹并进入,作为工作目录
```bash
wget https://raw.githubusercontent.com/SengokuCola/MaiMBot/main/docker-compose.yml -O docker-compose.yml
```
-- 若需要启用MongoDB数据库的用户名和密码,可进入docker-compose.yml,取消MongoDB处的注释并修改变量`=`后方的值为你的用户名和密码\
-修改后请注意在之后配置`.env.prod`文件时指定MongoDB数据库的用户名密码
+- 若需要启用MongoDB数据库的用户名和密码,可进入docker-compose.yml,取消MongoDB处的注释并修改变量旁 `=` 后方的值为你的用户名和密码\
+修改后请注意在之后配置 `.env.prod` 文件时指定MongoDB数据库的用户名密码
-### 2. 启动服务:
+### 2. 启动服务
-- **!!! 请在第一次启动前确保当前工作目录下`.env.prod`与`bot_config.toml`文件存在 !!!**\
+- **!!! 请在第一次启动前确保当前工作目录下 `.env.prod` 与 `bot_config.toml` 文件存在 !!!**\
由于Docker文件映射行为的特殊性,若宿主机的映射路径不存在,可能导致意外的目录创建,而不会创建文件,由于此处需要文件映射到文件,需提前确保文件存在且路径正确,可使用如下命令:
+
```bash
touch .env.prod
touch bot_config.toml
```
- 启动Docker容器:
+
```bash
NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose up -d
+# 旧版Docker中可能找不到docker compose,请使用docker-compose工具替代
+NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker-compose up -d
```
-- 旧版Docker中可能找不到docker compose,请使用docker-compose工具替代
+### 3. 修改配置并重启Docker
-### 3. 修改配置并重启Docker:
-
-- 请前往 [🎀 新手配置指南](docs/installation_cute.md) 或 [⚙️ 标准配置指南](docs/installation_standard.md) 完成`.env.prod`与`bot_config.toml`配置文件的编写\
-**需要注意`.env.prod`中HOST处IP的填写,Docker中部署和系统中直接安装的配置会有所不同**
+- 请前往 [🎀新手配置指南](./installation_cute.md) 或 [⚙️标准配置指南](./installation_standard.md) 完成 `.env.prod` 与 `bot_config.toml` 配置文件的编写\
+**需要注意 `.env.prod` 中HOST处IP的填写,Docker中部署和系统中直接安装的配置会有所不同**
- 重启Docker容器:
+
```bash
-docker restart maimbot # 若修改过容器名称则替换maimbot为你自定的名臣
+docker restart maimbot # 若修改过容器名称则替换maimbot为你自定的名称
```
- 下方命令可以但不推荐,只是同时重启NapCat、MongoDB、MaiMBot三个服务
+
```bash
NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose restart
+# 旧版Docker中可能找不到docker compose,请使用docker-compose工具替代
+NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker-compose restart
```
-- 旧版Docker中可能找不到docker compose,请使用docker-compose工具替代
-
### 4. 登入NapCat管理页添加反向WebSocket
-- 在浏览器地址栏输入`http://<宿主机IP>:6099/`进入NapCat的管理Web页,添加一个Websocket客户端
+- 在浏览器地址栏输入 `http://<宿主机IP>:6099/` 进入NapCat的管理Web页,添加一个Websocket客户端
+
> 网络配置 -> 新建 -> Websocket客户端
-- Websocket客户端的名称自定,URL栏填入`ws://maimbot:8080/onebot/v11/ws`,启用并保存即可\
+- Websocket客户端的名称自定,URL栏填入 `ws://maimbot:8080/onebot/v11/ws`,启用并保存即可\
(若修改过容器名称则替换maimbot为你自定的名称)
-### 5. 愉快地和麦麦对话吧!
+### 5. 部署完成,愉快地和麦麦对话吧!
+
+
+### 6. 更新镜像与容器
+
+- 拉取最新镜像
+
+```bash
+docker-compose pull
+```
+
+- 执行启动容器指令,该指令会自动重建并启动镜像有更新的容器
+
+```bash
+NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose up -d
+# 旧版Docker中可能找不到docker compose,请使用docker-compose工具替代
+NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker-compose up -d
+```
## ⚠️ 注意事项
- 目前部署方案仍在测试中,可能存在未知问题
- 配置文件中的API密钥请妥善保管,不要泄露
-- 建议先在测试环境中运行,确认无误后再部署到生产环境
\ No newline at end of file
+- 建议先在测试环境中运行,确认无误后再部署到生产环境
\ No newline at end of file
diff --git a/docs/installation_cute.md b/docs/installation_cute.md
index e7541f7d3..4465660f9 100644
--- a/docs/installation_cute.md
+++ b/docs/installation_cute.md
@@ -52,12 +52,12 @@ key = "SILICONFLOW_KEY" # 用同一张门票就可以啦
如果你想用DeepSeek官方的服务,就要这样改:
```toml
[model.llm_reasoning]
-name = "Pro/deepseek-ai/DeepSeek-R1"
+name = "deepseek-reasoner" # 改成对应的模型名称,这里为DeepseekR1
base_url = "DEEP_SEEK_BASE_URL" # 改成去DeepSeek游乐园
key = "DEEP_SEEK_KEY" # 用DeepSeek的门票
[model.llm_normal]
-name = "Pro/deepseek-ai/DeepSeek-V3"
+name = "deepseek-chat" # 改成对应的模型名称,这里为DeepseekV3
base_url = "DEEP_SEEK_BASE_URL" # 也去DeepSeek游乐园
key = "DEEP_SEEK_KEY" # 用同一张DeepSeek门票
```
@@ -110,7 +110,8 @@ PLUGINS=["src2.plugins.chat"] # 这里是机器人的插件列表呢
```toml
[bot]
qq = "把这里改成你的机器人QQ号喵" # 填写你的机器人QQ号
-nickname = "麦麦" # 机器人的名字,你可以改成你喜欢的任何名字哦
+nickname = "麦麦" # 机器人的名字,你可以改成你喜欢的任何名字哦,建议和机器人QQ名称/群昵称一样哦
+alias_names = ["小麦", "阿麦"] # 也可以用这个招呼机器人,可以不设置呢
[personality]
# 这里可以设置机器人的性格呢,让它更有趣一些喵
diff --git a/docs/installation_standard.md b/docs/installation_standard.md
index 5f52676d1..03b66dc46 100644
--- a/docs/installation_standard.md
+++ b/docs/installation_standard.md
@@ -8,7 +8,7 @@
## API配置说明
-`.env.prod`和`bot_config.toml`中的API配置关系如下:
+`.env.prod` 和 `bot_config.toml` 中的API配置关系如下:
### 在.env.prod中定义API凭证:
```ini
@@ -34,7 +34,7 @@ key = "SILICONFLOW_KEY" # 引用.env.prod中定义的密钥
如需切换到其他API服务,只需修改引用:
```toml
[model.llm_reasoning]
-name = "Pro/deepseek-ai/DeepSeek-R1"
+name = "deepseek-reasoner" # 改成对应的模型名称,这里为DeepseekR1
base_url = "DEEP_SEEK_BASE_URL" # 切换为DeepSeek服务
key = "DEEP_SEEK_KEY" # 使用DeepSeek密钥
```
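The key point of this indirection is that `base_url`/`key` values in bot_config.toml are environment-variable *names*, not literal credentials; they are resolved from .env.prod at startup. A minimal sketch of the lookup (not the project's actual loader, which lives in src/plugins/chat/config.py; paths and section names follow the examples above):

```python
import os
import tomli  # already a project dependency

def resolve_model(cfg: dict) -> dict:
    # "DEEP_SEEK_BASE_URL" / "DEEP_SEEK_KEY" are looked up in the environment;
    # assumes the variables from .env.prod have already been loaded/exported
    resolved = dict(cfg)
    resolved["base_url"] = os.environ[cfg["base_url"]]
    resolved["key"] = os.environ[cfg["key"]]
    return resolved

with open("config/bot_config.toml", "rb") as f:
    toml_dict = tomli.load(f)

print(resolve_model(toml_dict["model"]["llm_reasoning"]))
```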
@@ -53,11 +53,11 @@ CHAT_ANY_WHERE_BASE_URL=https://api.chatanywhere.tech/v1
# 服务配置
HOST=127.0.0.1 # 如果使用Docker部署,需要改成0.0.0.0,否则QQ消息无法传入
-PORT=8080
+PORT=8080 # 与反向端口相同
# 数据库配置
MONGODB_HOST=127.0.0.1 # 如果使用Docker部署,需要改成数据库容器的名字,默认是mongodb
-MONGODB_PORT=27017
+MONGODB_PORT=27017 # MongoDB端口
DATABASE_NAME=MegBot
MONGODB_USERNAME = "" # 数据库用户名
MONGODB_PASSWORD = "" # 数据库密码
@@ -72,6 +72,9 @@ PLUGINS=["src2.plugins.chat"]
[bot]
qq = "机器人QQ号" # 必填
nickname = "麦麦" # 机器人昵称
+# alias_names: 配置机器人可使用的别名。当机器人在群聊或对话中被调用时,别名可以作为直接命令或提及机器人的关键字使用。
+# 该配置项为字符串数组。例如: ["小麦", "阿麦"]
+alias_names = ["小麦", "阿麦"] # 机器人别名
[personality]
prompt_personality = [
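To make the effect of `alias_names` concrete, this is essentially how mentions are detected (condensed from the updated `is_mentioned_bot_in_txt` in src/plugins/chat/utils.py, with example values filled in):

```python
BOT_NICKNAME = "麦麦"
BOT_ALIAS_NAMES = ["小麦", "阿麦"]

def is_mentioned(message: str) -> bool:
    # the nickname and every alias are all treated as triggers
    if BOT_NICKNAME in message:
        return True
    return any(alias in message for alias in BOT_ALIAS_NAMES)

print(is_mentioned("阿麦今天睡了吗"))  # True
print(is_mentioned("今天天气不错"))    # False
```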
diff --git a/docs/manual_deploy_linux.md b/docs/manual_deploy_linux.md
index d310ffc59..41f0390b8 100644
--- a/docs/manual_deploy_linux.md
+++ b/docs/manual_deploy_linux.md
@@ -66,7 +66,7 @@ pip install -r requirements.txt
## 数据库配置
### 3️⃣ **安装并启动MongoDB**
-- 安装与启动:Debian参考[官方文档](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-debian/),Ubuntu参考[官方文档](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)
+- 安装与启动: Debian参考[官方文档](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-debian/),Ubuntu参考[官方文档](https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/)
- 默认连接本地27017端口
---
@@ -76,15 +76,14 @@ pip install -r requirements.txt
- 参考[NapCat官方文档](https://www.napcat.wiki/guide/boot/Shell#napcat-installer-linux%E4%B8%80%E9%94%AE%E4%BD%BF%E7%94%A8%E8%84%9A%E6%9C%AC-%E6%94%AF%E6%8C%81ubuntu-20-debian-10-centos9)安装
-- 使用QQ小号登录,添加反向WS地址:
-`ws://127.0.0.1:8080/onebot/v11/ws`
+- 使用QQ小号登录,添加反向WS地址: `ws://127.0.0.1:8080/onebot/v11/ws`
---
## 配置文件设置
### 5️⃣ **配置文件设置,让麦麦Bot正常工作**
-- 修改环境配置文件:`.env.prod`
-- 修改机器人配置文件:`bot_config.toml`
+- 修改环境配置文件: `.env.prod`
+- 修改机器人配置文件: `bot_config.toml`
---
@@ -107,9 +106,9 @@ python3 bot.py
---
## 常见问题
-🔧 权限问题:在命令前加`sudo`
-🔌 端口占用:使用`sudo lsof -i :8080`查看端口占用
-🛡️ 防火墙:确保8080/27017端口开放
+🔧 权限问题: 在命令前加 `sudo`
+🔌 端口占用: 使用 `sudo lsof -i :8080` 查看端口占用
+🛡️ 防火墙: 确保8080/27017端口开放
```bash
sudo ufw allow 8080/tcp
sudo ufw allow 27017/tcp
diff --git a/docs/manual_deploy_windows.md b/docs/manual_deploy_windows.md
index 86238bcd4..eebdc4f41 100644
--- a/docs/manual_deploy_windows.md
+++ b/docs/manual_deploy_windows.md
@@ -30,7 +30,7 @@
在创建虚拟环境之前,请确保你的电脑上安装了Python 3.9及以上版本。如果没有,可以按以下步骤安装:
-1. 访问Python官网下载页面:https://www.python.org/downloads/release/python-3913/
+1. 访问Python官网下载页面: https://www.python.org/downloads/release/python-3913/
2. 下载Windows安装程序 (64-bit): `python-3.9.13-amd64.exe`
3. 运行安装程序,并确保勾选"Add Python 3.9 to PATH"选项
4. 点击"Install Now"开始安装
@@ -79,11 +79,11 @@ pip install -r requirements.txt
### 3️⃣ **配置NapCat,让麦麦bot与qq取得联系**
- 安装并登录NapCat(用你的qq小号)
-- 添加反向WS:`ws://127.0.0.1:8080/onebot/v11/ws`
+- 添加反向WS: `ws://127.0.0.1:8080/onebot/v11/ws`
### 4️⃣ **配置文件设置,让麦麦Bot正常工作**
-- 修改环境配置文件:`.env.prod`
-- 修改机器人配置文件:`bot_config.toml`
+- 修改环境配置文件: `.env.prod`
+- 修改机器人配置文件: `bot_config.toml`
### 5️⃣ **启动麦麦机器人**
- 打开命令行,cd到对应路径
diff --git a/flake.nix b/flake.nix
index 54737d640..3586857f0 100644
--- a/flake.nix
+++ b/flake.nix
@@ -22,6 +22,7 @@
pythonEnv = pkgs.python3.withPackages (
ps: with ps; [
+ ruff
pymongo
python-dotenv
pydantic
diff --git a/pyproject.toml b/pyproject.toml
index e54dcdacd..0a4805744 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,23 +1,51 @@
[project]
-name = "Megbot"
+name = "MaiMaiBot"
version = "0.1.0"
-description = "New Bot Project"
+description = "MaiMaiBot"
[tool.nonebot]
plugins = ["src.plugins.chat"]
-plugin_dirs = ["src/plugins"]
+plugin_dirs = ["src/plugins"]
[tool.ruff]
-# 设置 Python 版本
-target-version = "py39"
+
+include = ["*.py"]
+
+# 行长度设置
+line-length = 120
+
+[tool.ruff.lint]
+fixable = ["ALL"]
+unfixable = []
+
+# 如果一个变量的名称以下划线开头,即使它未被使用,也不应该被视为错误或警告。
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
# 启用的规则
select = [
- "E", # pycodestyle 错误
- "F", # pyflakes
- "I", # isort
- "B", # flake8-bugbear
+ "E", # pycodestyle 错误
+ "F", # pyflakes
+ "B", # flake8-bugbear
]
-# 行长度设置
-line-length = 88
\ No newline at end of file
+ignore = ["E711"]
+
+[tool.ruff.format]
+docstring-code-format = true
+indent-style = "space"
+
+
+# 使用双引号表示字符串
+quote-style = "double"
+
+# 尊重魔法尾随逗号
+# 例如:
+# items = [
+# "apple",
+# "banana",
+# "cherry",
+# ]
+skip-magic-trailing-comma = false
+
+# 自动检测合适的换行符
+line-ending = "auto"
diff --git a/requirements.txt b/requirements.txt
index 4f969682f..0acaade5e 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/run.bat b/run.bat
index 659a7545a..91904bc34 100644
--- a/run.bat
+++ b/run.bat
@@ -3,7 +3,7 @@ chcp 65001
if not exist "venv" (
python -m venv venv
call venv\Scripts\activate.bat
- pip install -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple --upgrade -r requirements.txt
+ pip install -i https://mirrors.aliyun.com/pypi/simple --upgrade -r requirements.txt
) else (
call venv\Scripts\activate.bat
)
diff --git a/run.py b/run.py
index baea4d13c..50e312c37 100644
--- a/run.py
+++ b/run.py
@@ -107,6 +107,8 @@ def install_napcat():
napcat_filename = input(
"下载完成后请把文件复制到此文件夹,并将**不包含后缀的文件名**输入至此窗口,如 NapCat.32793.Shell:"
)
+    if napcat_filename.endswith(".zip"):
+        napcat_filename = napcat_filename[:-4]
extract_files(napcat_filename + ".zip", "napcat")
print("NapCat 安装完成")
os.remove(napcat_filename + ".zip")
diff --git a/src/gui/reasoning_gui.py b/src/gui/reasoning_gui.py
index 514a95dfb..5768ddc09 100644
--- a/src/gui/reasoning_gui.py
+++ b/src/gui/reasoning_gui.py
@@ -87,7 +87,7 @@ class ReasoningGUI:
self.db = Database.get_instance().db
logger.success("数据库初始化成功")
except Exception:
- logger.exception(f"数据库初始化失败")
+ logger.exception("数据库初始化失败")
sys.exit(1)
# 存储群组数据
@@ -342,7 +342,7 @@ class ReasoningGUI:
'group_id': self.selected_group_id
})
except Exception:
- logger.exception(f"自动更新出错")
+ logger.exception("自动更新出错")
# 每5秒更新一次
time.sleep(5)
diff --git a/src/plugins/chat/__init__.py b/src/plugins/chat/__init__.py
index bd71be019..c730466b3 100644
--- a/src/plugins/chat/__init__.py
+++ b/src/plugins/chat/__init__.py
@@ -121,9 +121,9 @@ async def build_memory_task():
@scheduler.scheduled_job("interval", seconds=global_config.forget_memory_interval, id="forget_memory")
async def forget_memory_task():
"""每30秒执行一次记忆构建"""
- # print("\033[1;32m[记忆遗忘]\033[0m 开始遗忘记忆...")
- # await hippocampus.operation_forget_topic(percentage=0.1)
- # print("\033[1;32m[记忆遗忘]\033[0m 记忆遗忘完成")
+ print("\033[1;32m[记忆遗忘]\033[0m 开始遗忘记忆...")
+ await hippocampus.operation_forget_topic(percentage=0.1)
+ print("\033[1;32m[记忆遗忘]\033[0m 记忆遗忘完成")
@scheduler.scheduled_job("interval", seconds=global_config.build_memory_interval + 10, id="merge_memory")
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 86d0b6944..490b171b5 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -138,7 +138,7 @@ class ChatBot:
# 如果找不到思考消息,直接返回
if not thinking_message:
- logger.warning(f"未找到对应的思考消息,可能已超时被移除")
+ logger.warning("未找到对应的思考消息,可能已超时被移除")
return
# 记录开始思考的时间,避免从思考到回复的时间太久
@@ -187,7 +187,7 @@ class ChatBot:
# 检查是否 <没有找到> emoji
if emoji_raw != None:
- emoji_path, discription = emoji_raw
+ emoji_path, description = emoji_raw
emoji_cq = CQCode.create_emoji_cq(emoji_path)
@@ -203,7 +203,7 @@ class ChatBot:
raw_message=emoji_cq,
plain_text=emoji_cq,
processed_plain_text=emoji_cq,
- detailed_plain_text=discription,
+ detailed_plain_text=description,
user_nickname=global_config.BOT_NICKNAME,
group_name=message.group_name,
time=bot_response_time,
diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index 888e33a7f..547c44a82 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -1,6 +1,6 @@
import os
from dataclasses import dataclass, field
-from typing import Dict, Optional
+from typing import Dict, List, Optional
import tomli
from loguru import logger
@@ -12,10 +12,12 @@ from packaging.specifiers import SpecifierSet, InvalidSpecifier
@dataclass
class BotConfig:
"""机器人配置类"""
+
INNER_VERSION: Version = None
BOT_QQ: Optional[int] = 1
BOT_NICKNAME: Optional[str] = None
+ BOT_ALIAS_NAMES: List[str] = field(default_factory=list) # 别名,可以通过这个叫它
# 消息处理相关配置
MIN_TEXT_LENGTH: int = 2 # 最小处理文本长度
@@ -82,23 +84,25 @@ class BotConfig:
PROMPT_PERSONALITY = [
"曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧",
"是一个女大学生,你有黑色头发,你会刷小红书",
- "是一个女大学生,你会刷b站,对ACG文化感兴趣"
+ "是一个女大学生,你会刷b站,对ACG文化感兴趣",
]
- PROMPT_SCHEDULE_GEN="一个曾经学习地质,现在学习心理学和脑科学的女大学生,喜欢刷qq,贴吧,知乎和小红书"
-
- PERSONALITY_1: float = 0.6 # 第一种人格概率
- PERSONALITY_2: float = 0.3 # 第二种人格概率
- PERSONALITY_3: float = 0.1 # 第三种人格概率
-
- memory_ban_words: list = field(default_factory=lambda: ['表情包', '图片', '回复', '聊天记录']) # 添加新的配置项默认值
-
+ PROMPT_SCHEDULE_GEN = "一个曾经学习地质,现在学习心理学和脑科学的女大学生,喜欢刷qq,贴吧,知乎和小红书"
+
+ PERSONALITY_1: float = 0.6 # 第一种人格概率
+ PERSONALITY_2: float = 0.3 # 第二种人格概率
+ PERSONALITY_3: float = 0.1 # 第三种人格概率
+
+ memory_ban_words: list = field(
+ default_factory=lambda: ["表情包", "图片", "回复", "聊天记录"]
+ ) # 添加新的配置项默认值
+
@staticmethod
def get_config_dir() -> str:
"""获取配置文件目录"""
current_dir = os.path.dirname(os.path.abspath(__file__))
- root_dir = os.path.abspath(os.path.join(current_dir, '..', '..', '..'))
- config_dir = os.path.join(root_dir, 'config')
+ root_dir = os.path.abspath(os.path.join(current_dir, "..", "..", ".."))
+ config_dir = os.path.join(root_dir, "config")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return config_dir
@@ -109,35 +113,32 @@ class BotConfig:
Args:
value[str]: 版本表达式(字符串)
Returns:
- SpecifierSet
+ SpecifierSet
"""
try:
converted = SpecifierSet(value)
- except InvalidSpecifier as e:
- logger.error(
- f"{value} 分类使用了错误的版本约束表达式\n",
- "请阅读 https://semver.org/lang/zh-CN/ 修改代码"
- )
+ except InvalidSpecifier:
+        logger.error(f"{value} 分类使用了错误的版本约束表达式,请阅读 https://semver.org/lang/zh-CN/ 修改代码")
exit(1)
return converted
@classmethod
def get_config_version(cls, toml: dict) -> Version:
- """提取配置文件的 SpecifierSet 版本数据
+ """提取配置文件的 SpecifierSet 版本数据
Args:
toml[dict]: 输入的配置文件字典
Returns:
- Version
+ Version
"""
- if 'inner' in toml:
+ if "inner" in toml:
try:
config_version: str = toml["inner"]["version"]
except KeyError as e:
- logger.error(f"配置文件中 inner 段 不存在, 这是错误的配置文件")
- raise KeyError(f"配置文件中 inner 段 不存在 {e}, 这是错误的配置文件")
+ logger.error("配置文件中 inner 段 不存在, 这是错误的配置文件")
+ raise KeyError(f"配置文件中 inner 段 不存在 {e}, 这是错误的配置文件") from e
else:
toml["inner"] = {"version": "0.0.0"}
config_version = toml["inner"]["version"]
@@ -150,7 +151,7 @@ class BotConfig:
"请阅读 https://semver.org/lang/zh-CN/ 修改配置,并参考本项目指定的模板进行修改\n"
"本项目在不同的版本下有不同的模板,请注意识别"
)
- raise InvalidVersion("配置文件中 inner段 的 version 键是错误的版本描述\n")
+ raise InvalidVersion("配置文件中 inner段 的 version 键是错误的版本描述\n") from e
return ver
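The version gating here relies on packaging's `Version`/`SpecifierSet`; membership tests decide which config sections get parsed. A quick illustration of the semantics used throughout this file:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

config_version = Version("0.0.5")
print(config_version in SpecifierSet(">=0.0.4"))  # True: 0.0.4+ options are parsed
print(config_version in SpecifierSet("<=0.0.0"))  # False: legacy branch is skipped
```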
@@ -160,26 +161,26 @@ class BotConfig:
config = cls()
def personality(parent: dict):
- personality_config = parent['personality']
- personality = personality_config.get('prompt_personality')
+ personality_config = parent["personality"]
+ personality = personality_config.get("prompt_personality")
if len(personality) >= 2:
logger.debug(f"载入自定义人格:{personality}")
- config.PROMPT_PERSONALITY = personality_config.get('prompt_personality', config.PROMPT_PERSONALITY)
+ config.PROMPT_PERSONALITY = personality_config.get("prompt_personality", config.PROMPT_PERSONALITY)
logger.info(f"载入自定义日程prompt:{personality_config.get('prompt_schedule', config.PROMPT_SCHEDULE_GEN)}")
- config.PROMPT_SCHEDULE_GEN = personality_config.get('prompt_schedule', config.PROMPT_SCHEDULE_GEN)
+ config.PROMPT_SCHEDULE_GEN = personality_config.get("prompt_schedule", config.PROMPT_SCHEDULE_GEN)
if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
- config.PERSONALITY_1 = personality_config.get('personality_1_probability', config.PERSONALITY_1)
- config.PERSONALITY_2 = personality_config.get('personality_2_probability', config.PERSONALITY_2)
- config.PERSONALITY_3 = personality_config.get('personality_3_probability', config.PERSONALITY_3)
+ config.PERSONALITY_1 = personality_config.get("personality_1_probability", config.PERSONALITY_1)
+ config.PERSONALITY_2 = personality_config.get("personality_2_probability", config.PERSONALITY_2)
+ config.PERSONALITY_3 = personality_config.get("personality_3_probability", config.PERSONALITY_3)
def emoji(parent: dict):
emoji_config = parent["emoji"]
config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", config.EMOJI_CHECK_INTERVAL)
config.EMOJI_REGISTER_INTERVAL = emoji_config.get("register_interval", config.EMOJI_REGISTER_INTERVAL)
- config.EMOJI_CHECK_PROMPT = emoji_config.get('check_prompt', config.EMOJI_CHECK_PROMPT)
- config.EMOJI_SAVE = emoji_config.get('auto_save', config.EMOJI_SAVE)
- config.EMOJI_CHECK = emoji_config.get('enable_check', config.EMOJI_CHECK)
+ config.EMOJI_CHECK_PROMPT = emoji_config.get("check_prompt", config.EMOJI_CHECK_PROMPT)
+ config.EMOJI_SAVE = emoji_config.get("auto_save", config.EMOJI_SAVE)
+ config.EMOJI_CHECK = emoji_config.get("enable_check", config.EMOJI_CHECK)
def cq_code(parent: dict):
cq_code_config = parent["cq_code"]
@@ -192,12 +193,16 @@ class BotConfig:
config.BOT_QQ = int(bot_qq)
config.BOT_NICKNAME = bot_config.get("nickname", config.BOT_NICKNAME)
+ if config.INNER_VERSION in SpecifierSet(">=0.0.5"):
+ config.BOT_ALIAS_NAMES = bot_config.get("alias_names", config.BOT_ALIAS_NAMES)
+
def response(parent: dict):
response_config = parent["response"]
config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
- config.MODEL_R1_DISTILL_PROBABILITY = response_config.get("model_r1_distill_probability",
- config.MODEL_R1_DISTILL_PROBABILITY)
+ config.MODEL_R1_DISTILL_PROBABILITY = response_config.get(
+ "model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
+ )
config.max_response_length = response_config.get("max_response_length", config.max_response_length)
def model(parent: dict):
@@ -214,7 +219,7 @@ class BotConfig:
"llm_emotion_judge",
"vlm",
"embedding",
- "moderation"
+ "moderation",
]
for item in config_list:
@@ -223,13 +228,7 @@ class BotConfig:
# base_url 的例子: SILICONFLOW_BASE_URL
# key 的例子: SILICONFLOW_KEY
- cfg_target = {
- "name": "",
- "base_url": "",
- "key": "",
- "pri_in": 0,
- "pri_out": 0
- }
+ cfg_target = {"name": "", "base_url": "", "key": "", "pri_in": 0, "pri_out": 0}
if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
cfg_target = cfg_item
@@ -248,7 +247,7 @@ class BotConfig:
cfg_target[i] = cfg_item[i]
except KeyError as e:
logger.error(f"{item} 中的必要字段不存在,请检查")
- raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查")
+ raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查") from e
provider = cfg_item.get("provider")
if provider is None:
@@ -273,10 +272,12 @@ class BotConfig:
if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
- config.response_willing_amplifier = msg_config.get("response_willing_amplifier",
- config.response_willing_amplifier)
- config.response_interested_rate_amplifier = msg_config.get("response_interested_rate_amplifier",
- config.response_interested_rate_amplifier)
+ config.response_willing_amplifier = msg_config.get(
+ "response_willing_amplifier", config.response_willing_amplifier
+ )
+ config.response_interested_rate_amplifier = msg_config.get(
+ "response_interested_rate_amplifier", config.response_interested_rate_amplifier
+ )
config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
if config.INNER_VERSION in SpecifierSet(">=0.0.5"):
@@ -286,7 +287,7 @@ class BotConfig:
memory_config = parent["memory"]
config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
config.forget_memory_interval = memory_config.get("forget_memory_interval", config.forget_memory_interval)
-
+
# 在版本 >= 0.0.4 时才处理新增的配置项
if config.INNER_VERSION in SpecifierSet(">=0.0.4"):
config.memory_ban_words = set(memory_config.get("memory_ban_words", []))
@@ -307,10 +308,12 @@ class BotConfig:
config.chinese_typo_enable = chinese_typo_config.get("enable", config.chinese_typo_enable)
config.chinese_typo_error_rate = chinese_typo_config.get("error_rate", config.chinese_typo_error_rate)
config.chinese_typo_min_freq = chinese_typo_config.get("min_freq", config.chinese_typo_min_freq)
- config.chinese_typo_tone_error_rate = chinese_typo_config.get("tone_error_rate",
- config.chinese_typo_tone_error_rate)
- config.chinese_typo_word_replace_rate = chinese_typo_config.get("word_replace_rate",
- config.chinese_typo_word_replace_rate)
+ config.chinese_typo_tone_error_rate = chinese_typo_config.get(
+ "tone_error_rate", config.chinese_typo_tone_error_rate
+ )
+ config.chinese_typo_word_replace_rate = chinese_typo_config.get(
+ "word_replace_rate", config.chinese_typo_word_replace_rate
+ )
def groups(parent: dict):
groups_config = parent["groups"]
@@ -329,61 +332,19 @@ class BotConfig:
# 例如:"notice": "personality 将在 1.3.2 后被移除",那么在有效版本中的用户就会虽然可以
# 正常执行程序,但是会看到这条自定义提示
include_configs = {
- "personality": {
- "func": personality,
- "support": ">=0.0.0"
- },
- "emoji": {
- "func": emoji,
- "support": ">=0.0.0"
- },
- "cq_code": {
- "func": cq_code,
- "support": ">=0.0.0"
- },
- "bot": {
- "func": bot,
- "support": ">=0.0.0"
- },
- "response": {
- "func": response,
- "support": ">=0.0.0"
- },
- "model": {
- "func": model,
- "support": ">=0.0.0"
- },
- "message": {
- "func": message,
- "support": ">=0.0.0"
- },
- "memory": {
- "func": memory,
- "support": ">=0.0.0",
- "necessary": False
- },
- "mood": {
- "func": mood,
- "support": ">=0.0.0"
- },
- "keywords_reaction": {
- "func": keywords_reaction,
- "support": ">=0.0.2",
- "necessary": False
- },
- "chinese_typo": {
- "func": chinese_typo,
- "support": ">=0.0.3",
- "necessary": False
- },
- "groups": {
- "func": groups,
- "support": ">=0.0.0"
- },
- "others": {
- "func": others,
- "support": ">=0.0.0"
- }
+ "personality": {"func": personality, "support": ">=0.0.0"},
+ "emoji": {"func": emoji, "support": ">=0.0.0"},
+ "cq_code": {"func": cq_code, "support": ">=0.0.0"},
+ "bot": {"func": bot, "support": ">=0.0.0"},
+ "response": {"func": response, "support": ">=0.0.0"},
+ "model": {"func": model, "support": ">=0.0.0"},
+ "message": {"func": message, "support": ">=0.0.0"},
+ "memory": {"func": memory, "support": ">=0.0.0", "necessary": False},
+ "mood": {"func": mood, "support": ">=0.0.0"},
+ "keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False},
+ "chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False},
+ "groups": {"func": groups, "support": ">=0.0.0"},
+ "others": {"func": others, "support": ">=0.0.0"},
}
# 原地修改,将 字符串版本表达式 转换成 版本对象
@@ -395,7 +356,7 @@ class BotConfig:
with open(config_path, "rb") as f:
try:
toml_dict = tomli.load(f)
- except(tomli.TOMLDecodeError) as e:
+ except tomli.TOMLDecodeError as e:
logger.critical(f"配置文件bot_config.toml填写有误,请检查第{e.lineno}行第{e.colno}处:{e.msg}")
exit(1)
@@ -410,7 +371,7 @@ class BotConfig:
# 检查配置文件版本是否在支持范围内
if config.INNER_VERSION in group_specifierset:
# 如果版本在支持范围内,检查是否存在通知
- if 'notice' in include_configs[key]:
+ if "notice" in include_configs[key]:
logger.warning(include_configs[key]["notice"])
include_configs[key]["func"](toml_dict)
@@ -424,7 +385,7 @@ class BotConfig:
raise InvalidVersion(f"当前程序仅支持以下版本范围: {group_specifierset}")
# 如果 necessary 项目存在,而且显式声明是 False,进入特殊处理
- elif "necessary" in include_configs[key] and include_configs[key].get("necessary") == False:
+ elif "necessary" in include_configs[key] and include_configs[key].get("necessary") is False:
# 通过 pass 处理的项虽然直接忽略也是可以的,但是为了不增加理解困难,依然需要在这里显式处理
if key == "keywords_reaction":
pass
diff --git a/src/plugins/chat/cq_code.py b/src/plugins/chat/cq_code.py
index b13e33e48..21541a78b 100644
--- a/src/plugins/chat/cq_code.py
+++ b/src/plugins/chat/cq_code.py
@@ -155,8 +155,8 @@ class CQCode:
logger.error(f"最终请求失败: {str(e)}")
time.sleep(1.5 ** retry) # 指数退避
- except Exception as e:
- logger.exception(f"[未知错误]")
+ except Exception:
+ logger.exception("[未知错误]")
return None
return None
@@ -281,7 +281,7 @@ class CQCode:
logger.debug(f"合并后的转发消息: {combined_messages}")
return f"[转发消息:\n{combined_messages}]"
- except Exception as e:
+ except Exception:
logger.exception("处理转发消息失败")
return '[转发消息]'
diff --git a/src/plugins/chat/emoji_manager.py b/src/plugins/chat/emoji_manager.py
index 973bcad2d..feba23076 100644
--- a/src/plugins/chat/emoji_manager.py
+++ b/src/plugins/chat/emoji_manager.py
@@ -51,8 +51,8 @@ class EmojiManager:
self._initialized = True
# 启动时执行一次完整性检查
self.check_emoji_file_integrity()
- except Exception as e:
- logger.exception(f"初始化表情管理器失败")
+ except Exception:
+ logger.exception("初始化表情管理器失败")
def _ensure_db(self):
"""确保数据库已初始化"""
@@ -87,8 +87,8 @@ class EmojiManager:
{'_id': emoji_id},
{'$inc': {'usage_count': 1}}
)
- except Exception as e:
- logger.exception(f"记录表情使用失败")
+ except Exception:
+ logger.exception("记录表情使用失败")
async def get_emoji_for_text(self, text: str) -> Optional[str]:
"""根据文本内容获取相关表情包
@@ -117,7 +117,7 @@ class EmojiManager:
try:
# 获取所有表情包
- all_emojis = list(self.db.db.emoji.find({}, {'_id': 1, 'path': 1, 'embedding': 1, 'discription': 1}))
+ all_emojis = list(self.db.db.emoji.find({}, {'_id': 1, 'path': 1, 'embedding': 1, 'description': 1}))
if not all_emojis:
logger.warning("数据库中没有任何表情包")
@@ -160,9 +160,9 @@ class EmojiManager:
{'$inc': {'usage_count': 1}}
)
logger.success(
- f"找到匹配的表情包: {selected_emoji.get('discription', '无描述')} (相似度: {similarity:.4f})")
+ f"找到匹配的表情包: {selected_emoji.get('description', '无描述')} (相似度: {similarity:.4f})")
# 稍微改一下文本描述,不然容易产生幻觉,描述已经包含 表情包 了
- return selected_emoji['path'], "[ %s ]" % selected_emoji.get('discription', '无描述')
+ return selected_emoji['path'], "[ %s ]" % selected_emoji.get('description', '无描述')
except Exception as search_error:
logger.error(f"搜索表情包失败: {str(search_error)}")
@@ -174,7 +174,7 @@ class EmojiManager:
logger.error(f"获取表情包失败: {str(e)}")
return None
- async def _get_emoji_discription(self, image_base64: str) -> str:
+ async def _get_emoji_description(self, image_base64: str) -> str:
"""获取表情包的标签"""
try:
prompt = '这是一个表情包,使用中文简洁的描述一下表情包的内容和表情包所表达的情感'
@@ -203,7 +203,7 @@ class EmojiManager:
try:
prompt = f'这是{global_config.BOT_NICKNAME}将要发送的消息内容:\n{text}\n若要为其配上表情包,请你输出这个表情包应该表达怎样的情感,应该给人什么样的感觉,不要太简洁也不要太长,注意不要输出任何对消息内容的分析内容,只输出\"一种什么样的感觉\"中间的形容词部分。'
- content, _ = await self.llm_emotion_judge.generate_response_async(prompt)
+            content, _ = await self.llm_emotion_judge.generate_response_async(prompt, temperature=1.5)
logger.info(f"输出描述: {content}")
return content
@@ -236,36 +236,36 @@ class EmojiManager:
continue
# 获取表情包的描述
- discription = await self._get_emoji_discription(image_base64)
+ description = await self._get_emoji_description(image_base64)
if global_config.EMOJI_CHECK:
check = await self._check_emoji(image_base64)
if '是' not in check:
os.remove(image_path)
- logger.info(f"描述: {discription}")
+ logger.info(f"描述: {description}")
logger.info(f"其不满足过滤规则,被剔除 {check}")
continue
logger.info(f"check通过 {check}")
- if discription is not None:
- embedding = await get_embedding(discription)
+ if description is not None:
+ embedding = await get_embedding(description)
# 准备数据库记录
emoji_record = {
'filename': filename,
'path': image_path,
'embedding': embedding,
- 'discription': discription,
+ 'description': description,
'timestamp': int(time.time())
}
# 保存到数据库
self.db.db['emoji'].insert_one(emoji_record)
logger.success(f"注册新表情包: {filename}")
- logger.info(f"描述: {discription}")
+ logger.info(f"描述: {description}")
else:
logger.warning(f"跳过表情包: {filename}")
- except Exception as e:
- logger.exception(f"扫描表情包失败")
+ except Exception:
+ logger.exception("扫描表情包失败")
async def _periodic_scan(self, interval_MINS: int = 10):
"""定期扫描新表情包"""
diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index f40c9a441..4e431d9fd 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -94,7 +94,7 @@ class ResponseGenerator:
try:
content, reasoning_content = await model.generate_response(prompt)
except Exception:
- logger.exception(f"生成回复时出错")
+ logger.exception("生成回复时出错")
return None
# 保存到数据库
@@ -146,7 +146,7 @@ class ResponseGenerator:
return ["neutral"]
except Exception:
- logger.exception(f"获取情感标签时出错")
+ logger.exception("获取情感标签时出错")
return ["neutral"]
async def _process_response(self, content: str) -> Tuple[List[str], List[str]]:
diff --git a/src/plugins/chat/message_sender.py b/src/plugins/chat/message_sender.py
index 4abbd3b3f..0fb40373e 100644
--- a/src/plugins/chat/message_sender.py
+++ b/src/plugins/chat/message_sender.py
@@ -61,7 +61,7 @@ class Message_Sender:
auto_escape=auto_escape
)
logger.debug(f"发送消息{message}成功")
- except Exception as e:
+ except Exception:
logger.exception(f"发送消息{message}失败")
@@ -120,7 +120,7 @@ class MessageContainer:
return True
return False
except Exception:
- logger.exception(f"移除消息时发生错误")
+ logger.exception("移除消息时发生错误")
return False
def has_messages(self) -> bool:
@@ -214,7 +214,7 @@ class MessageManager:
if not container.remove_message(msg):
logger.warning("尝试删除不存在的消息")
except Exception:
- logger.exception(f"处理超时消息时发生错误")
+ logger.exception("处理超时消息时发生错误")
continue
async def start_processor(self):
diff --git a/src/plugins/chat/prompt_builder.py b/src/plugins/chat/prompt_builder.py
index 4cf21af19..0805caa5a 100644
--- a/src/plugins/chat/prompt_builder.py
+++ b/src/plugins/chat/prompt_builder.py
@@ -131,18 +131,19 @@ class PromptBuilder:
probability_1 = global_config.PERSONALITY_1
probability_2 = global_config.PERSONALITY_2
probability_3 = global_config.PERSONALITY_3
- prompt_personality = ''
+
+        prompt_personality = f'{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},'
+        if global_config.BOT_ALIAS_NAMES:
+            prompt_personality += f'你还有很多别名:{"/".join(global_config.BOT_ALIAS_NAMES)},'
personality_choice = random.random()
if personality_choice < probability_1: # 第一种人格
- prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[0]}, 你正在浏览qq群,{promt_info_prompt},
+ prompt_personality += f'''{personality[0]}, 你正在浏览qq群,{promt_info_prompt},
现在请你给出日常且口语化的回复,平淡一些,尽量简短一些。{keywords_reaction_prompt}
请注意把握群里的聊天内容,不要刻意突出自身学科背景,不要回复的太有条理,可以有个性。'''
elif personality_choice < probability_1 + probability_2: # 第二种人格
- prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[1]}, 你正在浏览qq群,{promt_info_prompt},
+ prompt_personality += f'''{personality[1]}, 你正在浏览qq群,{promt_info_prompt},
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
请你表达自己的见解和观点。可以有个性。'''
else: # 第三种人格
- prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME},{personality[2]}, 你正在浏览qq群,{promt_info_prompt},
+ prompt_personality += f'''{personality[2]}, 你正在浏览qq群,{promt_info_prompt},
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
请你表达自己的见解和观点。可以有个性。'''
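The three personality prompts are picked by cumulative probability over `PERSONALITY_1/2/3`. The selection logic reduces to this (a sketch using the default weights from config.py):

```python
import random

def pick_personality(p1: float = 0.6, p2: float = 0.3) -> int:
    # cumulative thresholds: [0, p1) -> 0, [p1, p1+p2) -> 1, the rest -> 2
    r = random.random()
    if r < p1:
        return 0
    if r < p1 + p2:
        return 1
    return 2

print(pick_personality())
```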
diff --git a/src/plugins/chat/storage.py b/src/plugins/chat/storage.py
index 1c2d05071..4081f8984 100644
--- a/src/plugins/chat/storage.py
+++ b/src/plugins/chat/storage.py
@@ -45,6 +45,6 @@ class MessageStorage:
self.db.db.messages.insert_one(message_data)
except Exception:
- logger.exception(f"存储消息失败")
+ logger.exception("存储消息失败")
# 如果需要其他存储相关的函数,可以在这里添加
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 054526e94..6619f37af 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -53,19 +53,13 @@ def db_message_to_str(message_dict: Dict) -> str:
return result
-def is_mentioned_bot_in_message(message: Message) -> bool:
- """检查消息是否提到了机器人"""
- keywords = [global_config.BOT_NICKNAME]
- for keyword in keywords:
- if keyword in message.processed_plain_text:
- return True
- return False
-
-
def is_mentioned_bot_in_txt(message: str) -> bool:
"""检查消息是否提到了机器人"""
- keywords = [global_config.BOT_NICKNAME]
- for keyword in keywords:
+ if global_config.BOT_NICKNAME is None:
+ return True
+ if global_config.BOT_NICKNAME in message:
+ return True
+ for keyword in global_config.BOT_ALIAS_NAMES:
if keyword in message:
return True
return False
diff --git a/src/plugins/knowledege/knowledge_library.py b/src/plugins/knowledege/knowledge_library.py
index 481076961..4bf6227bb 100644
--- a/src/plugins/knowledege/knowledge_library.py
+++ b/src/plugins/knowledege/knowledge_library.py
@@ -79,7 +79,7 @@ class KnowledgeLibrary:
content = f.read()
-        # 按1024字符分段
-        segments = [content[i:i+600] for i in range(0, len(content), 600)]
+        # 以600字符为窗口、300字符为步长做重叠分段
+        segments = [content[i:i+600] for i in range(0, len(content), 300)]
# 处理每个分段
for segment in segments:
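The stride change (600 → 300 while the window stays 600 characters) makes consecutive segments overlap by half, presumably so a fact straddling a boundary still appears intact in at least one segment. Standalone form of the new slicing:

```python
def overlapping_segments(content: str, size: int = 600, stride: int = 300):
    # windows of `size` characters taken every `stride` characters
    return [content[i:i + size] for i in range(0, len(content), stride)]

print([len(s) for s in overlapping_segments("x" * 1000)])  # [600, 600, 400, 100]
```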
diff --git a/src/plugins/memory_system/memory.py b/src/plugins/memory_system/memory.py
index 9b325b36d..0730f9e57 100644
--- a/src/plugins/memory_system/memory.py
+++ b/src/plugins/memory_system/memory.py
@@ -25,26 +25,46 @@ class Memory_graph:
self.db = Database.get_instance()
def connect_dot(self, concept1, concept2):
- # 如果边已存在,增加 strength
+ # 避免自连接
+ if concept1 == concept2:
+ return
+
+ current_time = datetime.datetime.now().timestamp()
+
+ # 如果边已存在,增加 strength
if self.G.has_edge(concept1, concept2):
self.G[concept1][concept2]['strength'] = self.G[concept1][concept2].get('strength', 1) + 1
+ # 更新最后修改时间
+ self.G[concept1][concept2]['last_modified'] = current_time
else:
- # 如果是新边,初始化 strength 为 1
- self.G.add_edge(concept1, concept2, strength=1)
+ # 如果是新边,初始化 strength 为 1
+ self.G.add_edge(concept1, concept2,
+ strength=1,
+ created_time=current_time, # 添加创建时间
+ last_modified=current_time) # 添加最后修改时间
def add_dot(self, concept, memory):
+ current_time = datetime.datetime.now().timestamp()
+
if concept in self.G:
- # 如果节点已存在,将新记忆添加到现有列表中
if 'memory_items' in self.G.nodes[concept]:
if not isinstance(self.G.nodes[concept]['memory_items'], list):
- # 如果当前不是列表,将其转换为列表
self.G.nodes[concept]['memory_items'] = [self.G.nodes[concept]['memory_items']]
self.G.nodes[concept]['memory_items'].append(memory)
+ # 更新最后修改时间
+ self.G.nodes[concept]['last_modified'] = current_time
else:
self.G.nodes[concept]['memory_items'] = [memory]
+ # 如果节点存在但没有memory_items,说明是第一次添加memory,设置created_time
+ if 'created_time' not in self.G.nodes[concept]:
+ self.G.nodes[concept]['created_time'] = current_time
+ self.G.nodes[concept]['last_modified'] = current_time
else:
- # 如果是新节点,创建新的记忆列表
- self.G.add_node(concept, memory_items=[memory])
+ # 如果是新节点,创建新的记忆列表
+ self.G.add_node(concept,
+ memory_items=[memory],
+ created_time=current_time, # 添加创建时间
+ last_modified=current_time) # 添加最后修改时间
def get_dot(self, concept):
# 检查节点是否存在于图中
@@ -191,15 +211,11 @@ class Hippocampus:
async def memory_compress(self, messages: list, compress_rate=0.1):
"""压缩消息记录为记忆
- Args:
- messages: 消息记录字典列表,每个字典包含text和time字段
- compress_rate: 压缩率
-
Returns:
- set: (话题, 记忆) 元组集合
+ tuple: (压缩记忆集合, 相似主题字典)
"""
if not messages:
- return set()
+ return set(), {}
# 合并消息文本,同时保留时间信息
input_text = ""
@@ -246,12 +262,33 @@ class Hippocampus:
# 等待所有任务完成
compressed_memory = set()
+ similar_topics_dict = {} # 存储每个话题的相似主题列表
for topic, task in tasks:
response = await task
if response:
compressed_memory.add((topic, response[0]))
+ # 为每个话题查找相似的已存在主题
+ existing_topics = list(self.memory_graph.G.nodes())
+ similar_topics = []
+
+ for existing_topic in existing_topics:
+ topic_words = set(jieba.cut(topic))
+ existing_words = set(jieba.cut(existing_topic))
+
+ all_words = topic_words | existing_words
+ v1 = [1 if word in topic_words else 0 for word in all_words]
+ v2 = [1 if word in existing_words else 0 for word in all_words]
+
+ similarity = cosine_similarity(v1, v2)
+
+ if similarity >= 0.6:
+ similar_topics.append((existing_topic, similarity))
+
+ similar_topics.sort(key=lambda x: x[1], reverse=True)
+ similar_topics = similar_topics[:5]
+ similar_topics_dict[topic] = similar_topics
- return compressed_memory
+ return compressed_memory, similar_topics_dict
def calculate_topic_num(self, text, compress_rate):
"""计算文本的话题数量"""
@@ -265,33 +302,40 @@ class Hippocampus:
return topic_num
async def operation_build_memory(self, chat_size=20):
- # 最近消息获取频率
- time_frequency = {'near': 2, 'mid': 4, 'far': 2}
- memory_sample = self.get_memory_sample(chat_size, time_frequency)
-
- for i, input_text in enumerate(memory_sample, 1):
- # 加载进度可视化
+ time_frequency = {'near': 3, 'mid': 8, 'far': 5}
+ memory_samples = self.get_memory_sample(chat_size, time_frequency)
+
+ for i, messages in enumerate(memory_samples, 1):
all_topics = []
- progress = (i / len(memory_sample)) * 100
+ # 加载进度可视化
+ progress = (i / len(memory_samples)) * 100
bar_length = 30
- filled_length = int(bar_length * i // len(memory_sample))
+ filled_length = int(bar_length * i // len(memory_samples))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
- logger.debug(f"进度: [{bar}] {progress:.1f}% ({i}/{len(memory_sample)})")
+ logger.debug(f"进度: [{bar}] {progress:.1f}% ({i}/{len(memory_samples)})")
- # 生成压缩后记忆 ,表现为 (话题,记忆) 的元组
- compressed_memory = set()
compress_rate = 0.1
- compressed_memory = await self.memory_compress(input_text, compress_rate)
- logger.info(f"压缩后记忆数量: {len(compressed_memory)}")
-
- # 将记忆加入到图谱中
+ compressed_memory, similar_topics_dict = await self.memory_compress(messages, compress_rate)
+ logger.info(f"压缩后记忆数量: {len(compressed_memory)},似曾相识的话题: {len(similar_topics_dict)}")
+
for topic, memory in compressed_memory:
logger.info(f"添加节点: {topic}")
self.memory_graph.add_dot(topic, memory)
- all_topics.append(topic) # 收集所有话题
+ all_topics.append(topic)
+
+ # 连接相似的已存在主题
+ if topic in similar_topics_dict:
+ similar_topics = similar_topics_dict[topic]
+ for similar_topic, similarity in similar_topics:
+ if topic != similar_topic:
+ strength = int(similarity * 10)
+ logger.info(f"连接相似节点: {topic} 和 {similar_topic} (强度: {strength})")
+ self.memory_graph.G.add_edge(topic, similar_topic, strength=strength)
+
+ # 连接同批次的相关话题
for i in range(len(all_topics)):
for j in range(i + 1, len(all_topics)):
- logger.info(f"连接节点: {all_topics[i]} 和 {all_topics[j]}")
+ logger.info(f"连接同批次节点: {all_topics[i]} 和 {all_topics[j]}")
self.memory_graph.connect_dot(all_topics[i], all_topics[j])
self.sync_memory_to_db()
@@ -302,7 +346,7 @@ class Hippocampus:
db_nodes = list(self.memory_graph.db.db.graph_data.nodes.find())
memory_nodes = list(self.memory_graph.G.nodes(data=True))
- # 转换数据库节点为字典格式,方便查找
+ # 转换数据库节点为字典格式,方便查找
db_nodes_dict = {node['concept']: node for node in db_nodes}
# 检查并更新节点
@@ -313,13 +357,19 @@ class Hippocampus:
# 计算内存中节点的特征值
memory_hash = self.calculate_node_hash(concept, memory_items)
+
+ # 获取时间信息
+ created_time = data.get('created_time', datetime.datetime.now().timestamp())
+ last_modified = data.get('last_modified', datetime.datetime.now().timestamp())
if concept not in db_nodes_dict:
- # 数据库中缺少的节点,添加
+ # 数据库中缺少的节点,添加
node_data = {
'concept': concept,
'memory_items': memory_items,
- 'hash': memory_hash
+ 'hash': memory_hash,
+ 'created_time': created_time,
+ 'last_modified': last_modified
}
self.memory_graph.db.db.graph_data.nodes.insert_one(node_data)
else:
@@ -327,25 +377,21 @@ class Hippocampus:
db_node = db_nodes_dict[concept]
db_hash = db_node.get('hash', None)
- # 如果特征值不同,则更新节点
+ # 如果特征值不同,则更新节点
if db_hash != memory_hash:
self.memory_graph.db.db.graph_data.nodes.update_one(
{'concept': concept},
{'$set': {
'memory_items': memory_items,
- 'hash': memory_hash
+ 'hash': memory_hash,
+ 'created_time': created_time,
+ 'last_modified': last_modified
}}
)
- # 检查并删除数据库中多余的节点
- memory_concepts = set(node[0] for node in memory_nodes)
- for db_node in db_nodes:
- if db_node['concept'] not in memory_concepts:
- self.memory_graph.db.db.graph_data.nodes.delete_one({'concept': db_node['concept']})
-
# 处理边的信息
db_edges = list(self.memory_graph.db.db.graph_data.edges.find())
- memory_edges = list(self.memory_graph.G.edges())
+ memory_edges = list(self.memory_graph.G.edges(data=True))
# 创建边的哈希值字典
db_edge_dict = {}
@@ -357,10 +403,14 @@ class Hippocampus:
}
# 检查并更新边
- for source, target in memory_edges:
+ for source, target, data in memory_edges:
edge_hash = self.calculate_edge_hash(source, target)
edge_key = (source, target)
- strength = self.memory_graph.G[source][target].get('strength', 1)
+ strength = data.get('strength', 1)
+
+ # 获取边的时间信息
+ created_time = data.get('created_time', datetime.datetime.now().timestamp())
+ last_modified = data.get('last_modified', datetime.datetime.now().timestamp())
if edge_key not in db_edge_dict:
# 添加新边
@@ -368,7 +418,9 @@ class Hippocampus:
'source': source,
'target': target,
'strength': strength,
- 'hash': edge_hash
+ 'hash': edge_hash,
+ 'created_time': created_time,
+ 'last_modified': last_modified
}
self.memory_graph.db.db.graph_data.edges.insert_one(edge_data)
else:
@@ -378,20 +430,12 @@ class Hippocampus:
{'source': source, 'target': target},
{'$set': {
'hash': edge_hash,
- 'strength': strength
+ 'strength': strength,
+ 'created_time': created_time,
+ 'last_modified': last_modified
}}
)
- # 删除多余的边
- memory_edge_set = set(memory_edges)
- for edge_key in db_edge_dict:
- if edge_key not in memory_edge_set:
- source, target = edge_key
- self.memory_graph.db.db.graph_data.edges.delete_one({
- 'source': source,
- 'target': target
- })
-
def sync_memory_from_db(self):
"""从数据库同步数据到内存中的图结构"""
# 清空当前图
@@ -405,61 +449,107 @@ class Hippocampus:
# 确保memory_items是列表
if not isinstance(memory_items, list):
memory_items = [memory_items] if memory_items else []
+
+ # 获取时间信息
+ created_time = node.get('created_time', datetime.datetime.now().timestamp())
+ last_modified = node.get('last_modified', datetime.datetime.now().timestamp())
+
# 添加节点到图中
- self.memory_graph.G.add_node(concept, memory_items=memory_items)
+ self.memory_graph.G.add_node(concept,
+ memory_items=memory_items,
+ created_time=created_time,
+ last_modified=last_modified)
# 从数据库加载所有边
edges = self.memory_graph.db.db.graph_data.edges.find()
for edge in edges:
source = edge['source']
target = edge['target']
- strength = edge.get('strength', 1) # 获取 strength,默认为 1
+ strength = edge.get('strength', 1) # 获取 strength,默认为 1
+
+ # 获取时间信息
+ created_time = edge.get('created_time', datetime.datetime.now().timestamp())
+ last_modified = edge.get('last_modified', datetime.datetime.now().timestamp())
+
# 只有当源节点和目标节点都存在时才添加边
if source in self.memory_graph.G and target in self.memory_graph.G:
- self.memory_graph.G.add_edge(source, target, strength=strength)
+ self.memory_graph.G.add_edge(source, target,
+ strength=strength,
+ created_time=created_time,
+ last_modified=last_modified)
async def operation_forget_topic(self, percentage=0.1):
- """随机选择图中一定比例的节点进行检查,根据条件决定是否遗忘"""
- # 获取所有节点
+ """随机选择图中一定比例的节点和边进行检查,根据时间条件决定是否遗忘"""
all_nodes = list(self.memory_graph.G.nodes())
- # 计算要检查的节点数量
- check_count = max(1, int(len(all_nodes) * percentage))
- # 随机选择节点
- nodes_to_check = random.sample(all_nodes, check_count)
-
- forgotten_nodes = []
+ all_edges = list(self.memory_graph.G.edges())
+
+ check_nodes_count = max(1, int(len(all_nodes) * percentage))
+ check_edges_count = max(1, int(len(all_edges) * percentage))
+
+ nodes_to_check = random.sample(all_nodes, check_nodes_count)
+ edges_to_check = random.sample(all_edges, check_edges_count)
+
+ edge_changes = {'weakened': 0, 'removed': 0}
+ node_changes = {'reduced': 0, 'removed': 0}
+
+ current_time = datetime.datetime.now().timestamp()
+
+ # 检查并遗忘连接
+ logger.info("开始检查连接...")
+ for source, target in edges_to_check:
+ edge_data = self.memory_graph.G[source][target]
+            last_modified = edge_data.get('last_modified', current_time)
+            if current_time - last_modified > 3600 * 24:  # 超过24小时未强化的连接进入衰减
+ current_strength = edge_data.get('strength', 1)
+ new_strength = current_strength - 1
+
+ if new_strength <= 0:
+ self.memory_graph.G.remove_edge(source, target)
+ edge_changes['removed'] += 1
+ logger.info(f"\033[1;31m[连接移除]\033[0m {source} - {target}")
+ else:
+ edge_data['strength'] = new_strength
+ edge_data['last_modified'] = current_time
+ edge_changes['weakened'] += 1
+ logger.info(f"\033[1;34m[连接减弱]\033[0m {source} - {target} (强度: {current_strength} -> {new_strength})")
+
+ # 检查并遗忘话题
+ logger.info("开始检查节点...")
for node in nodes_to_check:
- # 获取节点的连接数
- connections = self.memory_graph.G.degree(node)
-
- # 获取节点的内容条数
- memory_items = self.memory_graph.G.nodes[node].get('memory_items', [])
- if not isinstance(memory_items, list):
- memory_items = [memory_items] if memory_items else []
- content_count = len(memory_items)
-
- # 检查连接强度
- weak_connections = True
- if connections > 1: # 只有当连接数大于1时才检查强度
- for neighbor in self.memory_graph.G.neighbors(node):
- strength = self.memory_graph.G[node][neighbor].get('strength', 1)
- if strength > 2:
- weak_connections = False
- break
-
- # 如果满足遗忘条件
- if (connections <= 1 and weak_connections) or content_count <= 2:
- removed_item = self.memory_graph.forget_topic(node)
- if removed_item:
- forgotten_nodes.append((node, removed_item))
- logger.debug(f"遗忘节点 {node} 的记忆: {removed_item}")
-
- # 同步到数据库
- if forgotten_nodes:
+ node_data = self.memory_graph.G.nodes[node]
+ last_modified = node_data.get('last_modified', current_time)
+
+            if current_time - last_modified > 3600 * 24:  # 超过24小时未修改的节点进入衰减
+ memory_items = node_data.get('memory_items', [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ if memory_items:
+ current_count = len(memory_items)
+ removed_item = random.choice(memory_items)
+ memory_items.remove(removed_item)
+
+ if memory_items:
+ self.memory_graph.G.nodes[node]['memory_items'] = memory_items
+ self.memory_graph.G.nodes[node]['last_modified'] = current_time
+ node_changes['reduced'] += 1
+ logger.info(f"\033[1;33m[记忆减少]\033[0m {node} (记忆数量: {current_count} -> {len(memory_items)})")
+ else:
+ self.memory_graph.G.remove_node(node)
+ node_changes['removed'] += 1
+ logger.info(f"\033[1;31m[节点移除]\033[0m {node}")
+
+ if any(count > 0 for count in edge_changes.values()) or any(count > 0 for count in node_changes.values()):
self.sync_memory_to_db()
- logger.debug(f"完成遗忘操作,共遗忘 {len(forgotten_nodes)} 个节点的记忆")
+ logger.info("\n遗忘操作统计:")
+ logger.info(f"连接变化: {edge_changes['weakened']} 个减弱, {edge_changes['removed']} 个移除")
+ logger.info(f"节点变化: {node_changes['reduced']} 个减少记忆, {node_changes['removed']} 个移除")
else:
- logger.debug("本次检查没有节点满足遗忘条件")
+ logger.info("\n本次检查没有节点或连接满足遗忘条件")
async def merge_memory(self, topic):
"""
@@ -486,7 +576,7 @@ class Hippocampus:
logger.debug(f"选择的记忆:\n{merged_text}")
# 使用memory_compress生成新的压缩记忆
- compressed_memories = await self.memory_compress(selected_memories, 0.1)
+ compressed_memories, _ = await self.memory_compress(selected_memories, 0.1)
# 从原记忆列表中移除被选中的记忆
for memory in selected_memories:
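A toy run of the new time-based forgetting in `operation_forget_topic`: an edge untouched for over 24 hours loses one point of strength per check and disappears at zero (my demonstration, not project code):

```python
import time
import networkx as nx

G = nx.Graph()
two_days_ago = time.time() - 2 * 24 * 3600
G.add_edge("苹果", "橘子", strength=2, last_modified=two_days_ago)

def decay_edges(now: float):
    for u, v in list(G.edges()):
        data = G[u][v]
        if now - data.get("last_modified", now) > 24 * 3600:
            data["strength"] -= 1
            data["last_modified"] = now  # touching the edge resets the clock
            if data["strength"] <= 0:
                G.remove_edge(u, v)

decay_edges(time.time())  # strength 2 -> 1
decay_edges(time.time())  # just refreshed, so this pass does nothing
print(G["苹果"]["橘子"]["strength"])  # 1
```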
diff --git a/src/plugins/memory_system/memory_test1.py b/src/plugins/memory_system/memory_test1.py
new file mode 100644
index 000000000..bbd734ec2
--- /dev/null
+++ b/src/plugins/memory_system/memory_test1.py
@@ -0,0 +1,1208 @@
+# -*- coding: utf-8 -*-
+import datetime
+import math
+import os
+import random
+import sys
+import time
+from collections import Counter
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import networkx as nx
+import pymongo
+from dotenv import load_dotenv
+from loguru import logger
+import jieba
+
+'''
+该理论认为,当两个或多个事物在形态上具有相似性时,
+它们在记忆中会形成关联。
+例如,梨和苹果在形状和都是水果这一属性上有相似性,
+所以当我们看到梨时,很容易通过形态学联想记忆联想到苹果。
+这种相似性联想有助于我们对新事物进行分类和理解,
+当遇到一个新的类似水果时,
+我们可以通过与已有的水果记忆进行相似性匹配,
+来推测它的一些特征。
+
+
+
+时空关联性联想:
+除了相似性联想,MAM 还强调时空关联性联想。
+如果两个事物在时间或空间上经常同时出现,它们也会在记忆中形成关联。
+比如,每次在公园里看到花的时候,都能听到鸟儿的叫声,
+那么花和鸟儿叫声的形态特征(花的视觉形态和鸟叫的听觉形态)就会在记忆中形成关联,
+以后听到鸟叫可能就会联想到公园里的花。
+
+'''
+
+# from chat.config import global_config
+sys.path.append("C:/GitHub/MaiMBot") # 添加项目根目录到 Python 路径
+from src.plugins.memory_system.offline_llm import LLMModel
+
+# 获取当前文件的目录
+current_dir = Path(__file__).resolve().parent
+# 获取项目根目录(上三层目录)
+project_root = current_dir.parent.parent.parent
+# env.dev文件路径
+env_path = project_root / ".env.dev"
+
+# 加载环境变量
+if env_path.exists():
+ logger.info(f"从 {env_path} 加载环境变量")
+ load_dotenv(env_path)
+else:
+ logger.warning(f"未找到环境变量文件: {env_path}")
+ logger.info("将使用默认配置")
+
+class Database:
+ _instance = None
+ db = None
+
+ @classmethod
+ def get_instance(cls):
+ if cls._instance is None:
+ cls._instance = cls()
+ return cls._instance
+
+ def __init__(self):
+ if not Database.db:
+ Database.initialize(
+ host=os.getenv("MONGODB_HOST"),
+ port=int(os.getenv("MONGODB_PORT")),
+ db_name=os.getenv("DATABASE_NAME"),
+ username=os.getenv("MONGODB_USERNAME"),
+ password=os.getenv("MONGODB_PASSWORD"),
+ auth_source=os.getenv("MONGODB_AUTH_SOURCE")
+ )
+
+ @classmethod
+ def initialize(cls, host, port, db_name, username=None, password=None, auth_source="admin"):
+ try:
+ if username and password:
+ uri = f"mongodb://{username}:{password}@{host}:{port}/{db_name}?authSource={auth_source}"
+ else:
+ uri = f"mongodb://{host}:{port}"
+
+ client = pymongo.MongoClient(uri)
+ cls.db = client[db_name]
+ # 测试连接
+ client.server_info()
+ logger.success("MongoDB连接成功!")
+
+ except Exception as e:
+ logger.error(f"初始化MongoDB失败: {str(e)}")
+ raise
+
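+# 连接 URI 形如(示例值,非真实配置):
+# mongodb://user:pass@127.0.0.1:27017/mai_memory?authSource=admin
+# 未提供用户名密码时退化为 mongodb://127.0.0.1:27017
+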
+def calculate_information_content(text):
+ """计算文本的信息量(熵)"""
+ char_count = Counter(text)
+ total_chars = len(text)
+
+ entropy = 0
+ for count in char_count.values():
+ probability = count / total_chars
+ entropy -= probability * math.log2(probability)
+
+ return entropy
+
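+# 例:calculate_information_content("aabb") == 1.0(两种字符各占一半,熵为 1 比特);
+# 字符分布越均匀、种类越多,熵越高,后文据此估算一段聊天可提取的话题数。
+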
+def get_closest_chat_from_db(db, length: int, timestamp: float):
+ """从数据库中获取最接近指定时间戳的聊天记录,并记录读取次数
+
+ Returns:
+ list: 消息记录字典列表,每个字典包含消息内容和时间信息
+ """
+ chat_records = []
+ closest_record = db.db.messages.find_one({"time": {"$lte": timestamp}}, sort=[('time', -1)])
+
+ if closest_record and closest_record.get('memorized', 0) < 4:
+ closest_time = closest_record['time']
+ group_id = closest_record['group_id']
+ # 获取该时间戳之后的length条消息,且groupid相同
+ records = list(db.db.messages.find(
+ {"time": {"$gt": closest_time}, "group_id": group_id}
+ ).sort('time', 1).limit(length))
+
+ # 更新每条消息的memorized属性
+ for record in records:
+ current_memorized = record.get('memorized', 0)
+ if current_memorized > 3:
+ print("消息已读取3次,跳过")
+ return ''
+
+ # 更新memorized值
+ db.db.messages.update_one(
+ {"_id": record["_id"]},
+ {"$set": {"memorized": current_memorized + 1}}
+ )
+
+ # 添加到记录列表中
+ chat_records.append({
+ 'text': record["detailed_plain_text"],
+ 'time': record["time"],
+ 'group_id': record["group_id"]
+ })
+
+ return chat_records
+
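+# 读取次数机制:memorized 从 0 开始,每次被采样读取时 +1;
+# 超过 3 次后该消息不再参与记忆构建,防止同一段聊天被反复提取。
+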
+class Memory_cortex:
+ def __init__(self, memory_graph: 'Memory_graph'):
+ self.memory_graph = memory_graph
+
+ def sync_memory_from_db(self):
+ """
+ 从数据库同步数据到内存中的图结构
+ 将清空当前内存中的图,并从数据库重新加载所有节点和边
+ """
+ # 清空当前图
+ self.memory_graph.G.clear()
+
+ # 获取当前时间作为默认时间
+ default_time = datetime.datetime.now().timestamp()
+
+ # 从数据库加载所有节点
+ nodes = self.memory_graph.db.db.graph_data.nodes.find()
+ for node in nodes:
+ concept = node['concept']
+ memory_items = node.get('memory_items', [])
+ # 确保memory_items是列表
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ # 获取时间属性,如果不存在则使用默认时间
+ created_time = node.get('created_time')
+ last_modified = node.get('last_modified')
+
+ # 如果时间属性不存在,则更新数据库
+ if created_time is None or last_modified is None:
+ created_time = default_time
+ last_modified = default_time
+ # 更新数据库中的节点
+ self.memory_graph.db.db.graph_data.nodes.update_one(
+ {'concept': concept},
+ {'$set': {
+ 'created_time': created_time,
+ 'last_modified': last_modified
+ }}
+ )
+ logger.info(f"为节点 {concept} 添加默认时间属性")
+
+ # 添加节点到图中,包含时间属性
+ self.memory_graph.G.add_node(concept,
+ memory_items=memory_items,
+ created_time=created_time,
+ last_modified=last_modified)
+
+ # 从数据库加载所有边
+ edges = self.memory_graph.db.db.graph_data.edges.find()
+ for edge in edges:
+ source = edge['source']
+ target = edge['target']
+
+ # 只有当源节点和目标节点都存在时才添加边
+ if source in self.memory_graph.G and target in self.memory_graph.G:
+ # 获取时间属性,如果不存在则使用默认时间
+ created_time = edge.get('created_time')
+ last_modified = edge.get('last_modified')
+
+ # 如果时间属性不存在,则更新数据库
+ if created_time is None or last_modified is None:
+ created_time = default_time
+ last_modified = default_time
+ # 更新数据库中的边
+ self.memory_graph.db.db.graph_data.edges.update_one(
+ {'source': source, 'target': target},
+ {'$set': {
+ 'created_time': created_time,
+ 'last_modified': last_modified
+ }}
+ )
+ logger.info(f"为边 {source} - {target} 添加默认时间属性")
+
+ self.memory_graph.G.add_edge(source, target,
+ strength=edge.get('strength', 1),
+ created_time=created_time,
+ last_modified=last_modified)
+
+ logger.success("从数据库同步记忆图谱完成")
+
+ def calculate_node_hash(self, concept, memory_items):
+ """
+ 计算节点的特征值(用 md5 而非内置 hash():Python 3 对字符串哈希加盐,跨进程不稳定,不适合存库比较)
+ """
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+ # 将记忆项排序以确保相同内容生成相同的哈希值
+ sorted_items = sorted(memory_items)
+ # 组合概念和记忆项生成特征值
+ content = f"{concept}:{'|'.join(sorted_items)}"
+ return hashlib.md5(content.encode('utf-8')).hexdigest()
+
+ def calculate_edge_hash(self, source, target):
+ """
+ 计算边的特征值(同样使用 md5 保证跨进程稳定)
+ """
+ # 对源节点和目标节点排序以确保相同的边生成相同的哈希值
+ nodes = sorted([source, target])
+ return hashlib.md5(f"{nodes[0]}:{nodes[1]}".encode('utf-8')).hexdigest()
+
+ def sync_memory_to_db(self):
+ """
+ 检查并同步内存中的图结构与数据库
+ 使用特征值(哈希值)快速判断是否需要更新
+ """
+ current_time = datetime.datetime.now().timestamp()
+
+ # 获取数据库中所有节点和内存中所有节点
+ db_nodes = list(self.memory_graph.db.db.graph_data.nodes.find())
+ memory_nodes = list(self.memory_graph.G.nodes(data=True))
+
+ # 转换数据库节点为字典格式,方便查找
+ db_nodes_dict = {node['concept']: node for node in db_nodes}
+
+ # 检查并更新节点
+ for concept, data in memory_nodes:
+ memory_items = data.get('memory_items', [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ # 计算内存中节点的特征值
+ memory_hash = self.calculate_node_hash(concept, memory_items)
+
+ if concept not in db_nodes_dict:
+ # 数据库中缺少的节点,添加
+ node_data = {
+ 'concept': concept,
+ 'memory_items': memory_items,
+ 'hash': memory_hash,
+ 'created_time': data.get('created_time', current_time),
+ 'last_modified': data.get('last_modified', current_time)
+ }
+ self.memory_graph.db.db.graph_data.nodes.insert_one(node_data)
+ else:
+ # 获取数据库中节点的特征值
+ db_node = db_nodes_dict[concept]
+ db_hash = db_node.get('hash', None)
+
+ # 如果特征值不同,则更新节点
+ if db_hash != memory_hash:
+ self.memory_graph.db.db.graph_data.nodes.update_one(
+ {'concept': concept},
+ {'$set': {
+ 'memory_items': memory_items,
+ 'hash': memory_hash,
+ 'last_modified': current_time
+ }}
+ )
+
+ # 检查并删除数据库中多余的节点
+ memory_concepts = set(node[0] for node in memory_nodes)
+ for db_node in db_nodes:
+ if db_node['concept'] not in memory_concepts:
+ self.memory_graph.db.db.graph_data.nodes.delete_one({'concept': db_node['concept']})
+
+ # 处理边的信息
+ db_edges = list(self.memory_graph.db.db.graph_data.edges.find())
+ memory_edges = list(self.memory_graph.G.edges(data=True))
+
+ # 创建边的哈希值字典(端点排序后作键:无向图中 (a,b) 与 (b,a) 是同一条边)
+ db_edge_dict = {}
+ for edge in db_edges:
+ edge_hash = self.calculate_edge_hash(edge['source'], edge['target'])
+ db_edge_dict[tuple(sorted((edge['source'], edge['target'])))] = {
+ 'hash': edge_hash,
+ 'strength': edge.get('strength', 1)
+ }
+
+ # 检查并更新边
+ for source, target, data in memory_edges:
+ edge_hash = self.calculate_edge_hash(source, target)
+ edge_key = tuple(sorted((source, target)))
+ strength = data.get('strength', 1)
+
+ if edge_key not in db_edge_dict:
+ # 添加新边
+ edge_data = {
+ 'source': source,
+ 'target': target,
+ 'strength': strength,
+ 'hash': edge_hash,
+ 'created_time': data.get('created_time', current_time),
+ 'last_modified': data.get('last_modified', current_time)
+ }
+ self.memory_graph.db.db.graph_data.edges.insert_one(edge_data)
+ else:
+ # 检查边的特征值是否变化
+ if db_edge_dict[edge_key]['hash'] != edge_hash:
+ self.memory_graph.db.db.graph_data.edges.update_one(
+ {'$or': [{'source': source, 'target': target},
+ {'source': target, 'target': source}]},
+ {'$set': {
+ 'hash': edge_hash,
+ 'strength': strength,
+ 'last_modified': current_time
+ }}
+ )
+
+ # 删除多余的边(同样按排序后的端点比较)
+ memory_edge_set = set(tuple(sorted((source, target))) for source, target, _ in memory_edges)
+ for edge_key in db_edge_dict:
+ if edge_key not in memory_edge_set:
+ source, target = edge_key
+ self.memory_graph.db.db.graph_data.edges.delete_one({
+ '$or': [{'source': source, 'target': target},
+ {'source': target, 'target': source}]
+ })
+
+ logger.success("完成记忆图谱与数据库的差异同步")
+
+ def remove_node_from_db(self, topic):
+ """
+ 从数据库中删除指定节点及其相关的边
+
+ Args:
+ topic: 要删除的节点概念
+ """
+ # 删除节点
+ self.memory_graph.db.db.graph_data.nodes.delete_one({'concept': topic})
+ # 删除所有涉及该节点的边
+ self.memory_graph.db.db.graph_data.edges.delete_many({
+ '$or': [
+ {'source': topic},
+ {'target': topic}
+ ]
+ })
+
+class Memory_graph:
+ def __init__(self):
+ self.G = nx.Graph() # 使用 networkx 的图结构
+ self.db = Database.get_instance()
+
+ def connect_dot(self, concept1, concept2):
+ # 避免自连接
+ if concept1 == concept2:
+ return
+
+ current_time = datetime.datetime.now().timestamp()
+
+ # 如果边已存在,增加 strength
+ if self.G.has_edge(concept1, concept2):
+ self.G[concept1][concept2]['strength'] = self.G[concept1][concept2].get('strength', 1) + 1
+ # 更新最后修改时间
+ self.G[concept1][concept2]['last_modified'] = current_time
+ else:
+ # 如果是新边,初始化 strength 为 1
+ self.G.add_edge(concept1, concept2,
+ strength=1,
+ created_time=current_time,
+ last_modified=current_time)
+
+ def add_dot(self, concept, memory):
+ current_time = datetime.datetime.now().timestamp()
+
+ if concept in self.G:
+ # 如果节点已存在,将新记忆添加到现有列表中
+ if 'memory_items' in self.G.nodes[concept]:
+ if not isinstance(self.G.nodes[concept]['memory_items'], list):
+ # 如果当前不是列表,将其转换为列表
+ self.G.nodes[concept]['memory_items'] = [self.G.nodes[concept]['memory_items']]
+ self.G.nodes[concept]['memory_items'].append(memory)
+ # 更新最后修改时间
+ self.G.nodes[concept]['last_modified'] = current_time
+ else:
+ self.G.nodes[concept]['memory_items'] = [memory]
+ self.G.nodes[concept]['last_modified'] = current_time
+ else:
+ # 如果是新节点,创建新的记忆列表
+ self.G.add_node(concept,
+ memory_items=[memory],
+ created_time=current_time,
+ last_modified=current_time)
+
+ def get_dot(self, concept):
+ # 检查节点是否存在于图中
+ if concept in self.G:
+ # 从图中获取节点数据
+ node_data = self.G.nodes[concept]
+ return concept, node_data
+ return None
+
+ def get_related_item(self, topic, depth=1):
+ if topic not in self.G:
+ return [], []
+
+ first_layer_items = []
+ second_layer_items = []
+
+ # 获取相邻节点
+ neighbors = list(self.G.neighbors(topic))
+
+ # 获取当前节点的记忆项
+ node_data = self.get_dot(topic)
+ if node_data:
+ concept, data = node_data
+ if 'memory_items' in data:
+ memory_items = data['memory_items']
+ if isinstance(memory_items, list):
+ first_layer_items.extend(memory_items)
+ else:
+ first_layer_items.append(memory_items)
+
+ # 仅在 depth >= 2 时获取第二层记忆
+ if depth >= 2:
+ # 获取相邻节点的记忆项
+ for neighbor in neighbors:
+ node_data = self.get_dot(neighbor)
+ if node_data:
+ concept, data = node_data
+ if 'memory_items' in data:
+ memory_items = data['memory_items']
+ if isinstance(memory_items, list):
+ second_layer_items.extend(memory_items)
+ else:
+ second_layer_items.append(memory_items)
+
+ return first_layer_items, second_layer_items
+
+ @property
+ def dots(self):
+ # 返回所有节点的 (concept, node_data) 元组列表
+ return [self.get_dot(node) for node in self.G.nodes()]
+
+# 海马体
+class Hippocampus:
+ def __init__(self, memory_graph: Memory_graph):
+ self.memory_graph = memory_graph
+ self.memory_cortex = Memory_cortex(memory_graph)
+ self.llm_model = LLMModel()
+ self.llm_model_small = LLMModel(model_name="deepseek-ai/DeepSeek-V2.5")
+ self.llm_model_get_topic = LLMModel(model_name="Pro/Qwen/Qwen2.5-7B-Instruct")
+ self.llm_model_summary = LLMModel(model_name="Qwen/Qwen2.5-32B-Instruct")
+
+ def get_memory_sample(self, chat_size=20, time_frequency: dict = None):
+ """获取记忆样本
+
+ Args:
+ chat_size: 每段记录的消息条数
+ time_frequency: 各时间段的采样次数,默认 {'near': 2, 'mid': 4, 'far': 3}(避免可变默认参数)
+
+ Returns:
+ list: 消息记录列表,每个元素是一个消息记录字典列表
+ """
+ if time_frequency is None:
+ time_frequency = {'near': 2, 'mid': 4, 'far': 3}
+ current_timestamp = datetime.datetime.now().timestamp()
+ chat_samples = []
+
+ # 近期:4小时内 中期:4-24小时 远期:1-7天
+ for _ in range(time_frequency.get('near')):
+ random_time = current_timestamp - random.randint(1, 3600*4)
+ messages = get_closest_chat_from_db(db=self.memory_graph.db, length=chat_size, timestamp=random_time)
+ if messages:
+ chat_samples.append(messages)
+
+ for _ in range(time_frequency.get('mid')):
+ random_time = current_timestamp - random.randint(3600*4, 3600*24)
+ messages = get_closest_chat_from_db(db=self.memory_graph.db, length=chat_size, timestamp=random_time)
+ if messages:
+ chat_samples.append(messages)
+
+ for _ in range(time_frequency.get('far')):
+ random_time = current_timestamp - random.randint(3600*24, 3600*24*7)
+ messages = get_closest_chat_from_db(db=self.memory_graph.db, length=chat_size, timestamp=random_time)
+ if messages:
+ chat_samples.append(messages)
+
+ return chat_samples
+
+ def calculate_topic_num(self,text, compress_rate):
+ """计算文本的话题数量"""
+ information_content = calculate_information_content(text)
+ topic_by_length = text.count('\n')*compress_rate
+ topic_by_information_content = max(1, min(5, int((information_content-3) * 2)))
+ topic_num = int((topic_by_length + topic_by_information_content)/2)
+ print(f"topic_by_length: {topic_by_length}, topic_by_information_content: {topic_by_information_content}, topic_num: {topic_num}")
+ return topic_num
+
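+ # 话题数估算示例(假设数字,仅作说明):文本含 20 行、compress_rate=0.1 => topic_by_length = 2.0;
+ # 若信息熵约 5 比特 => topic_by_information_content = min(5, int((5-3)*2)) = 4;
+ # 最终 topic_num = int((2.0+4)/2) = 3。
+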
+ async def memory_compress(self, messages: list, compress_rate=0.1):
+ """压缩消息记录为记忆
+
+ Args:
+ messages: 消息记录字典列表,每个字典包含text和time字段
+ compress_rate: 压缩率
+
+ Returns:
+ tuple: (压缩记忆集合, 相似主题字典)
+ - 压缩记忆集合: set of (话题, 记忆) 元组
+ - 相似主题字典: dict of {话题: [(相似主题, 相似度), ...]}
+ """
+ if not messages:
+ return set(), {}
+
+ # 合并消息文本,同时保留时间信息
+ input_text = ""
+ time_info = ""
+ # 计算最早和最晚时间
+ earliest_time = min(msg['time'] for msg in messages)
+ latest_time = max(msg['time'] for msg in messages)
+
+ earliest_dt = datetime.datetime.fromtimestamp(earliest_time)
+ latest_dt = datetime.datetime.fromtimestamp(latest_time)
+
+ # 如果是同一年
+ if earliest_dt.year == latest_dt.year:
+ earliest_str = earliest_dt.strftime("%m-%d %H:%M:%S")
+ latest_str = latest_dt.strftime("%m-%d %H:%M:%S")
+ time_info += f"是在{earliest_dt.year}年,{earliest_str} 到 {latest_str} 的对话:\n"
+ else:
+ earliest_str = earliest_dt.strftime("%Y-%m-%d %H:%M:%S")
+ latest_str = latest_dt.strftime("%Y-%m-%d %H:%M:%S")
+ time_info += f"是从 {earliest_str} 到 {latest_str} 的对话:\n"
+
+ for msg in messages:
+ input_text += f"{msg['text']}\n"
+
+ print(input_text)
+
+ topic_num = self.calculate_topic_num(input_text, compress_rate)
+ topics_response = self.llm_model_get_topic.generate_response(self.find_topic_llm(input_text, topic_num))
+
+ # 过滤topics
+ filter_keywords = ['表情包', '图片', '回复', '聊天记录']
+ topics = [topic.strip() for topic in topics_response[0].replace(",", ",").replace("、", ",").replace(" ", ",").split(",") if topic.strip()]
+ filtered_topics = [topic for topic in topics if not any(keyword in topic for keyword in filter_keywords)]
+
+ print(f"过滤后话题: {filtered_topics}")
+
+ # 为每个话题查找相似的已存在主题
+ print("\n检查相似主题:")
+ similar_topics_dict = {} # 存储每个话题的相似主题列表
+
+ for topic in filtered_topics:
+ # 获取所有现有节点
+ existing_topics = list(self.memory_graph.G.nodes())
+ similar_topics = []
+
+ # 对每个现有节点计算相似度
+ for existing_topic in existing_topics:
+ # 使用jieba分词并计算余弦相似度
+ topic_words = set(jieba.cut(topic))
+ existing_words = set(jieba.cut(existing_topic))
+
+ # 计算词向量
+ all_words = topic_words | existing_words
+ v1 = [1 if word in topic_words else 0 for word in all_words]
+ v2 = [1 if word in existing_words else 0 for word in all_words]
+
+ # 计算余弦相似度
+ similarity = cosine_similarity(v1, v2)
+
+ # 如果相似度超过阈值,添加到结果中
+ if similarity >= 0.6: # 设置相似度阈值
+ similar_topics.append((existing_topic, similarity))
+
+ # 按相似度降序排序
+ similar_topics.sort(key=lambda x: x[1], reverse=True)
+ # 只保留前5个最相似的主题
+ similar_topics = similar_topics[:5]
+
+ # 存储到字典中
+ similar_topics_dict[topic] = similar_topics
+
+ # 输出结果
+ if similar_topics:
+ print(f"\n主题「{topic}」的相似主题:")
+ for similar_topic, score in similar_topics:
+ print(f"- {similar_topic} (相似度: {score:.3f})")
+ else:
+ print(f"\n主题「{topic}」没有找到相似主题")
+
+ # 创建所有话题的请求任务
+ tasks = []
+ for topic in filtered_topics:
+ topic_what_prompt = self.topic_what(input_text, topic, time_info)
+ # 创建异步任务
+ task = self.llm_model_small.generate_response_async(topic_what_prompt)
+ tasks.append((topic.strip(), task))
+
+ # 等待所有任务完成
+ compressed_memory = set()
+ for topic, task in tasks:
+ response = await task
+ if response:
+ compressed_memory.add((topic, response[0]))
+
+ return compressed_memory, similar_topics_dict
+
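+ # 用法示意(假设 messages 为含 text/time 字段的字典列表,且在异步环境中调用):
+ # compressed, similar = await hippocampus.memory_compress(messages, compress_rate=0.1)
+ # for topic, memory in compressed:
+ # hippocampus.memory_graph.add_dot(topic, memory)
+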
+ async def operation_build_memory(self, chat_size=12):
+ # 最近消息获取频率
+ time_frequency = {'near': 3, 'mid': 8, 'far': 5}
+ memory_samples = self.get_memory_sample(chat_size, time_frequency)
+
+ for i, messages in enumerate(memory_samples, 1):
+ all_topics = [] # 仅保存本批次话题,用于同批次节点互联(每批重置)
+ # 进度条可视化
+ progress = (i / len(memory_samples)) * 100
+ bar_length = 30
+ filled_length = int(bar_length * i // len(memory_samples))
+ bar = '█' * filled_length + '-' * (bar_length - filled_length)
+ print(f"\n进度: [{bar}] {progress:.1f}% ({i}/{len(memory_samples)})")
+
+ # 生成压缩后记忆
+ compress_rate = 0.1
+ compressed_memory, similar_topics_dict = await self.memory_compress(messages, compress_rate)
+ print(f"\033[1;33m压缩后记忆数量\033[0m: {len(compressed_memory)},似曾相识的话题: {len(similar_topics_dict)}")
+
+ # 将记忆加入到图谱中
+ for topic, memory in compressed_memory:
+ print(f"\033[1;32m添加节点\033[0m: {topic}")
+ self.memory_graph.add_dot(topic, memory)
+ all_topics.append(topic)
+
+ # 连接相似的已存在主题
+ if topic in similar_topics_dict:
+ similar_topics = similar_topics_dict[topic]
+ for similar_topic, similarity in similar_topics:
+ # 避免自连接
+ if topic != similar_topic:
+ # 根据相似度设置连接强度
+ strength = int(similarity * 10) # 将0.6-1.0的相似度映射到6-10的强度(上方相似度阈值为0.6)
+ print(f"\033[1;36m连接相似节点\033[0m: {topic} 和 {similar_topic} (强度: {strength})")
+ # 使用相似度作为初始连接强度
+ self.memory_graph.G.add_edge(topic, similar_topic, strength=strength)
+
+ # 连接同批次的相关话题
+ for i in range(len(all_topics)):
+ for j in range(i + 1, len(all_topics)):
+ print(f"\033[1;32m连接同批次节点\033[0m: {all_topics[i]} 和 {all_topics[j]}")
+ self.memory_graph.connect_dot(all_topics[i], all_topics[j])
+
+ self.memory_cortex.sync_memory_to_db()
+
+ def forget_connection(self, source, target):
+ """
+ 检查并可能遗忘一个连接
+
+ Args:
+ source: 连接的源节点
+ target: 连接的目标节点
+
+ Returns:
+ tuple: (是否有变化, 变化类型, 变化详情)
+ 变化类型: 0-无变化, 1-强度减少, 2-连接移除
+ """
+ current_time = datetime.datetime.now().timestamp()
+ # 获取边的属性
+ edge_data = self.memory_graph.G[source][target]
+ last_modified = edge_data.get('last_modified', current_time)
+
+ # 如果连接超过阈值时间未更新(6000 秒为测试值,正式环境应为 7 天)
+ if current_time - last_modified > 6000: # test
+ # 获取当前强度
+ current_strength = edge_data.get('strength', 1)
+ # 减少连接强度
+ new_strength = current_strength - 1
+ edge_data['strength'] = new_strength
+ edge_data['last_modified'] = current_time
+
+ # 如果强度降为0,移除连接
+ if new_strength <= 0:
+ self.memory_graph.G.remove_edge(source, target)
+ return True, 2, f"移除连接: {source} - {target} (强度降至0)"
+ else:
+ return True, 1, f"减弱连接: {source} - {target} (强度: {current_strength} -> {new_strength})"
+
+ return False, 0, ""
+
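+ # 遗忘示意(假设数字):一条强度为 2 的连接若连续两次检查均超时未更新,
+ # 强度将经历 2 -> 1 -> 0,并在第二次检查时被移除。
+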
+ def forget_topic(self, topic):
+ """
+ 检查并可能遗忘一个话题的记忆
+
+ Args:
+ topic: 要检查的话题
+
+ Returns:
+ tuple: (是否有变化, 变化类型, 变化详情)
+ 变化类型: 0-无变化, 1-记忆减少, 2-节点移除
+ """
+ current_time = datetime.datetime.now().timestamp()
+ # 获取节点的最后修改时间
+ node_data = self.memory_graph.G.nodes[topic]
+ last_modified = node_data.get('last_modified', current_time)
+
+ # 如果话题超过阈值时间未更新(3000 秒为测试值,正式环境应为 7 天)
+ if current_time - last_modified > 3000: # test
+ memory_items = node_data.get('memory_items', [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ if memory_items:
+ # 获取当前记忆数量
+ current_count = len(memory_items)
+ # 随机选择一条记忆删除
+ removed_item = random.choice(memory_items)
+ memory_items.remove(removed_item)
+
+ if memory_items:
+ # 更新节点的记忆项和最后修改时间
+ self.memory_graph.G.nodes[topic]['memory_items'] = memory_items
+ self.memory_graph.G.nodes[topic]['last_modified'] = current_time
+ return True, 1, f"减少记忆: {topic} (记忆数量: {current_count} -> {len(memory_items)})\n被移除的记忆: {removed_item}"
+ else:
+ # 如果没有记忆了,删除节点及其所有连接
+ self.memory_graph.G.remove_node(topic)
+ return True, 2, f"移除节点: {topic} (无剩余记忆)\n最后一条记忆: {removed_item}"
+
+ return False, 0, ""
+
+ async def operation_forget_topic(self, percentage=0.1):
+ """
+ 随机选择图中一定比例的节点和边进行检查,根据时间条件决定是否遗忘
+
+ Args:
+ percentage: 要检查的节点和边的比例,默认为0.1(10%)
+ """
+ # 获取所有节点和边
+ all_nodes = list(self.memory_graph.G.nodes())
+ all_edges = list(self.memory_graph.G.edges())
+
+ if not all_nodes or not all_edges:
+ print("\n记忆图谱为空,跳过遗忘检查")
+ return
+
+ # 计算要检查的数量(图为空时 random.sample 会抛异常,故先行返回)
+ check_nodes_count = max(1, int(len(all_nodes) * percentage))
+ check_edges_count = max(1, int(len(all_edges) * percentage))
+
+ # 随机选择要检查的节点和边
+ nodes_to_check = random.sample(all_nodes, check_nodes_count)
+ edges_to_check = random.sample(all_edges, check_edges_count)
+
+ # 用于统计不同类型的变化
+ edge_changes = {'weakened': 0, 'removed': 0}
+ node_changes = {'reduced': 0, 'removed': 0}
+
+ # 检查并遗忘连接
+ print("\n开始检查连接...")
+ for source, target in edges_to_check:
+ changed, change_type, details = self.forget_connection(source, target)
+ if changed:
+ if change_type == 1:
+ edge_changes['weakened'] += 1
+ logger.info(f"\033[1;34m[连接减弱]\033[0m {details}")
+ elif change_type == 2:
+ edge_changes['removed'] += 1
+ logger.info(f"\033[1;31m[连接移除]\033[0m {details}")
+
+ # 检查并遗忘话题
+ print("\n开始检查节点...")
+ for node in nodes_to_check:
+ changed, change_type, details = self.forget_topic(node)
+ if changed:
+ if change_type == 1:
+ node_changes['reduced'] += 1
+ logger.info(f"\033[1;33m[记忆减少]\033[0m {details}")
+ elif change_type == 2:
+ node_changes['removed'] += 1
+ logger.info(f"\033[1;31m[节点移除]\033[0m {details}")
+
+ # 同步到数据库
+ if any(count > 0 for count in edge_changes.values()) or any(count > 0 for count in node_changes.values()):
+ self.memory_cortex.sync_memory_to_db()
+ print("\n遗忘操作统计:")
+ print(f"连接变化: {edge_changes['weakened']} 个减弱, {edge_changes['removed']} 个移除")
+ print(f"节点变化: {node_changes['reduced']} 个减少记忆, {node_changes['removed']} 个移除")
+ else:
+ print("\n本次检查没有节点或连接满足遗忘条件")
+
+ async def merge_memory(self, topic):
+ """
+ 对指定话题的记忆进行合并压缩
+
+ Args:
+ topic: 要合并的话题节点
+ """
+ # 获取节点的记忆项
+ memory_items = self.memory_graph.G.nodes[topic].get('memory_items', [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+
+ # 如果记忆项不足,直接返回
+ if len(memory_items) < 10:
+ return
+
+ # 随机选择10条记忆
+ selected_memories = random.sample(memory_items, 10)
+
+ # 拼接成文本
+ merged_text = "\n".join(selected_memories)
+ print(f"\n[合并记忆] 话题: {topic}")
+ print(f"选择的记忆:\n{merged_text}")
+
+ # 使用memory_compress生成新的压缩记忆
+ # memory_compress 期望含 text/time 字段的字典列表,这里为旧记忆补上当前时间戳
+ now = datetime.datetime.now().timestamp()
+ selected_msgs = [{'text': m, 'time': now} for m in selected_memories]
+ compressed_memories, _ = await self.memory_compress(selected_msgs, 0.1)
+
+ # 从原记忆列表中移除被选中的记忆
+ for memory in selected_memories:
+ memory_items.remove(memory)
+
+ # 添加新的压缩记忆
+ for _, compressed_memory in compressed_memories:
+ memory_items.append(compressed_memory)
+ print(f"添加压缩记忆: {compressed_memory}")
+
+ # 更新节点的记忆项
+ self.memory_graph.G.nodes[topic]['memory_items'] = memory_items
+ print(f"完成记忆合并,当前记忆数量: {len(memory_items)}")
+
+ async def operation_merge_memory(self, percentage=0.1):
+ """
+ 随机检查一定比例的节点,对内容数量超过100的节点进行记忆合并
+
+ Args:
+ percentage: 要检查的节点比例,默认为0.1(10%)
+ """
+ # 获取所有节点
+ all_nodes = list(self.memory_graph.G.nodes())
+ if not all_nodes:
+ print("\n记忆图谱为空,跳过合并检查")
+ return
+ # 计算要检查的节点数量
+ check_count = max(1, int(len(all_nodes) * percentage))
+ # 随机选择节点
+ nodes_to_check = random.sample(all_nodes, check_count)
+
+ merged_nodes = []
+ for node in nodes_to_check:
+ # 获取节点的内容条数
+ memory_items = self.memory_graph.G.nodes[node].get('memory_items', [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+ content_count = len(memory_items)
+
+ # 如果内容数量超过100,进行合并
+ if content_count > 100:
+ print(f"\n检查节点: {node}, 当前记忆数量: {content_count}")
+ await self.merge_memory(node)
+ merged_nodes.append(node)
+
+ # 同步到数据库
+ if merged_nodes:
+ self.memory_cortex.sync_memory_to_db()
+ print(f"\n完成记忆合并操作,共处理 {len(merged_nodes)} 个节点")
+ else:
+ print("\n本次检查没有需要合并的节点")
+
+ async def _identify_topics(self, text: str) -> list:
+ """从文本中识别可能的主题"""
+ topics_response = self.llm_model_get_topic.generate_response(self.find_topic_llm(text, 5))
+ topics = [topic.strip() for topic in topics_response[0].replace(",", ",").replace("、", ",").replace(" ", ",").split(",") if topic.strip()]
+ return topics
+
+ def _find_similar_topics(self, topics: list, similarity_threshold: float = 0.4, debug_info: str = "") -> list:
+ """查找与给定主题相似的记忆主题(debug_info 仅作调试标识,当前未使用)"""
+ all_memory_topics = list(self.memory_graph.G.nodes())
+ all_similar_topics = []
+
+ for topic in topics:
+ topic_vector = text_to_vector(topic)
+
+ for memory_topic in all_memory_topics:
+ memory_vector = text_to_vector(memory_topic)
+ all_words = set(topic_vector.keys()) | set(memory_vector.keys())
+ v1 = [topic_vector.get(word, 0) for word in all_words]
+ v2 = [memory_vector.get(word, 0) for word in all_words]
+ similarity = cosine_similarity(v1, v2)
+
+ if similarity >= similarity_threshold:
+ all_similar_topics.append((memory_topic, similarity))
+
+ return all_similar_topics
+
+ def _get_top_topics(self, similar_topics: list, max_topics: int = 5) -> list:
+ """获取相似度最高的主题"""
+ seen_topics = set()
+ top_topics = []
+
+ for topic, score in sorted(similar_topics, key=lambda x: x[1], reverse=True):
+ if topic not in seen_topics and len(top_topics) < max_topics:
+ seen_topics.add(topic)
+ top_topics.append((topic, score))
+
+ return top_topics
+
+ async def memory_activate_value(self, text: str, max_topics: int = 5, similarity_threshold: float = 0.3) -> int:
+ """计算输入文本对记忆的激活程度"""
+ logger.info(f"[记忆激活]识别主题: {await self._identify_topics(text)}")
+
+ identified_topics = await self._identify_topics(text)
+ if not identified_topics:
+ return 0
+
+ all_similar_topics = self._find_similar_topics(
+ identified_topics,
+ similarity_threshold=similarity_threshold,
+ debug_info="记忆激活"
+ )
+
+ if not all_similar_topics:
+ return 0
+
+ top_topics = self._get_top_topics(all_similar_topics, max_topics)
+
+ if len(top_topics) == 1:
+ topic, score = top_topics[0]
+ memory_items = self.memory_graph.G.nodes[topic].get('memory_items', [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+ content_count = len(memory_items)
+ penalty = 1.0 / (1 + math.log(content_count + 1))
+
+ activation = int(score * 50 * penalty)
+ print(f"\033[1;32m[记忆激活]\033[0m 单主题「{topic}」- 相似度: {score:.3f}, 内容数: {content_count}, 激活值: {activation}")
+ return activation
+
+ matched_topics = set()
+ topic_similarities = {}
+
+ for memory_topic, similarity in top_topics:
+ memory_items = self.memory_graph.G.nodes[memory_topic].get('memory_items', [])
+ if not isinstance(memory_items, list):
+ memory_items = [memory_items] if memory_items else []
+ content_count = len(memory_items)
+ penalty = 1.0 / (1 + math.log(content_count + 1))
+
+ for input_topic in identified_topics:
+ topic_vector = text_to_vector(input_topic)
+ memory_vector = text_to_vector(memory_topic)
+ all_words = set(topic_vector.keys()) | set(memory_vector.keys())
+ v1 = [topic_vector.get(word, 0) for word in all_words]
+ v2 = [memory_vector.get(word, 0) for word in all_words]
+ sim = cosine_similarity(v1, v2)
+ if sim >= similarity_threshold:
+ matched_topics.add(input_topic)
+ adjusted_sim = sim * penalty
+ topic_similarities[input_topic] = max(topic_similarities.get(input_topic, 0), adjusted_sim)
+ print(f"\033[1;32m[记忆激活]\033[0m 主题「{input_topic}」-> 「{memory_topic}」(内容数: {content_count}, 相似度: {adjusted_sim:.3f})")
+
+ topic_match = len(matched_topics) / len(identified_topics)
+ average_similarities = sum(topic_similarities.values()) / len(topic_similarities) if topic_similarities else 0
+
+ activation = int((topic_match + average_similarities) / 2 * 100)
+ print(f"\033[1;32m[记忆激活]\033[0m 匹配率: {topic_match:.3f}, 平均相似度: {average_similarities:.3f}, 激活值: {activation}")
+
+ return activation
+
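+ # 激活值计算示例(假设数字,仅作说明):识别出 4 个主题、3 个匹配 => topic_match = 0.75;
+ # 若调整后平均相似度为 0.5,则 activation = int((0.75+0.5)/2*100) = 62。
+ # 单主题情形:相似度 0.8、节点含 10 条记忆 => penalty = 1/(1+ln(11)) ≈ 0.294,
+ # activation = int(0.8*50*0.294) = 11。
+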
+ async def get_relevant_memories(self, text: str, max_topics: int = 5, similarity_threshold: float = 0.4, max_memory_num: int = 5) -> list:
+ """根据输入文本获取相关的记忆内容"""
+ identified_topics = await self._identify_topics(text)
+
+ all_similar_topics = self._find_similar_topics(
+ identified_topics,
+ similarity_threshold=similarity_threshold,
+ debug_info="记忆检索"
+ )
+
+ relevant_topics = self._get_top_topics(all_similar_topics, max_topics)
+
+ relevant_memories = []
+ for topic, score in relevant_topics:
+ first_layer, _ = self.memory_graph.get_related_item(topic, depth=1)
+ if first_layer:
+ if len(first_layer) > max_memory_num/2:
+ first_layer = random.sample(first_layer, max_memory_num//2)
+ for memory in first_layer:
+ relevant_memories.append({
+ 'topic': topic,
+ 'similarity': score,
+ 'content': memory
+ })
+
+ relevant_memories.sort(key=lambda x: x['similarity'], reverse=True)
+
+ if len(relevant_memories) > max_memory_num:
+ relevant_memories = random.sample(relevant_memories, max_memory_num)
+
+ return relevant_memories
+
+ def find_topic_llm(self,text, topic_num):
+ prompt = f'这是一段文字:{text}。请你从这段话中总结出{topic_num}个关键的概念,可以是名词,动词,或者特定人物,帮我列出来,用逗号,隔开,尽可能精简。只需要列举{topic_num}个话题就好,不要有序号,不要告诉我其他内容。'
+ return prompt
+
+ def topic_what(self,text, topic, time_info):
+ prompt = f'这是一段文字,{time_info}:{text}。我想让你基于这段文字来概括"{topic}"这个概念,帮我总结成一句自然的话,可以包含时间和人物,以及具体的观点。只输出这句话就好'
+ return prompt
+
+def segment_text(text):
+ """使用jieba进行文本分词"""
+ seg_text = list(jieba.cut(text))
+ return seg_text
+
+def text_to_vector(text):
+ """将文本转换为词频向量"""
+ words = segment_text(text)
+ vector = {}
+ for word in words:
+ vector[word] = vector.get(word, 0) + 1
+ return vector
+
+def cosine_similarity(v1, v2):
+ """计算两个向量的余弦相似度"""
+ dot_product = sum(a * b for a, b in zip(v1, v2))
+ norm1 = math.sqrt(sum(a * a for a in v1))
+ norm2 = math.sqrt(sum(b * b for b in v2))
+ if norm1 == 0 or norm2 == 0:
+ return 0
+ return dot_product / (norm1 * norm2)
+
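+# 下面是一个示意用的辅助函数(假设性示例,模块内并未调用):
+# 演示如何组合 text_to_vector 与 cosine_similarity 比较两段文本。
+def _similarity_demo(text_a: str, text_b: str) -> float:
+ """返回两段文本词频向量的余弦相似度(示意用)"""
+ v_a, v_b = text_to_vector(text_a), text_to_vector(text_b)
+ words = set(v_a) | set(v_b)
+ return cosine_similarity([v_a.get(w, 0) for w in words],
+ [v_b.get(w, 0) for w in words])
+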
+def visualize_graph_lite(memory_graph: Memory_graph, color_by_memory: bool = False):
+ # 设置中文字体
+ plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签
+ plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
+
+ G = memory_graph.G
+
+ # 创建一个新图用于可视化
+ H = G.copy()
+
+ # 过滤掉内容数量小于2的节点
+ nodes_to_remove = []
+ for node in H.nodes():
+ memory_items = H.nodes[node].get('memory_items', [])
+ memory_count = len(memory_items) if isinstance(memory_items, list) else (1 if memory_items else 0)
+ if memory_count < 2:
+ nodes_to_remove.append(node)
+
+ H.remove_nodes_from(nodes_to_remove)
+
+ # 如果没有符合条件的节点,直接返回
+ if len(H.nodes()) == 0:
+ print("没有找到内容数量大于等于2的节点")
+ return
+
+ # 计算节点大小和颜色
+ node_colors = []
+ node_sizes = []
+ nodes = list(H.nodes())
+
+ # 获取最大记忆数用于归一化节点大小
+ max_memories = 1
+ for node in nodes:
+ memory_items = H.nodes[node].get('memory_items', [])
+ memory_count = len(memory_items) if isinstance(memory_items, list) else (1 if memory_items else 0)
+ max_memories = max(max_memories, memory_count)
+
+ # 计算每个节点的大小和颜色
+ for node in nodes:
+ # 计算节点大小(基于记忆数量)
+ memory_items = H.nodes[node].get('memory_items', [])
+ memory_count = len(memory_items) if isinstance(memory_items, list) else (1 if memory_items else 0)
+ # 使用指数函数使变化更明显
+ ratio = memory_count / max_memories
+ size = 400 + 2000 * (ratio ** 2) # 增大节点大小
+ node_sizes.append(size)
+
+ # 计算节点颜色(基于连接数)
+ degree = H.degree(node)
+ if degree >= 30:
+ node_colors.append((1.0, 0, 0)) # 亮红色 (#FF0000)
+ else:
+ # 将度数 1-30 映射到 0-1 的范围
+ color_ratio = (degree - 1) / 29.0 if degree > 1 else 0
+ # 使用蓝到红的渐变
+ red = min(0.9, color_ratio)
+ blue = max(0.0, 1.0 - color_ratio)
+ node_colors.append((red, 0, blue))
+
+ # 绘制图形
+ plt.figure(figsize=(16, 12)) # 减小图形尺寸
+ pos = nx.spring_layout(H,
+ k=1, # 调整节点间斥力
+ iterations=100, # 增加迭代次数
+ scale=1.5, # 减小布局尺寸
+ weight='strength') # 使用边的strength属性作为权重
+
+ nx.draw(H, pos,
+ with_labels=True,
+ node_color=node_colors,
+ node_size=node_sizes,
+ font_size=12, # 保持增大的字体大小
+ font_family='SimHei',
+ font_weight='bold',
+ edge_color='gray',
+ width=1.5) # 统一的边宽度
+
+ title = '记忆图谱可视化(仅显示内容≥2的节点)\n节点大小表示记忆数量\n节点颜色:蓝(弱连接)到红(强连接)渐变\n连接强度越大的节点距离越近'
+ plt.title(title, fontsize=16, fontfamily='SimHei')
+ plt.show()
+
+async def main():
+ # 初始化数据库
+ logger.info("正在初始化数据库连接...")
+ db = Database.get_instance()
+ start_time = time.time()
+
+ test_params = {'do_build_memory': True, 'do_forget_topic': False, 'do_visualize_graph': True, 'do_query': False, 'do_merge_memory': False}
+
+ # 创建记忆图
+ memory_graph = Memory_graph()
+
+ # 创建海马体
+ hippocampus = Hippocampus(memory_graph)
+
+ # 从数据库同步数据
+ hippocampus.memory_cortex.sync_memory_from_db()
+
+ end_time = time.time()
+ logger.info(f"\033[32m[加载海马体耗时: {end_time - start_time:.2f} 秒]\033[0m")
+
+ # 构建记忆
+ if test_params['do_build_memory']:
+ logger.info("开始构建记忆...")
+ chat_size = 20
+ await hippocampus.operation_build_memory(chat_size=chat_size)
+
+ end_time = time.time()
+ logger.info(f"\033[32m[构建记忆耗时: {end_time - start_time:.2f} 秒,chat_size={chat_size},chat_count = 16]\033[0m")
+
+ if test_params['do_forget_topic']:
+ logger.info("开始遗忘记忆...")
+ await hippocampus.operation_forget_topic(percentage=0.01)
+
+ end_time = time.time()
+ logger.info(f"\033[32m[遗忘记忆耗时: {end_time - start_time:.2f} 秒]\033[0m")
+
+ if test_params['do_merge_memory']:
+ logger.info("开始合并记忆...")
+ await hippocampus.operation_merge_memory(percentage=0.1)
+
+ end_time = time.time()
+ logger.info(f"\033[32m[合并记忆耗时: {end_time - start_time:.2f} 秒]\033[0m")
+
+ if test_params['do_visualize_graph']:
+ # 展示优化后的图形
+ logger.info("生成记忆图谱可视化...")
+ print("\n生成优化后的记忆图谱:")
+ visualize_graph_lite(memory_graph)
+
+ if test_params['do_query']:
+ # 交互式查询
+ while True:
+ query = input("\n请输入新的查询概念(输入'退出'以结束):")
+ if query.lower() == '退出':
+ break
+
+ first_layer, second_layer = memory_graph.get_related_item(query, depth=2) # depth=2 才会返回第二层(间接)记忆
+ if first_layer or second_layer: # 返回值恒为二元组,需检查内容而非元组本身
+ if first_layer:
+ print("\n直接相关的记忆:")
+ for item in first_layer:
+ print(f"- {item}")
+ if second_layer:
+ print("\n间接相关的记忆:")
+ for item in second_layer:
+ print(f"- {item}")
+ else:
+ print("未找到相关记忆。")
+
+
+if __name__ == "__main__":
+ import asyncio
+ asyncio.run(main())
+
+
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index fe8e1a100..ac567600a 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -44,8 +44,8 @@ class LLM_request:
self.db.db.llm_usage.create_index([("model_name", 1)])
self.db.db.llm_usage.create_index([("user_id", 1)])
self.db.db.llm_usage.create_index([("request_type", 1)])
- except Exception as e:
- logger.error(f"创建数据库索引失败")
+ except Exception:
+ logger.error("创建数据库索引失败")
def _record_usage(self, prompt_tokens: int, completion_tokens: int, total_tokens: int,
user_id: str = "system", request_type: str = "chat",
@@ -80,7 +80,7 @@ class LLM_request:
f"总计: {total_tokens}"
)
except Exception:
- logger.error(f"记录token使用情况失败")
+ logger.error("记录token使用情况失败")
def _calculate_cost(self, prompt_tokens: int, completion_tokens: int) -> float:
"""计算API调用成本
@@ -194,7 +194,7 @@ class LLM_request:
if hasattr(global_config, 'llm_normal') and global_config.llm_normal.get(
'name') == old_model_name:
global_config.llm_normal['name'] = self.model_name
- logger.warning(f"已将全局配置中的 llm_normal 模型降级")
+ logger.warning("已将全局配置中的 llm_normal 模型降级")
# 更新payload中的模型名
if payload and 'model' in payload:
@@ -227,7 +227,7 @@ class LLM_request:
delta_content = ""
accumulated_content += delta_content
except Exception:
- logger.exception(f"解析流式输出错")
+ logger.exception("解析流式输出错")
content = accumulated_content
reasoning_content = ""
think_match = re.search(r'<think>(.*?)</think>', content, re.DOTALL)
@@ -355,7 +355,7 @@ class LLM_request:
"""构建请求头"""
if no_key:
return {
- "Authorization": f"Bearer **********",
+ "Authorization": "Bearer **********",
"Content-Type": "application/json"
}
else:
diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py
index fc07a152d..e280c6bce 100644
--- a/src/plugins/schedule/schedule_generator.py
+++ b/src/plugins/schedule/schedule_generator.py
@@ -68,7 +68,7 @@ class ScheduleGenerator:
1. 早上的学习和工作安排
2. 下午的活动和任务
3. 晚上的计划和休息时间
- 请按照时间顺序列出具体时间点和对应的活动,用一个时间点而不是时间段来表示时间,用JSON格式返回日程表,仅返回内容,不要返回注释,时间采用24小时制,格式为{"时间": "活动","时间": "活动",...}。"""
+ 请按照时间顺序列出具体时间点和对应的活动,用一个时间点而不是时间段来表示时间,用JSON格式返回日程表,仅返回内容,不要返回注释,不要添加任何markdown或代码块样式,时间采用24小时制,格式为{"时间": "活动","时间": "活动",...}。"""
try:
schedule_text, _ = await self.llm_scheduler.generate_response(prompt)
@@ -91,7 +91,7 @@ class ScheduleGenerator:
try:
schedule_dict = json.loads(schedule_text)
return schedule_dict
- except json.JSONDecodeError as e:
+ except json.JSONDecodeError:
logger.exception("解析日程失败: {}".format(schedule_text))
return False
diff --git a/src/plugins/utils/statistic.py b/src/plugins/utils/statistic.py
index a071355a3..2974389e6 100644
--- a/src/plugins/utils/statistic.py
+++ b/src/plugins/utils/statistic.py
@@ -155,7 +155,7 @@ class LLMStatistics:
all_stats = self._collect_all_statistics()
self._save_statistics(all_stats)
except Exception:
- logger.exception(f"统计数据处理失败")
+ logger.exception("统计数据处理失败")
# 等待1分钟
for _ in range(60):
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index ff39c9a69..1b33b42cf 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -15,6 +15,7 @@ version = "0.0.5"
[bot]
qq = 123
nickname = "麦麦"
+alias_names = ["小麦", "阿麦"]
[personality]
prompt_personality = [