diff --git a/![新版麦麦开始学习.bat b/![新版麦麦开始学习.bat
new file mode 100644
index 000000000..41fc98368
--- /dev/null
+++ b/![新版麦麦开始学习.bat
@@ -0,0 +1,46 @@
+@echo off
+CHCP 65001 > nul
+setlocal enabledelayedexpansion
+
+REM 查找venv虚拟环境
+set "venv_path=%~dp0venv\Scripts\activate.bat"
+if not exist "%venv_path%" (
+ echo 错误: 未找到虚拟环境,请确保venv目录存在
+ pause
+ exit /b 1
+)
+
+REM 激活虚拟环境
+call "%venv_path%"
+if %ERRORLEVEL% neq 0 (
+ echo 错误: 虚拟环境激活失败
+ pause
+ exit /b 1
+)
+
+REM 运行预处理脚本
+python "%~dp0scripts\raw_data_preprocessor.py"
+if %ERRORLEVEL% neq 0 (
+ echo 错误: raw_data_preprocessor.py 执行失败
+ pause
+ exit /b 1
+)
+
+REM 运行信息提取脚本
+python "%~dp0scripts\info_extraction.py"
+if %ERRORLEVEL% neq 0 (
+ echo 错误: info_extraction.py 执行失败
+ pause
+ exit /b 1
+)
+
+REM 运行OpenIE导入脚本
+python "%~dp0scripts\import_openie.py"
+if %ERRORLEVEL% neq 0 (
+ echo 错误: import_openie.py 执行失败
+ pause
+ exit /b 1
+)
+
+echo 所有处理步骤完成!
+pause
\ No newline at end of file
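Note: the batch script above simply chains the three LPMM learning steps (raw-data preprocessing → information extraction → OpenIE import) inside the project's `venv` and aborts on the first failure. For readers not on Windows, a rough Python equivalent is sketched below; it is illustrative only and assumes it is saved in the repository root and run with the right environment already activated.

```python
# Illustrative sketch only - mirrors the .bat above: run the three LPMM
# learning steps in order and stop at the first failure.
import subprocess
import sys
from pathlib import Path

STEPS = [
    "scripts/raw_data_preprocessor.py",
    "scripts/info_extraction.py",
    "scripts/import_openie.py",
]


def main() -> int:
    root = Path(__file__).resolve().parent  # assumes this file sits in the repo root
    for step in STEPS:
        print(f"==> running {step}")
        result = subprocess.run([sys.executable, str(root / step)])
        if result.returncode != 0:
            print(f"错误: {step} 执行失败")
            return result.returncode
    print("所有处理步骤完成!")
    return 0


if __name__ == "__main__":
    sys.exit(main())
```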
diff --git a/.dockerignore b/.dockerignore
index 6c2d07736..fac1bf99a 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,6 +1,5 @@
.git
__pycache__
-*.pyc
*.pyo
*.pyd
.DS_Store
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 76636d746..605d838ce 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -24,6 +24,9 @@ jobs:
- name: Clone maim_message
run: git clone https://github.com/MaiM-with-u/maim_message maim_message
+ - name: Clone lpmm
+ run: git clone https://github.com/MaiM-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
+
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
diff --git a/.github/workflows/ruff-pr.yml b/.github/workflows/ruff-pr.yml
new file mode 100644
index 000000000..bb83de8c9
--- /dev/null
+++ b/.github/workflows/ruff-pr.yml
@@ -0,0 +1,9 @@
+name: Ruff
+on: [ pull_request ]
+jobs:
+  ruff:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: astral-sh/ruff-action@v3
+
diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
index b3056fa6a..58921a76f 100644
--- a/.github/workflows/ruff.yml
+++ b/.github/workflows/ruff.yml
@@ -1,5 +1,5 @@
name: Ruff
-on: [ push, pull_request ]
+on: [ push ]
permissions:
contents: write
diff --git a/.gitignore b/.gitignore
index 3e9b98685..88995eccc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,9 @@ mongodb/
NapCat.Framework.Windows.Once/
log/
logs/
+tool_call_benchmark.py
run_ad.bat
+llm_tool_benchmark_results.json
MaiBot-Napcat-Adapter-main
MaiBot-Napcat-Adapter
/test
@@ -28,8 +30,9 @@ memory_graph.gml
config/bot_config_dev.toml
config/bot_config.toml
config/bot_config.toml.bak
+config/lpmm_config.toml
+config/lpmm_config.toml.bak
src/plugins/remote/client_uuid.json
-run_none.bat
(测试版)麦麦生成人格.bat
(临时版)麦麦开始学习.bat
src/plugins/utils/statistic.py
@@ -240,6 +243,57 @@ logs
.vscode
/config/*
-run_none.bat
config/old/bot_config_20250405_212257.toml
+temp/
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
diff --git a/Dockerfile b/Dockerfile
index 838e2b993..23165a23e 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,12 +6,22 @@ WORKDIR /MaiMBot
# 复制依赖列表
COPY requirements.txt .
-# 同级目录下需要有 maim_message
-COPY maim_message /maim_message
+# 同级目录下需要有 maim_message MaiMBot-LPMM
+#COPY maim_message /maim_message
+COPY MaiMBot-LPMM /MaiMBot-LPMM
+
+# 编译器
+RUN apt-get update && apt-get install -y build-essential
+
+# lpmm编译安装
+RUN cd /MaiMBot-LPMM && uv pip install --system -r requirements.txt
+RUN uv pip install --system Cython py-cpuinfo setuptools
+RUN cd /MaiMBot-LPMM/lib/quick_algo && python build_lib.py --cleanup --cythonize --install
+
# 安装依赖
RUN uv pip install --system --upgrade pip
-RUN uv pip install --system -e /maim_message
+#RUN uv pip install --system -e /maim_message
RUN uv pip install --system -r requirements.txt
# 复制项目代码
diff --git a/README.md b/README.md
index 325e3ad22..58cb82c7d 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
-
+
👆 点击观看麦麦演示视频 👆
@@ -62,7 +61,7 @@
### 📢 版本信息
-**最新版本: v0.6.2** ([查看更新日志](changelogs/changelog.md))
+**最新版本: v0.6.3** ([查看更新日志](changelogs/changelog.md))
> [!WARNING]
> 请阅读教程后更新!!!!!!!
> 请阅读教程后更新!!!!!!!
@@ -91,15 +90,15 @@
- 项目处于活跃开发阶段,功能和API可能随时调整
### 💬交流群(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
+- [一群](https://qm.qq.com/q/VQ3XZrWgMs) 766798517
+- [二群](https://qm.qq.com/q/RzmCiRtHEW) 571780722
- [五群](https://qm.qq.com/q/JxvHZnxyec) 1022489779
-- [一群](https://qm.qq.com/q/VQ3XZrWgMs) 766798517 【已满】
-- [二群](https://qm.qq.com/q/RzmCiRtHEW) 571780722【已满】
- [三群](https://qm.qq.com/q/wlH5eT8OmQ) 1035228475【已满】
- [四群](https://qm.qq.com/q/wlH5eT8OmQ) 729957033【已满】
-
📚 文档
+📚 文档
### (部分内容可能过时,请注意版本对应)
@@ -111,19 +110,20 @@
- [🚀 最新版本部署教程](https://docs.mai-mai.org/manual/deployment/mmc_deploy_windows.html) - 基于MaiCore的新版本部署方式(与旧版本不兼容)
-## 🎯 功能介绍
+## 🎯 0.6.3 功能介绍
-| 模块 | 主要功能 | 特点 |
-|------|---------|------|
-| 💬 聊天系统 | • 心流/推理聊天<br>• 关键词主动发言<br>• 多模型支持<br>• 动态prompt构建<br>• 私聊功能(PFC) | 拟人化交互 |
-| 🧠 心流系统 | • 实时思考生成<br>• 自动启停机制<br>• 日程系统联动<br>• 工具调用能力 | 智能化决策 |
-| 🧠 记忆系统 | • 优化记忆抽取<br>• 海马体记忆机制<br>• 聊天记录概括 | 持久化记忆 |
-| 😊 表情系统 | • 情绪匹配发送<br>• GIF支持<br>• 自动收集与审查 | 丰富表达 |
-| 📅 日程系统 | • 动态日程生成<br>• 自定义想象力<br>• 思维流联动 | 智能规划 |
-| 👥 关系系统 | • 关系管理优化<br>• 丰富接口支持<br>• 个性化交互 | 深度社交 |
-| 📊 统计系统 | • 使用数据统计<br>• LLM调用记录<br>• 实时控制台显示 | 数据可视 |
-| 🔧 系统功能 | • 优雅关闭机制<br>• 自动数据保存<br>• 异常处理完善 | 稳定可靠 |
-| 🛠️ 工具系统 | • 知识获取工具<br>• 自动注册机制<br>• 多工具支持 | 扩展功能 |
+| 模块 | 主要功能 | 特点 |
+|----------|------------------------------------------------------------------|-------|
+| 💬 聊天系统 | • **统一调控不同回复逻辑**<br>• 智能交互模式 (普通聊天/专注聊天)<br>• 关键词主动发言<br>• 多模型支持<br>• 动态prompt构建<br>• 私聊功能(PFC)增强 | 拟人化交互 |
+| 🧠 心流系统 | • 实时思考生成<br>• **智能状态管理**<br>• **概率回复机制**<br>• 自动启停机制<br>• 日程系统联动<br>• **上下文感知工具调用** | 智能化决策 |
+| 🧠 记忆系统 | • **记忆整合与提取**<br>• 海马体记忆机制<br>• 聊天记录概括 | 持久化记忆 |
+| 😊 表情系统 | • **全新表情包系统**<br>• **优化选择逻辑**<br>• 情绪匹配发送<br>• GIF支持<br>• 自动收集与审查 | 丰富表达 |
+| 📅 日程系统 | • 动态日程生成<br>• 自定义想象力<br>• 思维流联动 | 智能规划 |
+| 👥 关系系统 | • **工具调用动态更新**<br>• 关系管理优化<br>• 丰富接口支持<br>• 个性化交互 | 深度社交 |
+| 📊 统计系统 | • 使用数据统计<br>• LLM调用记录<br>• 实时控制台显示 | 数据可视 |
+| 🛠️ 工具系统 | • **LPMM知识库集成**<br>• **上下文感知调用**<br>• 知识获取工具<br>• 自动注册机制<br>• 多工具支持 | 扩展功能 |
+| 📚 **知识库(LPMM)** | • **全新LPMM系统**<br>• **强大的信息检索能力** | 知识增强 |
+| ✨ **昵称系统** | • **自动为群友取昵称**<br>• **降低认错人概率** (早期阶段) | 身份识别 |
## 📐 项目架构
@@ -143,21 +143,9 @@ graph TD
E --> M[情绪识别]
```
-
-## 开发计划TODO:LIST
-
-- 人格功能:WIP
-- 对特定对象的侧写功能
-- 图片发送,转发功能:WIP
-- 幽默和meme功能:WIP
-- 兼容gif的解析和保存
-- 小程序转发链接解析
-- 修复已知bug
-- 自动生成的回复逻辑,例如自生成的回复方向,回复风格
-
## ✍️如何给本项目报告BUG/提交建议/做贡献
-MaiCore是一个开源项目,我们非常欢迎你的参与。你的贡献,无论是提交bug报告、功能需求还是代码pr,都对项目非常宝贵。我们非常感谢你的支持!🎉 但无序的讨论会降低沟通效率,进而影响问题的解决速度,因此在提交任何贡献前,请务必先阅读本项目的[贡献指南](CONTRIBUTE.md)(待补完)
+MaiCore是一个开源项目,我们非常欢迎你的参与。你的贡献,无论是提交bug报告、功能需求还是代码pr,都对项目非常宝贵。我们非常感谢你的支持!🎉 但无序的讨论会降低沟通效率,进而影响问题的解决速度,因此在提交任何贡献前,请务必先阅读本项目的[贡献指南](depends-data/CONTRIBUTE.md)(待补完)
@@ -181,12 +169,16 @@ MaiCore是一个开源项目,我们非常欢迎你的参与。你的贡献,
- [NapCat](https://github.com/NapNeko/NapCatQQ): 现代化的基于 NTQQ 的 Bot 协议端实现
+## 麦麦仓库状态
+
+
+
### 贡献者
感谢各位大佬!
-
+
**也感谢每一位给麦麦发展提出宝贵意见与建议的用户,感谢陪伴麦麦走到现在的你们**
diff --git a/bot.py b/bot.py
index 653efd45d..d547c360e 100644
--- a/bot.py
+++ b/bot.py
@@ -6,17 +6,17 @@ import sys
from pathlib import Path
import time
import platform
+import traceback
from dotenv import load_dotenv
-from src.common.logger import get_module_logger, LogConfig, CONFIRM_STYLE_CONFIG
+from src.common.logger_manager import get_logger
+
+# from src.common.logger import LogConfig, CONFIRM_STYLE_CONFIG
from src.common.crash_logger import install_crash_handler
from src.main import MainSystem
-logger = get_module_logger("main_bot")
-confirm_logger_config = LogConfig(
- console_format=CONFIRM_STYLE_CONFIG["console_format"],
- file_format=CONFIRM_STYLE_CONFIG["file_format"],
-)
-confirm_logger = get_module_logger("confirm", config=confirm_logger_config)
+
+logger = get_logger("main")
+confirm_logger = get_logger("confirm")
# 获取没有加载env时的环境变量
env_mask = {key: os.getenv(key) for key in os.environ}
@@ -51,6 +51,16 @@ def init_config():
shutil.copy("template/bot_config_template.toml", "config/bot_config.toml")
logger.info("复制完成,请修改config/bot_config.toml和.env中的配置后重新启动")
+ if not os.path.exists("config/lpmm_config.toml"):
+ logger.warning("检测到lpmm_config.toml不存在,正在从模板复制")
+
+ # 检查config目录是否存在
+ if not os.path.exists("config"):
+ os.makedirs("config")
+ logger.info("创建config目录")
+
+ shutil.copy("template/lpmm_config_template.toml", "config/lpmm_config.toml")
+ logger.info("复制完成,请修改config/lpmm_config.toml和.env中的配置后重新启动")
def init_env():
@@ -236,7 +246,7 @@ if __name__ == "__main__":
loop.close()
except Exception as e:
- logger.error(f"主程序异常: {str(e)}")
+ logger.error(f"主程序异常: {str(e)} {str(traceback.format_exc())}")
if loop and not loop.is_closed():
loop.run_until_complete(graceful_shutdown())
loop.close()
diff --git a/changelogs/changelog.md b/changelogs/changelog.md
index 0ddb486bf..0d6608b02 100644
--- a/changelogs/changelog.md
+++ b/changelogs/changelog.md
@@ -1,5 +1,84 @@
# Changelog
+## [0.6.3] - 2025-4-15
+
+### 摘要
+- MaiBot 0.6.3 版本发布!核心重构回复逻辑,统一为心流系统管理,智能切换交互模式。
+- 引入全新的 LPMM 知识库系统,大幅提升信息获取能力。
+- 新增昵称系统,改善群聊中的身份识别。
+- 提供独立的桌宠适配器连接程序。
+- 优化日志输出,修复若干问题。
+
+### 🌟 核心功能增强
+#### 统一回复逻辑 (Unified Reply Logic)
+- **核心重构**: 移除了经典 (Reasoning) 与心流 (Heart Flow) 模式的区分,将回复逻辑完全整合到 `SubHeartflow` 中进行统一管理,由主心流统一调控。保留 Heart FC 模式的特色功能。
+- **智能交互模式**: `SubHeartflow` 现在可以根据情境智能选择不同的交互模式:
+ - **普通聊天 (Normal Chat)**: 类似于之前的 Reasoning 模式,进行常规回复(激活逻辑暂未改变)。
+ - **心流聊天 (Heart Flow Chat)**: 基于改进的 PFC 模式,能更好地理解上下文,减少重复和认错人的情况,并支持**工具调用**以获取额外信息。
+ - **离线模式 (Offline/Absent)**: 在特定情况下,麦麦可能会选择暂时不查看或回复群聊消息。
+- **状态管理**: 交互模式的切换由 `SubHeartflow` 内部逻辑和 `SubHeartflowManager` 根据整体状态 (`MaiState`) 和配置进行管理。
+- **流程优化**: 拆分了子心流的思考模块,使整体对话流程更加清晰。
+- **状态判断改进**: 将 CHAT 状态判断交给 LLM 处理,使对话更自然。
+- **回复机制**: 实现更为灵活的概率回复机制,使机器人能够自然地融入群聊环境。
+- **重复性检查**: 加入心流回复重复性检查机制,防止麦麦陷入固定回复模式。
+
+#### 全新知识库系统 (New Knowledge Base System - LPMM)
+- **引入 LPMM**: 新增了 **LPMM (Large Psychology Model Maker)** 知识库系统,具有强大的信息检索能力,能显著提升麦麦获取和利用知识的效率。
+- **功能集成**: 集成了 LPMM 知识库查询功能,进一步扩展信息检索能力。
+- **推荐使用**: 强烈建议使用新的 LPMM 系统以获得最佳体验。旧的知识库系统仍然可用作为备选。
+
+#### 昵称系统 (Nickname System)
+- **自动取名**: 麦麦现在会尝试给群友取昵称,减少对易变的群昵称的依赖,从而降低认错人的概率。
+- **持续完善**: 该系统目前仍处于早期阶段,会持续进行优化。
+
+#### 记忆与上下文增强 (Memory and Context Enhancement)
+- **聊天记录压缩**: 大幅优化聊天记录压缩系统,使机器人能够处理5倍于之前的上下文记忆量。
+- **长消息截断**: 新增了长消息自动截断与模糊化功能,随着时间推移降低超长消息的权重,避免被特定冗余信息干扰。
+- **记忆提取**: 优化记忆提取功能,提高对历史对话的理解和引用能力。
+- **记忆整合**: 为记忆系统加入了合并与整合机制,优化长期记忆的结构与效率。
+- **中期记忆调用**: 完善中期记忆调用机制,使机器人能够更自然地回忆和引用较早前的对话。
+- **Prompt 优化**: 进一步优化了关系系统和记忆系统相关的提示词(prompt)。
+
+#### 私聊 PFC 功能增强 (Private Chat PFC Enhancement)
+- **功能修复与优化**: 修复了私聊 PFC 载入聊天记录缺失的 bug,优化了 prompt 构建,增加了审核机制,调整了重试次数,并将机器人发言存入数据库。
+- **实验性质**: 请注意,PFC 仍然是一个实验性功能,可能在未来版本中被修改或移除,目前不接受相关 Bug 反馈。
+
+#### 情感与互动增强 (Emotion and Interaction Enhancement)
+- **全新表情包系统**: 新的表情包系统上线,表情含义更丰富,发送更快速。
+- **表情包使用优化**: 优化了表情包的选择逻辑,减少重复使用特定表情包的情况,使表达更生动。
+- **提示词优化**: 优化提示词(prompt)构建,增强对话质量和情感表达。
+- **积极性配置**: 优化"让麦麦更愿意说话"的相关配置,使机器人更积极参与对话。
+- **颜文字保护**: 保护颜文字处理机制,确保表情正确显示。
+
+#### 工具与集成 (Tools and Integration)
+- **动态更新**: 使用工具调用来更新关系和心情,取代原先的固定更新机制。
+- **智能调用**: 工具调用时会考虑上下文,使调用更加智能。
+- **知识库依赖**: 添加 LPMM 知识库依赖,扩展知识检索工具。
+
+### 💻 系统架构优化
+#### 日志优化 (Logging Optimization)
+- **输出更清晰**: 优化了日志信息的格式和内容,使其更易于阅读和理解。
+
+#### 模型与消息整合 (Model and Message Integration)
+- **模型合并**: 合并工具调用模型和心流模型,提高整体一致性。
+- **消息规范**: 全面改用 `maim_message`,移除对 `rest` 的支持。
+
+#### (临时) 简易 GUI (Temporary Simple GUI)
+- **运行状态查看**: 提供了一个非常基础的图形用户界面,用于查看麦麦的运行状态。
+- **临时方案**: 这是一个临时性的解决方案,功能简陋,**将在 0.6.4 版本中被全新的 Web UI 所取代**。此 GUI 不会包含在主程序包中,而是通过一键包提供,并且不接受 Bug 反馈。
+
+### 🐛 问题修复
+- **记忆检索优化**: 提高了记忆检索的准确性和效率。
+- 修复了一些其他小问题。
+
+### 🔧 其他改进
+#### 桌宠适配器 (Desktop Pet Adapter)
+- **独立适配器**: 提供了一个"桌宠"独立适配器,用于连接麦麦和桌宠。
+- **获取方式**: 可在 MaiBot 的 GitHub 组织中找到该适配器,不包含在主程序内。
+
+#### 一键包内容 (One-Click Package Contents)
+- **辅助程序**: 一键包中包含了简易 GUI 和 **麦麦帮助配置** 等辅助程序,后者可在配置出现问题时提供帮助。
+
## [0.6.2] - 2025-4-14
### 摘要
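
Note on the 0.6.3 entry above: the "统一回复逻辑" section describes `SubHeartflow` switching between normal chat, heart-flow (focused) chat and an absent state, driven by `SubHeartflowManager` and the global `MaiState`. The sketch below only illustrates that kind of per-chat mode switching; the enum values, threshold and probability logic are hypothetical and are not taken from the MaiBot source.

```python
# Minimal illustration of per-chat interaction-mode switching.
# All names and thresholds here are hypothetical, not MaiBot's actual API.
import random
from enum import Enum, auto


class ChatMode(Enum):
    NORMAL = auto()   # 普通聊天
    FOCUSED = auto()  # 心流/专注聊天
    ABSENT = auto()   # 暂时不看该群


class SubFlowSketch:
    def __init__(self, start_focus_probability: float = 0.2):
        self.mode = ChatMode.NORMAL
        self.start_focus_probability = start_focus_probability

    def update_mode(self, interest_level: float) -> ChatMode:
        """Pick a mode from the current interest level (toy logic only)."""
        if interest_level < 1.0:
            self.mode = ChatMode.ABSENT
        elif interest_level > 5.0 and random.random() < self.start_focus_probability:
            self.mode = ChatMode.FOCUSED
        else:
            self.mode = ChatMode.NORMAL
        return self.mode


flow = SubFlowSketch()
print(flow.update_mode(interest_level=7.3))
```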
diff --git a/changelogs/changelog_config.md b/changelogs/changelog_config.md
index e438ea31e..5aa5fb922 100644
--- a/changelogs/changelog_config.md
+++ b/changelogs/changelog_config.md
@@ -33,7 +33,7 @@
- 调整了部分配置项的默认值
- 调整了配置项的顺序,将 `groups` 配置项移到了更靠前的位置
- 在 `message` 配置项中:
- - 新增了 `max_response_length` 参数
+ - 新增了 `model_max_output_length` 参数
- 在 `willing` 配置项中新增了 `emoji_response_penalty` 参数
- 将 `personality` 配置项中的 `prompt_schedule` 重命名为 `prompt_schedule_gen`
diff --git a/CONTRIBUTE.md b/depends-data/CONTRIBUTE.md
similarity index 99%
rename from CONTRIBUTE.md
rename to depends-data/CONTRIBUTE.md
index 440a8202d..c372aebe8 100644
--- a/CONTRIBUTE.md
+++ b/depends-data/CONTRIBUTE.md
@@ -19,7 +19,6 @@
● [我有问题](#我有问题)
● [我想做贡献](#我想做贡献)
-● [我想报告BUG](#报告BUG)
● [我想提出建议](#提出建议)
## 我有问题
diff --git a/llm_tool_benchmark_results.json b/llm_tool_benchmark_results.json
new file mode 100644
index 000000000..6caa7c315
--- /dev/null
+++ b/llm_tool_benchmark_results.json
@@ -0,0 +1,145 @@
+{
+ "测试时间": "2025-04-28 14:12:36",
+ "测试迭代次数": 10,
+ "不使用工具调用": {
+ "平均耗时": 4.596814393997192,
+ "最短耗时": 2.957131862640381,
+ "最长耗时": 10.121938705444336,
+ "标准差": 2.1705468730949593,
+ "所有耗时": [
+ 3.18,
+ 4.65,
+ 10.12,
+ 3.5,
+ 4.46,
+ 4.24,
+ 3.23,
+ 6.2,
+ 2.96,
+ 3.42
+ ]
+ },
+ "不使用工具调用_详细响应": [
+ {
+ "内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"怎么啦?\",\n \"emoji_query\": \"友好地询问\"\n}\n```",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "decide_reply_action(\n action=\"text_reply\",\n reasoning=\"千石连续两次叫了我的名字,显然是想引起我的注意或有事要说,作为礼貌应当回应\",\n emoji_query=\"友善的回应\"\n)",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"怎么啦?我在呢~\",\n \"emoji_query\": \"友好的询问\",\n \"reasoning\": \"由于对方连续两次提到我的名字,显然是想与我交流,应当及时给予友好回应避免冷场\"\n}\n```",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"在呢在呢~怎么啦?\",\n \"emoji_query\": \"好奇的回应\"\n}\n```",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "decide_reply_action(\n action=\"text_reply\",\n reasoning=\"千石连续两次提到我的名字,显然需要我回应。文字回复更正式且能明确表示我在关注他的信息。\",\n emoji_query=\"友好的回应\"\n)",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"reasoning\": \"千石连续两次提到我的名字,显然是需要我的回应。作为日常交流,应该给予友善简短的答复。\",\n \"emoji_query\": \"疑惑的歪头\"\n}\n```",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"咋啦千石~\",\n \"emoji_query\": \"好奇的询问\"\n}\n```",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "decide_reply_action\n```json\n{\n \"action\": \"text_reply\",\n \"content\": \"我在呢~怎么啦?\",\n \"emoji_query\": \"友好的关心\",\n \"reasoning\": \"千石连续两次呼唤我的名字,显然是有事情要找我或想引起我的注意。根据回复原则2(有人提到你但未回应),应该用友善的文字进行回应,并附上表达关心的表情符号来延...",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"千石怎么啦~\",\n \"emoji_query\": \"好奇的探询\"\n}\n```",
+ "推理内容摘要": ""
+ },
+ {
+ "内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"怎么啦?突然叫我两次\",\n \"emoji_query\": \"好奇的疑问\"\n}\n```",
+ "推理内容摘要": ""
+ }
+ ],
+ "使用工具调用": {
+ "平均耗时": 8.139546775817871,
+ "最短耗时": 4.9980738162994385,
+ "最长耗时": 18.803313732147217,
+ "标准差": 4.008772720760647,
+ "所有耗时": [
+ 5.81,
+ 18.8,
+ 6.06,
+ 8.06,
+ 10.07,
+ 6.34,
+ 7.9,
+ 6.66,
+ 5.0,
+ 6.69
+ ]
+ },
+ "使用工具调用_详细响应": [
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ },
+ {
+ "内容摘要": "",
+ "推理内容摘要": "",
+ "工具调用数量": 0,
+ "工具调用详情": []
+ }
+ ],
+ "差异百分比": 77.07
+}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 0fcb31f83..06068d888 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/scripts/import_openie.py b/scripts/import_openie.py
new file mode 100644
index 000000000..26cbd8ce1
--- /dev/null
+++ b/scripts/import_openie.py
@@ -0,0 +1,167 @@
+# try:
+# import src.plugins.knowledge.lib.quick_algo
+# except ImportError:
+# print("未找到quick_algo库,无法使用quick_algo算法")
+# print("请安装quick_algo库 - 在lib.quick_algo中,执行命令:python setup.py build_ext --inplace")
+
+import sys
+import os
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+from typing import Dict, List
+
+from src.plugins.knowledge.src.lpmmconfig import PG_NAMESPACE, global_config
+from src.plugins.knowledge.src.embedding_store import EmbeddingManager
+from src.plugins.knowledge.src.llm_client import LLMClient
+from src.plugins.knowledge.src.open_ie import OpenIE
+from src.plugins.knowledge.src.kg_manager import KGManager
+from src.common.logger import get_module_logger
+from src.plugins.knowledge.src.utils.hash import get_sha256
+
+
+# 添加项目根目录到 sys.path
+
+
+logger = get_module_logger("LPMM知识库-OpenIE导入")
+
+
+def hash_deduplicate(
+ raw_paragraphs: Dict[str, str],
+ triple_list_data: Dict[str, List[List[str]]],
+ stored_pg_hashes: set,
+ stored_paragraph_hashes: set,
+):
+ """Hash去重
+
+ Args:
+ raw_paragraphs: 索引的段落原文
+ triple_list_data: 索引的三元组列表
+        stored_pg_hashes: 已存储的段落hash集合(Embedding库)
+        stored_paragraph_hashes: 已存储的段落hash集合(KG)
+
+ Returns:
+ new_raw_paragraphs: 去重后的段落
+ new_triple_list_data: 去重后的三元组
+ """
+ # 保存去重后的段落
+ new_raw_paragraphs = dict()
+ # 保存去重后的三元组
+ new_triple_list_data = dict()
+
+ for _, (raw_paragraph, triple_list) in enumerate(zip(raw_paragraphs.values(), triple_list_data.values())):
+ # 段落hash
+ paragraph_hash = get_sha256(raw_paragraph)
+ if ((PG_NAMESPACE + "-" + paragraph_hash) in stored_pg_hashes) and (paragraph_hash in stored_paragraph_hashes):
+ continue
+ new_raw_paragraphs[paragraph_hash] = raw_paragraph
+ new_triple_list_data[paragraph_hash] = triple_list
+
+ return new_raw_paragraphs, new_triple_list_data
+
+
+def handle_import_openie(openie_data: OpenIE, embed_manager: EmbeddingManager, kg_manager: KGManager) -> bool:
+ # 从OpenIE数据中提取段落原文与三元组列表
+ # 索引的段落原文
+ raw_paragraphs = openie_data.extract_raw_paragraph_dict()
+ # 索引的实体列表
+ entity_list_data = openie_data.extract_entity_dict()
+ # 索引的三元组列表
+ triple_list_data = openie_data.extract_triple_dict()
+ if len(raw_paragraphs) != len(entity_list_data) or len(raw_paragraphs) != len(triple_list_data):
+ logger.error("OpenIE数据存在异常")
+ return False
+ # 将索引换为对应段落的hash值
+ logger.info("正在进行段落去重与重索引")
+ raw_paragraphs, triple_list_data = hash_deduplicate(
+ raw_paragraphs,
+ triple_list_data,
+ embed_manager.stored_pg_hashes,
+ kg_manager.stored_paragraph_hashes,
+ )
+ if len(raw_paragraphs) != 0:
+ # 获取嵌入并保存
+ logger.info(f"段落去重完成,剩余待处理的段落数量:{len(raw_paragraphs)}")
+ logger.info("开始Embedding")
+ embed_manager.store_new_data_set(raw_paragraphs, triple_list_data)
+ # Embedding-Faiss重索引
+ logger.info("正在重新构建向量索引")
+ embed_manager.rebuild_faiss_index()
+ logger.info("向量索引构建完成")
+ embed_manager.save_to_file()
+ logger.info("Embedding完成")
+ # 构建新段落的RAG
+ logger.info("开始构建RAG")
+ kg_manager.build_kg(triple_list_data, embed_manager)
+ kg_manager.save_to_file()
+ logger.info("RAG构建完成")
+ else:
+ logger.info("无新段落需要处理")
+ return True
+
+
+def main():
+ # 新增确认提示
+ print("=== 重要操作确认 ===")
+ print("OpenIE导入时会大量发送请求,可能会撞到请求速度上限,请注意选用的模型")
+ print("同之前样例:在本地模型下,在70分钟内我们发送了约8万条请求,在网络允许下,速度会更快")
+ print("推荐使用硅基流动的Pro/BAAI/bge-m3")
+ print("每百万Token费用为0.7元")
+ print("知识导入时,会消耗大量系统资源,建议在较好配置电脑上运行")
+ print("同上样例,导入时10700K几乎跑满,14900HX占用80%,峰值内存占用约3G")
+ confirm = input("确认继续执行?(y/n): ").strip().lower()
+ if confirm != "y":
+ logger.info("用户取消操作")
+ print("操作已取消")
+ sys.exit(1)
+ print("\n" + "=" * 40 + "\n")
+
+ logger.info("----开始导入openie数据----\n")
+
+ logger.info("创建LLM客户端")
+ llm_client_list = dict()
+ for key in global_config["llm_providers"]:
+ llm_client_list[key] = LLMClient(
+ global_config["llm_providers"][key]["base_url"],
+ global_config["llm_providers"][key]["api_key"],
+ )
+
+ # 初始化Embedding库
+    embed_manager = EmbeddingManager(llm_client_list[global_config["embedding"]["provider"]])
+ logger.info("正在从文件加载Embedding库")
+ try:
+ embed_manager.load_from_file()
+ except Exception as e:
+ logger.error("从文件加载Embedding库时发生错误:{}".format(e))
+ logger.info("Embedding库加载完成")
+ # 初始化KG
+ kg_manager = KGManager()
+ logger.info("正在从文件加载KG")
+ try:
+ kg_manager.load_from_file()
+ except Exception as e:
+ logger.error("从文件加载KG时发生错误:{}".format(e))
+ logger.info("KG加载完成")
+
+ logger.info(f"KG节点数量:{len(kg_manager.graph.get_node_list())}")
+ logger.info(f"KG边数量:{len(kg_manager.graph.get_edge_list())}")
+
+ # 数据比对:Embedding库与KG的段落hash集合
+ for pg_hash in kg_manager.stored_paragraph_hashes:
+ key = PG_NAMESPACE + "-" + pg_hash
+ if key not in embed_manager.stored_pg_hashes:
+ logger.warning(f"KG中存在Embedding库中不存在的段落:{key}")
+
+ logger.info("正在导入OpenIE数据文件")
+ try:
+ openie_data = OpenIE.load()
+ except Exception as e:
+ logger.error("导入OpenIE数据文件时发生错误:{}".format(e))
+ return False
+ if handle_import_openie(openie_data, embed_manager, kg_manager) is False:
+ logger.error("处理OpenIE数据时发生错误")
+ return False
+ return None
+
+
+if __name__ == "__main__":
+ main()
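
Usage note for `hash_deduplicate()` above: a paragraph is skipped only when its hash is present both in the embedding store (keyed as `PG_NAMESPACE + "-" + hash`) and in the KG's `stored_paragraph_hashes`. The snippet below merely demonstrates that key convention; the namespace value and the local `get_sha256()` helper are stand-ins for the real ones in `lpmmconfig` and `utils.hash`.

```python
# Demonstration of the dedup key convention used by hash_deduplicate().
import hashlib

PG_NAMESPACE = "paragraph"  # placeholder; the real value comes from lpmmconfig


def get_sha256(text: str) -> str:
    # stand-in for src.plugins.knowledge.src.utils.hash.get_sha256
    return hashlib.sha256(text.encode("utf-8")).hexdigest()


raw_paragraph = "麦麦是一个基于大语言模型的聊天机器人。"
paragraph_hash = get_sha256(raw_paragraph)
embedding_key = PG_NAMESPACE + "-" + paragraph_hash

stored_pg_hashes = {embedding_key}          # simulated keys already in the embedding store
stored_paragraph_hashes = {paragraph_hash}  # simulated hashes already in the KG

already_imported = (embedding_key in stored_pg_hashes) and (paragraph_hash in stored_paragraph_hashes)
print(already_imported)  # True -> this paragraph would be skipped
```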
diff --git a/scripts/info_extraction.py b/scripts/info_extraction.py
new file mode 100644
index 000000000..fdb445285
--- /dev/null
+++ b/scripts/info_extraction.py
@@ -0,0 +1,178 @@
+import json
+import os
+import signal
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from threading import Lock, Event
+import sys
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+# 添加项目根目录到 sys.path
+
+import tqdm
+
+from src.common.logger import get_module_logger
+from src.plugins.knowledge.src.lpmmconfig import global_config
+from src.plugins.knowledge.src.ie_process import info_extract_from_str
+from src.plugins.knowledge.src.llm_client import LLMClient
+from src.plugins.knowledge.src.open_ie import OpenIE
+from src.plugins.knowledge.src.raw_processing import load_raw_data
+
+logger = get_module_logger("LPMM知识库-信息提取")
+
+TEMP_DIR = "./temp"
+
+# 创建一个线程安全的锁,用于保护文件操作和共享数据
+file_lock = Lock()
+open_ie_doc_lock = Lock()
+
+# 创建一个事件标志,用于控制程序终止
+shutdown_event = Event()
+
+
+def process_single_text(pg_hash, raw_data, llm_client_list):
+ """处理单个文本的函数,用于线程池"""
+ temp_file_path = f"{TEMP_DIR}/{pg_hash}.json"
+
+ # 使用文件锁检查和读取缓存文件
+ with file_lock:
+ if os.path.exists(temp_file_path):
+ try:
+ # 存在对应的提取结果
+ logger.info(f"找到缓存的提取结果:{pg_hash}")
+ with open(temp_file_path, "r", encoding="utf-8") as f:
+ return json.load(f), None
+ except json.JSONDecodeError:
+ # 如果JSON文件损坏,删除它并重新处理
+ logger.warning(f"缓存文件损坏,重新处理:{pg_hash}")
+ os.remove(temp_file_path)
+
+ entity_list, rdf_triple_list = info_extract_from_str(
+ llm_client_list[global_config["entity_extract"]["llm"]["provider"]],
+ llm_client_list[global_config["rdf_build"]["llm"]["provider"]],
+ raw_data,
+ )
+ if entity_list is None or rdf_triple_list is None:
+ return None, pg_hash
+ else:
+ doc_item = {
+ "idx": pg_hash,
+ "passage": raw_data,
+ "extracted_entities": entity_list,
+ "extracted_triples": rdf_triple_list,
+ }
+ # 保存临时提取结果
+ with file_lock:
+ try:
+ with open(temp_file_path, "w", encoding="utf-8") as f:
+ json.dump(doc_item, f, ensure_ascii=False, indent=4)
+ except Exception as e:
+ logger.error(f"保存缓存文件失败:{pg_hash}, 错误:{e}")
+ # 如果保存失败,确保不会留下损坏的文件
+ if os.path.exists(temp_file_path):
+ os.remove(temp_file_path)
+ # 设置shutdown_event以终止程序
+ shutdown_event.set()
+ return None, pg_hash
+ return doc_item, None
+
+
+def signal_handler(signum, frame):
+ """处理Ctrl+C信号"""
+ logger.info("\n接收到中断信号,正在优雅地关闭程序...")
+ shutdown_event.set()
+
+
+def main():
+ # 设置信号处理器
+ signal.signal(signal.SIGINT, signal_handler)
+
+ # 新增用户确认提示
+ print("=== 重要操作确认 ===")
+ print("实体提取操作将会花费较多资金和时间,建议在空闲时段执行。")
+ print("举例:600万字全剧情,提取选用deepseek v3 0324,消耗约40元,约3小时。")
+ print("建议使用硅基流动的非Pro模型")
+ print("或者使用可以用赠金抵扣的Pro模型")
+ print("请确保账户余额充足,并且在执行前确认无误。")
+ confirm = input("确认继续执行?(y/n): ").strip().lower()
+ if confirm != "y":
+ logger.info("用户取消操作")
+ print("操作已取消")
+ sys.exit(1)
+ print("\n" + "=" * 40 + "\n")
+
+ logger.info("--------进行信息提取--------\n")
+
+ logger.info("创建LLM客户端")
+ llm_client_list = dict()
+ for key in global_config["llm_providers"]:
+ llm_client_list[key] = LLMClient(
+ global_config["llm_providers"][key]["base_url"],
+ global_config["llm_providers"][key]["api_key"],
+ )
+
+ logger.info("正在加载原始数据")
+ sha256_list, raw_datas = load_raw_data()
+ logger.info("原始数据加载完成\n")
+
+ # 创建临时目录
+ if not os.path.exists(f"{TEMP_DIR}"):
+ os.makedirs(f"{TEMP_DIR}")
+
+ failed_sha256 = []
+ open_ie_doc = []
+
+    # 创建线程池,最大线程数由配置项 info_extraction.workers 决定
+ workers = global_config["info_extraction"]["workers"]
+ with ThreadPoolExecutor(max_workers=workers) as executor:
+ # 提交所有任务到线程池
+ future_to_hash = {
+ executor.submit(process_single_text, pg_hash, raw_data, llm_client_list): pg_hash
+ for pg_hash, raw_data in zip(sha256_list, raw_datas)
+ }
+
+ # 使用tqdm显示进度
+ with tqdm.tqdm(total=len(future_to_hash), postfix="正在进行提取:") as pbar:
+ # 处理完成的任务
+ try:
+ for future in as_completed(future_to_hash):
+ if shutdown_event.is_set():
+ # 取消所有未完成的任务
+ for f in future_to_hash:
+ if not f.done():
+ f.cancel()
+ break
+
+ doc_item, failed_hash = future.result()
+ if failed_hash:
+ failed_sha256.append(failed_hash)
+ logger.error(f"提取失败:{failed_hash}")
+ elif doc_item:
+ with open_ie_doc_lock:
+ open_ie_doc.append(doc_item)
+ pbar.update(1)
+ except KeyboardInterrupt:
+ # 如果在这里捕获到KeyboardInterrupt,说明signal_handler可能没有正常工作
+ logger.info("\n接收到中断信号,正在优雅地关闭程序...")
+ shutdown_event.set()
+ # 取消所有未完成的任务
+ for f in future_to_hash:
+ if not f.done():
+ f.cancel()
+
+ # 保存信息提取结果
+ sum_phrase_chars = sum([len(e) for chunk in open_ie_doc for e in chunk["extracted_entities"]])
+ sum_phrase_words = sum([len(e.split()) for chunk in open_ie_doc for e in chunk["extracted_entities"]])
+ num_phrases = sum([len(chunk["extracted_entities"]) for chunk in open_ie_doc])
+ openie_obj = OpenIE(
+ open_ie_doc,
+ round(sum_phrase_chars / num_phrases, 4),
+ round(sum_phrase_words / num_phrases, 4),
+ )
+ OpenIE.save(openie_obj)
+
+ logger.info("--------信息提取完成--------")
+ logger.info(f"提取失败的文段SHA256:{failed_sha256}")
+
+
+if __name__ == "__main__":
+ main()
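
Note: the script above caches each paragraph's extraction result as `temp/<hash>.json`, so an interrupted run can resume and a corrupted cache file is deleted and re-extracted. The self-contained sketch below shows that cache check in isolation; it assumes, as the pipeline appears to, that the hash is the SHA-256 of the paragraph text.

```python
# Illustrative cache check, mirroring process_single_text() above.
# Assumption: the cache key is sha256(paragraph text).
import hashlib
import json
import os

TEMP_DIR = "./temp"
paragraph = "示例段落文本"
pg_hash = hashlib.sha256(paragraph.encode("utf-8")).hexdigest()
temp_file_path = f"{TEMP_DIR}/{pg_hash}.json"

if os.path.exists(temp_file_path):
    try:
        with open(temp_file_path, "r", encoding="utf-8") as f:
            doc_item = json.load(f)  # cached result: idx / passage / entities / triples
        print("cache hit:", doc_item["idx"])
    except json.JSONDecodeError:
        os.remove(temp_file_path)  # a corrupted cache file is removed and re-extracted
        print("corrupted cache removed")
else:
    print("no cache, would call info_extract_from_str()")
```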
diff --git a/scripts/interest_monitor_gui.py b/scripts/interest_monitor_gui.py
new file mode 100644
index 000000000..1f03b9695
--- /dev/null
+++ b/scripts/interest_monitor_gui.py
@@ -0,0 +1,667 @@
+import tkinter as tk
+from tkinter import ttk
+import time
+import os
+from datetime import datetime, timedelta
+import random
+from collections import deque
+import json # 引入 json
+
+# --- 引入 Matplotlib ---
+from matplotlib.figure import Figure
+from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
+import matplotlib.dates as mdates # 用于处理日期格式
+import matplotlib # 导入 matplotlib
+
+# --- 配置 ---
+LOG_FILE_PATH = os.path.join("logs", "interest", "interest_history.log") # 指向历史日志文件
+REFRESH_INTERVAL_MS = 200 # 刷新间隔 (毫秒) - 可以适当调长,因为读取文件可能耗时
+WINDOW_TITLE = "Interest Monitor (Live History)"
+MAX_HISTORY_POINTS = 1000 # 图表上显示的最大历史点数 (可以增加)
+MAX_STREAMS_TO_DISPLAY = 15 # 最多显示多少个聊天流的折线图 (可以增加)
+MAX_QUEUE_SIZE = 30 # 新增:历史想法队列最大长度
+
+# *** 添加 Matplotlib 中文字体配置 ***
+# 尝试使用 'SimHei' 或 'Microsoft YaHei',如果找不到,matplotlib 会回退到默认字体
+# 确保你的系统上安装了这些字体
+matplotlib.rcParams["font.sans-serif"] = ["SimHei", "Microsoft YaHei"]
+matplotlib.rcParams["axes.unicode_minus"] = False # 解决负号'-'显示为方块的问题
+
+
+class InterestMonitorApp:
+ def __init__(self, root):
+ self.root = root
+ self.root.title(WINDOW_TITLE)
+ self.root.geometry("1800x800") # 调整窗口大小以适应图表
+
+ # --- 数据存储 ---
+ # 使用 deque 来存储有限的历史数据点
+ # key: stream_id, value: deque([(timestamp, interest_level), ...])
+ self.stream_history = {}
+ # key: stream_id, value: deque([(timestamp, reply_probability), ...])
+ self.probability_history = {}
+ self.stream_colors = {} # 为每个 stream 分配颜色
+ self.stream_display_names = {} # 存储显示名称 (group_name)
+ self.selected_stream_id = tk.StringVar() # 用于 Combobox 绑定
+
+ # --- 新增:存储其他参数 ---
+ # 顶层信息
+ self.latest_main_mind = tk.StringVar(value="N/A")
+ self.latest_mai_state = tk.StringVar(value="N/A")
+ self.latest_subflow_count = tk.IntVar(value=0)
+ # 子流最新状态 (key: stream_id)
+ self.stream_sub_minds = {}
+ self.stream_chat_states = {}
+ self.stream_threshold_status = {}
+ self.stream_last_active = {}
+ self.stream_last_interaction = {}
+ # 用于显示单个流详情的 StringVar
+ self.single_stream_sub_mind = tk.StringVar(value="想法: N/A")
+ self.single_stream_chat_state = tk.StringVar(value="状态: N/A")
+ self.single_stream_threshold = tk.StringVar(value="阈值: N/A")
+ self.single_stream_last_active = tk.StringVar(value="活跃: N/A")
+ self.single_stream_last_interaction = tk.StringVar(value="交互: N/A")
+
+ # 新增:历史想法队列
+ self.main_mind_history = deque(maxlen=MAX_QUEUE_SIZE)
+ self.last_main_mind_timestamp = 0 # 记录最后一条main_mind的时间戳
+
+ # --- UI 元素 ---
+
+ # --- 新增:顶部全局信息框架 ---
+ self.global_info_frame = ttk.Frame(root, padding="5 0 5 5") # 顶部内边距调整
+ self.global_info_frame.pack(side=tk.TOP, fill=tk.X, pady=(5, 0)) # 底部外边距为0
+
+ ttk.Label(self.global_info_frame, text="全局状态:").pack(side=tk.LEFT, padx=(0, 10))
+ ttk.Label(self.global_info_frame, textvariable=self.latest_mai_state).pack(side=tk.LEFT, padx=5)
+ ttk.Label(self.global_info_frame, text="想法:").pack(side=tk.LEFT, padx=(10, 0))
+ ttk.Label(self.global_info_frame, textvariable=self.latest_main_mind).pack(side=tk.LEFT, padx=5)
+ ttk.Label(self.global_info_frame, text="子流数:").pack(side=tk.LEFT, padx=(10, 0))
+ ttk.Label(self.global_info_frame, textvariable=self.latest_subflow_count).pack(side=tk.LEFT, padx=5)
+
+ # 创建 Notebook (选项卡控件)
+ self.notebook = ttk.Notebook(root)
+ # 修改:fill 和 expand,让 notebook 填充剩余空间
+ self.notebook.pack(pady=(5, 0), padx=10, fill=tk.BOTH, expand=1) # 顶部外边距改小
+
+ # --- 第一个选项卡:所有流 ---
+ self.frame_all = ttk.Frame(self.notebook, padding="5 5 5 5")
+ self.notebook.add(self.frame_all, text="所有聊天流")
+
+ # 状态标签 (移动到最底部)
+ self.status_label = tk.Label(root, text="Initializing...", anchor="w", fg="grey")
+ self.status_label.pack(side=tk.BOTTOM, fill=tk.X, padx=10, pady=(0, 5)) # 调整边距
+
+ # Matplotlib 图表设置 (用于第一个选项卡)
+ self.fig = Figure(figsize=(5, 4), dpi=100)
+ self.ax = self.fig.add_subplot(111)
+ # 配置在 update_plot 中进行,避免重复
+
+ # 创建 Tkinter 画布嵌入 Matplotlib 图表 (用于第一个选项卡)
+ self.canvas = FigureCanvasTkAgg(self.fig, master=self.frame_all) # <--- 放入 frame_all
+ self.canvas_widget = self.canvas.get_tk_widget()
+ self.canvas_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
+
+ # --- 第二个选项卡:单个流 ---
+ self.frame_single = ttk.Frame(self.notebook, padding="5 5 5 5")
+ self.notebook.add(self.frame_single, text="单个聊天流详情")
+
+ # 单个流选项卡的上部控制区域
+ self.control_frame_single = ttk.Frame(self.frame_single)
+ self.control_frame_single.pack(side=tk.TOP, fill=tk.X, pady=5)
+
+ ttk.Label(self.control_frame_single, text="选择聊天流:").pack(side=tk.LEFT, padx=(0, 5))
+ self.stream_selector = ttk.Combobox(
+ self.control_frame_single, textvariable=self.selected_stream_id, state="readonly", width=50
+ )
+ self.stream_selector.pack(side=tk.LEFT, fill=tk.X, expand=True)
+        self.stream_selector.bind("<<ComboboxSelected>>", self.on_stream_selected)
+
+ # --- 新增:单个流详情显示区域 ---
+ self.single_stream_details_frame = ttk.Frame(self.frame_single, padding="5 5 5 0")
+ self.single_stream_details_frame.pack(side=tk.TOP, fill=tk.X, pady=(0, 5))
+
+ ttk.Label(self.single_stream_details_frame, textvariable=self.single_stream_sub_mind).pack(side=tk.LEFT, padx=5)
+ ttk.Label(self.single_stream_details_frame, textvariable=self.single_stream_chat_state).pack(
+ side=tk.LEFT, padx=5
+ )
+ ttk.Label(self.single_stream_details_frame, textvariable=self.single_stream_threshold).pack(
+ side=tk.LEFT, padx=5
+ )
+ ttk.Label(self.single_stream_details_frame, textvariable=self.single_stream_last_active).pack(
+ side=tk.LEFT, padx=5
+ )
+ ttk.Label(self.single_stream_details_frame, textvariable=self.single_stream_last_interaction).pack(
+ side=tk.LEFT, padx=5
+ )
+
+ # Matplotlib 图表设置 (用于第二个选项卡)
+ self.fig_single = Figure(figsize=(5, 4), dpi=100)
+ # 修改:创建两个子图,一个显示兴趣度,一个显示概率
+ self.ax_single_interest = self.fig_single.add_subplot(211) # 2行1列的第1个
+ self.ax_single_probability = self.fig_single.add_subplot(
+ 212, sharex=self.ax_single_interest
+ ) # 2行1列的第2个,共享X轴
+
+ # 创建 Tkinter 画布嵌入 Matplotlib 图表 (用于第二个选项卡)
+ self.canvas_single = FigureCanvasTkAgg(self.fig_single, master=self.frame_single) # <--- 放入 frame_single
+ self.canvas_widget_single = self.canvas_single.get_tk_widget()
+ self.canvas_widget_single.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
+
+ # --- 新增第三个选项卡:麦麦历史想法 ---
+ self.frame_mind_history = ttk.Frame(self.notebook, padding="5 5 5 5")
+ self.notebook.add(self.frame_mind_history, text="麦麦历史想法")
+
+ # 聊天框样式的文本框(只读)+ 滚动条
+ self.mind_text_scroll = tk.Scrollbar(self.frame_mind_history)
+ self.mind_text_scroll.pack(side=tk.RIGHT, fill=tk.Y)
+ self.mind_text = tk.Text(
+ self.frame_mind_history,
+ height=25,
+ state="disabled",
+ wrap="word",
+ font=("微软雅黑", 12),
+ yscrollcommand=self.mind_text_scroll.set,
+ )
+ self.mind_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, padx=5, pady=5)
+ self.mind_text_scroll.config(command=self.mind_text.yview)
+
+ # --- 初始化和启动刷新 ---
+ self.update_display() # 首次加载并开始刷新循环
+
+ def on_stream_selected(self, event=None):
+ """当 Combobox 选择改变时调用,更新单个流的图表"""
+ self.update_single_stream_plot()
+
+ def get_random_color(self):
+ """生成随机颜色用于区分线条"""
+ return "#{:06x}".format(random.randint(0, 0xFFFFFF))
+
+ def load_main_mind_history(self):
+ """只读取包含main_mind的日志行,维护历史想法队列"""
+ if not os.path.exists(LOG_FILE_PATH):
+ return
+
+ main_mind_entries = []
+ try:
+ with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
+ for line in f:
+ try:
+ log_entry = json.loads(line.strip())
+ if "main_mind" in log_entry:
+ ts = log_entry.get("timestamp", 0)
+ main_mind_entries.append((ts, log_entry))
+ except Exception:
+ continue
+ main_mind_entries.sort(key=lambda x: x[0])
+ recent_entries = main_mind_entries[-MAX_QUEUE_SIZE:]
+ self.main_mind_history.clear()
+ for _ts, entry in recent_entries:
+ self.main_mind_history.append(entry)
+ if recent_entries:
+ self.last_main_mind_timestamp = recent_entries[-1][0]
+ # 首次加载时刷新
+ self.refresh_mind_text()
+ except Exception:
+ pass
+
+ def update_main_mind_history(self):
+ """实时监控log文件,发现新main_mind数据则更新队列和展示(仅有新数据时刷新)"""
+ if not os.path.exists(LOG_FILE_PATH):
+ return
+
+ new_entries = []
+ try:
+ with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
+ for line in reversed(list(f)):
+ try:
+ log_entry = json.loads(line.strip())
+ if "main_mind" in log_entry:
+ ts = log_entry.get("timestamp", 0)
+ if ts > self.last_main_mind_timestamp:
+ new_entries.append((ts, log_entry))
+ else:
+ break
+ except Exception:
+ continue
+ if new_entries:
+ for ts, entry in sorted(new_entries):
+ if len(self.main_mind_history) >= MAX_QUEUE_SIZE:
+ self.main_mind_history.popleft()
+ self.main_mind_history.append(entry)
+ self.last_main_mind_timestamp = ts
+ self.refresh_mind_text() # 只有有新数据时才刷新
+ except Exception:
+ pass
+
+ def refresh_mind_text(self):
+ """刷新聊天框样式的历史想法展示"""
+ self.mind_text.config(state="normal")
+ self.mind_text.delete(1.0, tk.END)
+ for entry in self.main_mind_history:
+ ts = entry.get("timestamp", 0)
+ dt_str = datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S") if ts else ""
+ main_mind = entry.get("main_mind", "")
+ mai_state = entry.get("mai_state", "")
+ subflow_count = entry.get("subflow_count", "")
+ msg = f"[{dt_str}] 状态:{mai_state} 子流:{subflow_count}\n{main_mind}\n\n"
+ self.mind_text.insert(tk.END, msg)
+ self.mind_text.see(tk.END)
+ self.mind_text.config(state="disabled")
+
+ def load_and_update_history(self):
+ """从 history log 文件加载数据并更新历史记录"""
+ if not os.path.exists(LOG_FILE_PATH):
+ self.set_status(f"Error: Log file not found at {LOG_FILE_PATH}", "red")
+ # 如果文件不存在,不清空现有数据,以便显示最后一次成功读取的状态
+ return
+
+ # *** Reset display names each time we reload ***
+ new_stream_history = {}
+ new_stream_display_names = {}
+ new_probability_history = {} # <--- 重置概率历史
+ # --- 新增:重置其他子流状态 --- (如果需要的话,但通常覆盖即可)
+ # self.stream_sub_minds = {}
+ # self.stream_chat_states = {}
+ # ... 等等 ...
+
+ read_count = 0
+ error_count = 0
+        # *** Calculate the timestamp threshold for the last 15 minutes ***
+        current_time = time.time()
+        time_threshold = current_time - (15 * 60)  # 15 minutes in seconds
+
+ try:
+ with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
+ for line in f:
+ read_count += 1
+ try:
+ log_entry = json.loads(line.strip())
+ timestamp = log_entry.get("timestamp") # 获取顶层时间戳
+
+ # *** 时间过滤 ***
+ if timestamp is None:
+ error_count += 1
+ continue # 跳过没有时间戳的行
+ try:
+ entry_timestamp = float(timestamp)
+ if entry_timestamp < time_threshold:
+ continue # 跳过时间过早的条目
+ except (ValueError, TypeError):
+ error_count += 1
+ continue # 跳过时间戳格式错误的行
+
+ # --- 新增:更新顶层信息 (使用最后一个有效行的数据) ---
+ self.latest_main_mind.set(
+ log_entry.get("main_mind", self.latest_main_mind.get())
+ ) # 保留旧值如果缺失
+ self.latest_mai_state.set(log_entry.get("mai_state", self.latest_mai_state.get()))
+ self.latest_subflow_count.set(log_entry.get("subflow_count", self.latest_subflow_count.get()))
+
+ # --- 修改开始:迭代 subflows ---
+ subflows = log_entry.get("subflows")
+ if not isinstance(subflows, list): # 检查 subflows 是否存在且为列表
+ error_count += 1
+ continue # 跳过没有 subflows 或格式无效的行
+
+ for subflow_entry in subflows:
+ stream_id = subflow_entry.get("stream_id")
+ interest_level = subflow_entry.get("interest_level")
+ # 获取 group_name,如果不存在则回退到 stream_id
+ group_name = subflow_entry.get("group_name", stream_id)
+ # reply_probability = subflow_entry.get("reply_probability") # 获取概率值 # <-- 注释掉旧行
+ start_hfc_probability = subflow_entry.get(
+ "start_hfc_probability"
+ ) # <-- 添加新行,读取新字段
+
+ # *** 检查必要的字段 ***
+ # 注意:时间戳已在顶层检查过
+ if stream_id is None or interest_level is None:
+ # 这里可以选择记录子流错误,但暂时跳过
+ continue # 跳过无效的 subflow 条目
+
+ # 确保 interest_level 可以转换为浮点数
+ try:
+ interest_level_float = float(interest_level)
+ except (ValueError, TypeError):
+ continue # 跳过 interest_level 无效的 subflow
+
+ # 如果是第一次读到这个 stream_id,则创建 deque
+ if stream_id not in new_stream_history:
+ new_stream_history[stream_id] = deque(maxlen=MAX_HISTORY_POINTS)
+ new_probability_history[stream_id] = deque(maxlen=MAX_HISTORY_POINTS) # 创建概率 deque
+ # 检查是否已有颜色,没有则分配
+ if stream_id not in self.stream_colors:
+ self.stream_colors[stream_id] = self.get_random_color()
+
+ # *** 存储此 stream_id 最新的显示名称 ***
+ new_stream_display_names[stream_id] = group_name
+
+ # --- 新增:存储其他子流信息 ---
+ self.stream_sub_minds[stream_id] = subflow_entry.get("sub_mind", "N/A")
+ self.stream_chat_states[stream_id] = subflow_entry.get("sub_chat_state", "N/A")
+ self.stream_threshold_status[stream_id] = subflow_entry.get("is_above_threshold", False)
+ self.stream_last_active[stream_id] = subflow_entry.get(
+ "chat_state_changed_time"
+ ) # 存储原始时间戳
+
+ # 添加数据点 (使用顶层时间戳)
+ new_stream_history[stream_id].append((entry_timestamp, interest_level_float))
+
+ # 添加概率数据点 (如果存在且有效)
+ # if reply_probability is not None: # <-- 注释掉旧判断
+ if start_hfc_probability is not None: # <-- 修改判断条件
+ try:
+ # 尝试将概率转换为浮点数
+ # probability_float = float(reply_probability) # <-- 注释掉旧转换
+ probability_float = float(start_hfc_probability) # <-- 使用新变量
+ new_probability_history[stream_id].append((entry_timestamp, probability_float))
+ except (TypeError, ValueError):
+ # 如果概率值无效,可以跳过或记录一个默认值,这里跳过
+ pass
+ # --- 修改结束 ---
+
+ except json.JSONDecodeError:
+ error_count += 1
+ # logger.warning(f"Skipping invalid JSON line: {line.strip()}")
+ continue # 跳过无法解析的行
+ # except (TypeError, ValueError) as e: # 这个外层 catch 可能不再需要,因为类型错误在内部处理了
+ # error_count += 1
+ # # logger.warning(f"Skipping line due to data type error ({e}): {line.strip()}")
+ # continue # 跳过数据类型错误的行
+
+ # 读取完成后,用新数据替换旧数据
+ self.stream_history = new_stream_history
+ self.stream_display_names = new_stream_display_names # *** Update display names ***
+ self.probability_history = new_probability_history # <--- 更新概率历史
+ # 清理不再存在的 stream_id 的附加信息 (可选,但保持一致性)
+ streams_to_remove = set(self.stream_sub_minds.keys()) - set(new_stream_history.keys())
+ for sid in streams_to_remove:
+ self.stream_sub_minds.pop(sid, None)
+ self.stream_chat_states.pop(sid, None)
+ self.stream_threshold_status.pop(sid, None)
+ self.stream_last_active.pop(sid, None)
+ self.stream_last_interaction.pop(sid, None)
+ # 颜色和显示名称也应该清理,但当前逻辑是保留旧颜色
+ # self.stream_colors.pop(sid, None)
+ status_msg = f"Data loaded at {datetime.now().strftime('%H:%M:%S')}. Lines read: {read_count}."
+ if error_count > 0:
+ status_msg += f" Skipped {error_count} invalid lines."
+ self.set_status(status_msg, "orange")
+ else:
+ self.set_status(status_msg, "green")
+
+ except IOError as e:
+ self.set_status(f"Error reading file {LOG_FILE_PATH}: {e}", "red")
+ except Exception as e:
+ self.set_status(f"An unexpected error occurred during loading: {e}", "red")
+
+ # --- 更新 Combobox ---
+ self.update_stream_selector()
+
+ def update_stream_selector(self):
+ """更新单个流选项卡中的 Combobox 列表"""
+ # 创建 (display_name, stream_id) 对的列表,按 display_name 排序
+ available_streams = sorted(
+ [
+ (name, sid)
+ for sid, name in self.stream_display_names.items()
+ if sid in self.stream_history and self.stream_history[sid]
+ ],
+ key=lambda item: item[0], # 按显示名称排序
+ )
+
+ # 更新 Combobox 的值 (仅显示 display_name)
+ self.stream_selector["values"] = [name for name, sid in available_streams]
+
+ # 检查当前选中的 stream_id 是否仍然有效
+ current_selection_name = self.selected_stream_id.get()
+ current_selection_valid = any(name == current_selection_name for name, sid in available_streams)
+
+ if not current_selection_valid and available_streams:
+ # 如果当前选择无效,并且有可选流,则默认选中第一个
+ self.selected_stream_id.set(available_streams[0][0])
+            # 手动触发一次更新,因为 set() 不会触发 <<ComboboxSelected>> 事件
+ self.update_single_stream_plot()
+ elif not available_streams:
+ # 如果没有可选流,清空选择
+ self.selected_stream_id.set("")
+ self.update_single_stream_plot() # 清空图表
+
+ def update_all_streams_plot(self):
+ """更新第一个选项卡的 Matplotlib 图表 (显示所有流)"""
+ self.ax.clear() # 清除旧图
+ # *** 设置中文标题和标签 ***
+ self.ax.set_title("兴趣度随时间变化图 (所有活跃流)")
+ self.ax.set_xlabel("时间")
+ self.ax.set_ylabel("兴趣度")
+ self.ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
+ self.ax.grid(True)
+ self.ax.set_ylim(0, 10) # 固定 Y 轴范围 0-10
+
+ # 只绘制最新的 N 个 stream (按最后记录的兴趣度排序)
+ # 注意:现在是基于文件读取的快照排序,可能不是实时最新
+ active_streams = sorted(
+ self.stream_history.items(),
+ key=lambda item: item[1][-1][1] if item[1] else 0, # 按最后兴趣度排序
+ reverse=True,
+ )[:MAX_STREAMS_TO_DISPLAY]
+
+ all_times = [] # 用于确定 X 轴范围
+
+ for stream_id, history in active_streams:
+ if not history:
+ continue
+
+ timestamps, interests = zip(*history)
+ # 将 time.time() 时间戳转换为 matplotlib 可识别的日期格式
+ try:
+ mpl_dates = [datetime.fromtimestamp(ts) for ts in timestamps]
+ all_times.extend(mpl_dates) # 收集所有时间点
+
+ # *** Use display name for label ***
+ display_label = self.stream_display_names.get(stream_id, stream_id)
+
+ self.ax.plot(
+ mpl_dates,
+ interests,
+ label=display_label, # *** Use display_label ***
+ color=self.stream_colors.get(stream_id, "grey"),
+ marker=".",
+ markersize=3,
+ linestyle="-",
+ linewidth=1,
+ )
+ except ValueError as e:
+ print(f"Skipping plot for {stream_id} due to invalid timestamp: {e}")
+ continue
+
+ if all_times:
+ # 根据数据动态调整 X 轴范围,留一点边距
+ min_time = min(all_times)
+ max_time = max(all_times)
+ # delta = max_time - min_time
+ # self.ax.set_xlim(min_time - delta * 0.05, max_time + delta * 0.05)
+ self.ax.set_xlim(min_time, max_time)
+
+ # 自动格式化X轴标签
+ self.fig.autofmt_xdate()
+ else:
+ # 如果没有数据,设置一个默认的时间范围,例如最近一小时
+ now = datetime.now()
+ one_hour_ago = now - timedelta(hours=1)
+ self.ax.set_xlim(one_hour_ago, now)
+
+ # 添加图例
+ if active_streams:
+ # 调整图例位置和大小
+ # 字体已通过全局 matplotlib.rcParams 设置
+ self.ax.legend(loc="upper left", bbox_to_anchor=(1.02, 1), borderaxespad=0.0, fontsize="x-small")
+ # 调整布局,确保图例不被裁剪
+ self.fig.tight_layout(rect=[0, 0, 0.85, 1]) # 右侧留出空间给图例
+
+ self.canvas.draw() # 重绘画布
+
+ def update_single_stream_plot(self):
+ """更新第二个选项卡的 Matplotlib 图表 (显示单个选定的流)"""
+ self.ax_single_interest.clear()
+ self.ax_single_probability.clear()
+
+ # 设置子图标题和标签
+ self.ax_single_interest.set_title("兴趣度")
+ self.ax_single_interest.set_ylim(0, 10) # 固定 Y 轴范围 0-10
+
+ # self.ax_single_probability.set_title("回复评估概率") # <-- 注释掉旧标题
+ self.ax_single_probability.set_title("HFC 启动概率") # <-- 修改标题
+ self.ax_single_probability.set_xlabel("时间")
+ # self.ax_single_probability.set_ylabel("概率") # <-- 注释掉旧标签
+ self.ax_single_probability.set_ylabel("HFC 概率") # <-- 修改 Y 轴标签
+ self.ax_single_probability.grid(True)
+ self.ax_single_probability.set_ylim(0, 1.05) # 固定 Y 轴范围 0-1
+ self.ax_single_probability.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
+
+ selected_name = self.selected_stream_id.get()
+ selected_sid = None
+
+ # --- 新增:根据选中的名称找到 stream_id ---
+ if selected_name:
+ for sid, name in self.stream_display_names.items():
+ if name == selected_name:
+ selected_sid = sid
+ break
+
+ all_times = [] # 用于确定 X 轴范围
+
+ # --- 新增:绘制兴趣度图 ---
+ if selected_sid and selected_sid in self.stream_history and self.stream_history[selected_sid]:
+ history = self.stream_history[selected_sid]
+ timestamps, interests = zip(*history)
+ try:
+ mpl_dates = [datetime.fromtimestamp(ts) for ts in timestamps]
+ all_times.extend(mpl_dates)
+ self.ax_single_interest.plot(
+ mpl_dates,
+ interests,
+ color=self.stream_colors.get(selected_sid, "blue"),
+ marker=".",
+ markersize=3,
+ linestyle="-",
+ linewidth=1,
+ )
+ except ValueError as e:
+ print(f"Skipping interest plot for {selected_sid} due to invalid timestamp: {e}")
+
+ # --- 新增:绘制概率图 ---
+ if selected_sid and selected_sid in self.probability_history and self.probability_history[selected_sid]:
+ prob_history = self.probability_history[selected_sid]
+ prob_timestamps, probabilities = zip(*prob_history)
+ try:
+ prob_mpl_dates = [datetime.fromtimestamp(ts) for ts in prob_timestamps]
+ # 注意:概率图的时间点可能与兴趣度不同,也需要加入 all_times
+ all_times.extend(prob_mpl_dates)
+ self.ax_single_probability.plot(
+ prob_mpl_dates,
+ probabilities,
+ color=self.stream_colors.get(selected_sid, "green"), # 可以用不同颜色
+ marker=".",
+ markersize=3,
+ linestyle="-",
+ linewidth=1,
+ )
+ except ValueError as e:
+ print(f"Skipping probability plot for {selected_sid} due to invalid timestamp: {e}")
+
+ # --- 新增:调整 X 轴范围和格式 ---
+ if all_times:
+ min_time = min(all_times)
+ max_time = max(all_times)
+ # 设置共享的 X 轴范围
+ self.ax_single_interest.set_xlim(min_time, max_time)
+ # self.ax_single_probability.set_xlim(min_time, max_time) # sharex 会自动同步
+ # 自动格式化X轴标签 (应用到共享轴的最后一个子图上通常即可)
+ self.fig_single.autofmt_xdate()
+ else:
+ # 如果没有数据,设置一个默认的时间范围
+ now = datetime.now()
+ one_hour_ago = now - timedelta(hours=1)
+ self.ax_single_interest.set_xlim(one_hour_ago, now)
+ # self.ax_single_probability.set_xlim(one_hour_ago, now) # sharex 会自动同步
+
+ # --- 新增:更新单个流的详细信息标签 ---
+ self.update_single_stream_details(selected_sid)
+
+ # --- 新增:重新绘制画布 ---
+ self.canvas_single.draw()
+
+ def format_timestamp(self, ts):
+ """辅助函数:格式化时间戳,处理 None 或无效值"""
+ if ts is None:
+ return "N/A"
+ try:
+ # 假设 ts 是 float 类型的时间戳
+ dt_object = datetime.fromtimestamp(float(ts))
+ return dt_object.strftime("%Y-%m-%d %H:%M:%S")
+ except (ValueError, TypeError):
+ return "Invalid Time"
+
+ def update_single_stream_details(self, stream_id):
+ """更新单个流详情区域的标签内容"""
+ if stream_id:
+ sub_mind = self.stream_sub_minds.get(stream_id, "N/A")
+ chat_state = self.stream_chat_states.get(stream_id, "N/A")
+ threshold = self.stream_threshold_status.get(stream_id, False)
+ last_active_ts = self.stream_last_active.get(stream_id)
+ last_interaction_ts = self.stream_last_interaction.get(stream_id)
+
+ self.single_stream_sub_mind.set(f"想法: {sub_mind}")
+ self.single_stream_chat_state.set(f"状态: {chat_state}")
+ self.single_stream_threshold.set(f"阈值以上: {'是' if threshold else '否'}")
+ self.single_stream_last_active.set(f"最后活跃: {self.format_timestamp(last_active_ts)}")
+ self.single_stream_last_interaction.set(f"最后交互: {self.format_timestamp(last_interaction_ts)}")
+ else:
+ # 如果没有选择流,则清空详情
+ self.single_stream_sub_mind.set("想法: N/A")
+ self.single_stream_chat_state.set("状态: N/A")
+ self.single_stream_threshold.set("阈值: N/A")
+ self.single_stream_last_active.set("活跃: N/A")
+ self.single_stream_last_interaction.set("交互: N/A")
+
+ def update_display(self):
+ """主更新循环"""
+ try:
+ # --- 新增:首次加载历史想法 ---
+ if not hasattr(self, "_main_mind_loaded"):
+ self.load_main_mind_history()
+ self._main_mind_loaded = True
+ else:
+ self.update_main_mind_history() # 只有有新main_mind数据时才刷新界面
+ # *** 修改:分别调用两个图表的更新方法 ***
+ self.load_and_update_history() # 从文件加载数据并更新内部状态
+ self.update_all_streams_plot() # 更新所有流的图表
+ self.update_single_stream_plot() # 更新单个流的图表
+ except Exception as e:
+ # 提供更详细的错误信息
+ import traceback
+
+ error_msg = f"Error during update: {e}\n{traceback.format_exc()}"
+ self.set_status(error_msg, "red")
+ print(error_msg) # 打印详细错误到控制台
+
+ # 安排下一次刷新
+ self.root.after(REFRESH_INTERVAL_MS, self.update_display)
+
+ def set_status(self, message: str, color: str = "grey"):
+ """更新状态栏标签"""
+ # 限制状态栏消息长度
+ max_len = 150
+ display_message = (message[:max_len] + "...") if len(message) > max_len else message
+ self.status_label.config(text=display_message, fg=color)
+
+
+if __name__ == "__main__":
+ root = tk.Tk()
+ app = InterestMonitorApp(root)
+ root.mainloop()
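
Note: the monitor above expects `logs/interest/interest_history.log` to contain one JSON object per line with a top-level `timestamp`, `main_mind`, `mai_state`, `subflow_count` and a `subflows` list whose items carry `stream_id`, `group_name`, `interest_level`, `start_hfc_probability`, `sub_mind`, `sub_chat_state`, `is_above_threshold` and `chat_state_changed_time`. The entry below is reconstructed purely from the fields this GUI reads; all values are invented for illustration.

```python
# Hypothetical log record, built only from the fields the GUI parses above.
# Values are made up; the real writer lives elsewhere in MaiBot.
import json
import time

entry = {
    "timestamp": time.time(),
    "main_mind": "群里在聊新版本,我想看看大家的反馈",
    "mai_state": "NORMAL_CHAT",
    "subflow_count": 1,
    "subflows": [
        {
            "stream_id": "group_123456",
            "group_name": "麦麦测试群",
            "interest_level": 6.8,
            "start_hfc_probability": 0.32,
            "sub_mind": "有人在问LPMM怎么部署",
            "sub_chat_state": "CHAT",
            "is_above_threshold": True,
            "chat_state_changed_time": time.time() - 120,
        },
    ],
}
print(json.dumps(entry, ensure_ascii=False))  # one line == one log record
```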
diff --git a/scripts/raw_data_preprocessor.py b/scripts/raw_data_preprocessor.py
new file mode 100644
index 000000000..2fc30352e
--- /dev/null
+++ b/scripts/raw_data_preprocessor.py
@@ -0,0 +1,92 @@
+import json
+import os
+from pathlib import Path
+import sys # 新增系统模块导入
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+from src.common.logger import get_module_logger
+
+logger = get_module_logger("LPMM数据库-原始数据处理")
+
+# 添加项目根目录到 sys.path
+
+
+def check_and_create_dirs():
+ """检查并创建必要的目录"""
+ required_dirs = ["data/lpmm_raw_data", "data/imported_lpmm_data"]
+
+ for dir_path in required_dirs:
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+ logger.info(f"已创建目录: {dir_path}")
+
+
+def process_text_file(file_path):
+ """处理单个文本文件,返回段落列表"""
+ with open(file_path, "r", encoding="utf-8") as f:
+ raw = f.read()
+
+ paragraphs = []
+ paragraph = ""
+ for line in raw.split("\n"):
+ if line.strip() == "":
+ if paragraph != "":
+ paragraphs.append(paragraph.strip())
+ paragraph = ""
+ else:
+ paragraph += line + "\n"
+
+ if paragraph != "":
+ paragraphs.append(paragraph.strip())
+
+ return paragraphs
+
+
+def main():
+ # 新增用户确认提示
+ print("=== 重要操作确认 ===")
+ print("如果你并非第一次导入知识")
+ print("请先删除data/import.json文件,备份data/openie.json文件")
+ print("在进行知识库导入之前")
+ print("请修改config/lpmm_config.toml中的配置项")
+ confirm = input("确认继续执行?(y/n): ").strip().lower()
+ if confirm != "y":
+ logger.error("操作已取消")
+ sys.exit(1)
+ print("\n" + "=" * 40 + "\n")
+
+ # 检查并创建必要的目录
+ check_and_create_dirs()
+
+ # 检查输出文件是否存在
+ if os.path.exists("data/import.json"):
+ logger.error("错误: data/import.json 已存在,请先处理或删除该文件")
+ sys.exit(1)
+
+ if os.path.exists("data/openie.json"):
+ logger.error("错误: data/openie.json 已存在,请先处理或删除该文件")
+ sys.exit(1)
+
+ # 获取所有原始文本文件
+ raw_files = list(Path("data/lpmm_raw_data").glob("*.txt"))
+ if not raw_files:
+ logger.warning("警告: data/lpmm_raw_data 中没有找到任何 .txt 文件")
+ sys.exit(1)
+
+ # 处理所有文件
+ all_paragraphs = []
+ for file in raw_files:
+ logger.info(f"正在处理文件: {file.name}")
+ paragraphs = process_text_file(file)
+ all_paragraphs.extend(paragraphs)
+
+ # 保存合并后的结果
+ output_path = "data/import.json"
+ with open(output_path, "w", encoding="utf-8") as f:
+ json.dump(all_paragraphs, f, ensure_ascii=False, indent=4)
+
+ logger.info(f"处理完成,结果已保存到: {output_path}")
+
+
+if __name__ == "__main__":
+ main()
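
Note: `process_text_file()` above splits each input `.txt` on blank lines, so a paragraph is one run of consecutive non-empty lines, and all paragraphs are then written to `data/import.json` as a JSON array of strings. A minimal, self-contained illustration of that splitting rule:

```python
# Same blank-line splitting rule as process_text_file(), applied to an in-memory string.
raw = "第一段第一行\n第一段第二行\n\n第二段\n\n\n第三段\n"

paragraphs = []
paragraph = ""
for line in raw.split("\n"):
    if line.strip() == "":
        if paragraph != "":
            paragraphs.append(paragraph.strip())
            paragraph = ""
    else:
        paragraph += line + "\n"
if paragraph != "":
    paragraphs.append(paragraph.strip())

print(paragraphs)  # ['第一段第一行\n第一段第二行', '第二段', '第三段']
```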
diff --git a/scripts/run.sh b/scripts/run.sh
index b7ecbc849..9fd3127f6 100644
--- a/scripts/run.sh
+++ b/scripts/run.sh
@@ -4,7 +4,7 @@
# 适用于Arch/Ubuntu 24.10/Debian 12/CentOS 9
# 请小心使用任何一键脚本!
-INSTALLER_VERSION="0.0.3-refactor"
+INSTALLER_VERSION="0.0.4-refactor"
LANG=C.UTF-8
# 如无法访问GitHub请修改此处镜像地址
@@ -19,10 +19,10 @@ RESET="\e[0m"
declare -A REQUIRED_PACKAGES=(
["common"]="git sudo python3 curl gnupg"
- ["debian"]="python3-venv python3-pip"
- ["ubuntu"]="python3-venv python3-pip"
- ["centos"]="python3-pip"
- ["arch"]="python-virtualenv python-pip"
+ ["debian"]="python3-venv python3-pip build-essential"
+ ["ubuntu"]="python3-venv python3-pip build-essential"
+ ["centos"]="epel-release python3-pip python3-devel gcc gcc-c++ make"
+ ["arch"]="python-virtualenv python-pip base-devel"
)
# 默认项目目录
diff --git a/src/MaiBot0.6roadmap.md b/src/MaiBot0.6roadmap.md
new file mode 100644
index 000000000..54774197e
--- /dev/null
+++ b/src/MaiBot0.6roadmap.md
@@ -0,0 +1,16 @@
+MaiCore/MaiBot 0.6路线图 draft
+
+0.6.3:解决0.6.x版本核心问题,改进功能
+主要功能加入
+LPMM全面替代旧知识库
+采用新的HFC回复模式,取代旧心流
+合并推理模式和心流模式,根据麦麦自己决策回复模式
+提供新的表情包系统
+
+0.6.4:提升用户体验,交互优化
+加入webui
+提供麦麦 API
+修复prompt建构的各种问题
+修复各种bug
+调整代码文件结构,重构部分落后设计
+
diff --git a/src/api/__init__.py b/src/api/__init__.py
new file mode 100644
index 000000000..f5bc08a6e
--- /dev/null
+++ b/src/api/__init__.py
@@ -0,0 +1,8 @@
+from fastapi import FastAPI
+from strawberry.fastapi import GraphQLRouter
+
+app = FastAPI()
+
+graphql_router = GraphQLRouter(schema=None, path="/") # Replace `None` with your actual schema
+
+app.include_router(graphql_router, prefix="/graphql", tags=["GraphQL"])
diff --git a/src/api/config_api.py b/src/api/config_api.py
new file mode 100644
index 000000000..025888d82
--- /dev/null
+++ b/src/api/config_api.py
@@ -0,0 +1,155 @@
+from typing import Dict, List, Optional
+import strawberry
+
+# from packaging.version import Version, InvalidVersion
+# from packaging.specifiers import SpecifierSet, InvalidSpecifier
+# from ..config.config import global_config
+# import os
+from packaging.version import Version
+
+
+@strawberry.type
+class BotConfig:
+ """机器人配置类"""
+
+ INNER_VERSION: Version
+ MAI_VERSION: str # 硬编码的版本信息
+
+ # bot
+ BOT_QQ: Optional[int]
+ BOT_NICKNAME: Optional[str]
+ BOT_ALIAS_NAMES: List[str] # 别名,可以通过这个叫它
+
+ # group
+ talk_allowed_groups: set
+ talk_frequency_down_groups: set
+ ban_user_id: set
+
+ # personality
+ personality_core: str # 建议20字以内,谁再写3000字小作文敲谁脑袋
+ personality_sides: List[str]
+ # identity
+ identity_detail: List[str]
+ height: int # 身高 单位厘米
+ weight: int # 体重 单位千克
+ age: int # 年龄 单位岁
+ gender: str # 性别
+ appearance: str # 外貌特征
+
+ # schedule
+ ENABLE_SCHEDULE_GEN: bool # 是否启用日程生成
+ PROMPT_SCHEDULE_GEN: str
+ SCHEDULE_DOING_UPDATE_INTERVAL: int # 日程表更新间隔 单位秒
+ SCHEDULE_TEMPERATURE: float # 日程表温度,建议0.5-1.0
+ TIME_ZONE: str # 时区
+
+ # message
+ MAX_CONTEXT_SIZE: int # 上下文最大消息数
+ emoji_chance: float # 发送表情包的基础概率
+ thinking_timeout: int # 思考时间
+ model_max_output_length: int # 最大回复长度
+ message_buffer: bool # 消息缓冲器
+
+ ban_words: set
+ ban_msgs_regex: set
+ # heartflow
+ # enable_heartflow: bool = False # 是否启用心流
+ sub_heart_flow_update_interval: int # 子心流更新频率,间隔 单位秒
+ sub_heart_flow_freeze_time: int # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
+ sub_heart_flow_stop_time: int # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
+ heart_flow_update_interval: int # 心流更新频率,间隔 单位秒
+ observation_context_size: int # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+ compressed_length: int # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
+ compress_length_limit: int # 最多压缩份数,超过该数值的压缩上下文会被删除
+
+ # willing
+ willing_mode: str # 意愿模式
+ response_willing_amplifier: float # 回复意愿放大系数
+ response_interested_rate_amplifier: float # 回复兴趣度放大系数
+ down_frequency_rate: float # 降低回复频率的群组回复意愿降低系数
+ emoji_response_penalty: float # 表情包回复惩罚
+ mentioned_bot_inevitable_reply: bool # 提及 bot 必然回复
+ at_bot_inevitable_reply: bool # @bot 必然回复
+
+ # response
+ response_mode: str # 回复策略
+ MODEL_R1_PROBABILITY: float # R1模型概率
+ MODEL_V3_PROBABILITY: float # V3模型概率
+ # MODEL_R1_DISTILL_PROBABILITY: float # R1蒸馏模型概率
+
+ # emoji
+ max_emoji_num: int # 表情包最大数量
+ max_reach_deletion: bool # 开启则在达到最大数量时删除表情包,关闭则不会继续收集表情包
+ EMOJI_CHECK_INTERVAL: int # 表情包检查间隔(分钟)
+ EMOJI_REGISTER_INTERVAL: int # 表情包注册间隔(分钟)
+ EMOJI_SAVE: bool # 偷表情包
+ EMOJI_CHECK: bool # 是否开启过滤
+ EMOJI_CHECK_PROMPT: str # 表情包过滤要求
+
+ # memory
+ build_memory_interval: int # 记忆构建间隔(秒)
+ memory_build_distribution: list # 记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重
+ build_memory_sample_num: int # 记忆构建采样数量
+ build_memory_sample_length: int # 记忆构建采样长度
+ memory_compress_rate: float # 记忆压缩率
+
+ forget_memory_interval: int # 记忆遗忘间隔(秒)
+ memory_forget_time: int # 记忆遗忘时间(小时)
+ memory_forget_percentage: float # 记忆遗忘比例
+
+ memory_ban_words: list # 添加新的配置项默认值
+
+ # mood
+ mood_update_interval: float # 情绪更新间隔 单位秒
+ mood_decay_rate: float # 情绪衰减率
+ mood_intensity_factor: float # 情绪强度因子
+
+ # keywords
+ keywords_reaction_rules: list # 关键词回复规则
+
+ # chinese_typo
+ chinese_typo_enable: bool # 是否启用中文错别字生成器
+ chinese_typo_error_rate: float # 单字替换概率
+ chinese_typo_min_freq: int # 最小字频阈值
+ chinese_typo_tone_error_rate: float # 声调错误概率
+ chinese_typo_word_replace_rate: float # 整词替换概率
+
+ # response_splitter
+ enable_response_splitter: bool # 是否启用回复分割器
+ response_max_length: int # 回复允许的最大长度
+ response_max_sentence_num: int # 回复允许的最大句子数
+
+ # remote
+ remote_enable: bool # 是否启用远程控制
+
+ # experimental
+ enable_friend_chat: bool # 是否启用好友聊天
+ # enable_think_flow: bool # 是否启用思考流程
+ enable_pfc_chatting: bool # 是否启用PFC聊天
+
+ # 模型配置
+ llm_reasoning: Dict[str, str] # LLM推理
+ # llm_reasoning_minor: Dict[str, str]
+ llm_normal: Dict[str, str] # LLM普通
+ llm_topic_judge: Dict[str, str] # LLM话题判断
+ llm_summary: Dict[str, str] # LLM话题总结
+ llm_emotion_judge: Dict[str, str] # LLM情感判断
+ embedding: Dict[str, str] # 嵌入
+ vlm: Dict[str, str] # VLM
+ moderation: Dict[str, str] # 审核
+
+ # 实验性
+ llm_observation: Dict[str, str] # LLM观察
+ llm_sub_heartflow: Dict[str, str] # LLM子心流
+ llm_heartflow: Dict[str, str] # LLM心流
+
+ api_urls: Dict[str, str] # API URLs
+
+
+@strawberry.type
+class EnvConfig:
+ pass
+
+ @strawberry.field
+ def get_env(self) -> str:
+ return "env"
diff --git a/src/api/graphql/__init__.py b/src/api/graphql/__init__.py
new file mode 100644
index 000000000..b0efa7f9a
--- /dev/null
+++ b/src/api/graphql/__init__.py
@@ -0,0 +1,22 @@
+import strawberry
+
+from fastapi import FastAPI
+from strawberry.fastapi import GraphQLRouter
+
+from src.common.server import global_server
+
+
+@strawberry.type
+class Query:
+ @strawberry.field
+ def hello(self) -> str:
+ return "Hello World"
+
+
+schema = strawberry.Schema(Query)
+
+graphql_app = GraphQLRouter(schema)
+
+fast_api_app: FastAPI = global_server.get_app()
+
+fast_api_app.include_router(graphql_app, prefix="/graphql")
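
The `hello` query above can be sanity-checked without starting uvicorn, since a strawberry `Schema` can execute queries in-process; a small sketch (importing the module also registers the router on the shared FastAPI app):

from src.api.graphql import schema

result = schema.execute_sync("{ hello }")
assert result.errors is None
print(result.data)  # expected: {'hello': 'Hello World'}
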
diff --git a/src/api/graphql/schema.py b/src/api/graphql/schema.py
new file mode 100644
index 000000000..2ae28399f
--- /dev/null
+++ b/src/api/graphql/schema.py
@@ -0,0 +1 @@
+pass
diff --git a/src/common/log_decorators.py b/src/common/log_decorators.py
new file mode 100644
index 000000000..9838717f9
--- /dev/null
+++ b/src/common/log_decorators.py
@@ -0,0 +1,107 @@
+import functools
+import inspect
+from typing import Callable, Any
+from .logger import logger, add_custom_style_handler
+
+
+def use_log_style(
+ style_name: str,
+ console_format: str,
+ console_level: str = "INFO",
+ # file_format: Optional[str] = None, # 暂未支持文件输出
+ # file_level: str = "DEBUG",
+) -> Callable:
+ """装饰器:为函数内的日志启用特定的自定义样式。
+
+ Args:
+ style_name (str): 自定义样式的唯一名称。
+ console_format (str): 控制台输出的格式字符串。
+ console_level (str, optional): 控制台日志级别. Defaults to "INFO".
+ # file_format (Optional[str], optional): 文件输出格式 (暂未支持). Defaults to None.
+ # file_level (str, optional): 文件日志级别 (暂未支持). Defaults to "DEBUG".
+
+ Returns:
+ Callable: 返回装饰器本身。
+ """
+
+ def decorator(func: Callable) -> Callable:
+ # 获取被装饰函数所在的模块名
+ module = inspect.getmodule(func)
+ if module is None:
+ # 如果无法获取模块(例如,在交互式解释器中定义函数),则使用默认名称
+ module_name = "unknown_module"
+ logger.warning(f"无法确定函数 {func.__name__} 的模块,将使用 '{module_name}'")
+ else:
+ module_name = module.__name__
+
+ # 在函数首次被调用(或模块加载时)确保自定义处理器已添加
+ # 注意:这会在模块加载时执行,而不是每次函数调用时
+ # print(f"Setting up custom style '{style_name}' for module '{module_name}' in decorator definition")
+ add_custom_style_handler(
+ module_name=module_name,
+ style_name=style_name,
+ console_format=console_format,
+ console_level=console_level,
+ # file_format=file_format,
+ # file_level=file_level,
+ )
+
+ @functools.wraps(func)
+ def wrapper(*args: Any, **kwargs: Any) -> Any:
+ # 创建绑定了模块名和自定义样式标记的 logger 实例
+ custom_logger = logger.bind(module=module_name, custom_style=style_name)
+ # print(f"Executing {func.__name__} with custom logger for style '{style_name}'")
+ # 将自定义 logger 作为第一个参数传递给原函数
+ # 注意:这要求被装饰的函数第一个参数用于接收 logger
+ try:
+ return func(custom_logger, *args, **kwargs)
+ except TypeError as e:
+ # 捕获可能的类型错误,比如原函数不接受 logger 参数
+ logger.error(
+ f"调用 {func.__name__} 时出错:请确保该函数接受一个 logger 实例作为其第一个参数。错误:{e}"
+ )
+ # 可以选择重新抛出异常或返回特定值
+ raise e
+
+ return wrapper
+
+ return decorator
+
+
+# --- 示例用法 (可以在其他模块中这样使用) ---
+
+# # 假设这是你的模块 my_module.py
+# from src.common.log_decorators import use_log_style
+# from src.common.logger import get_module_logger, LoguruLogger
+
+# # 获取模块的标准 logger
+# standard_logger = get_module_logger(__name__)
+
+# # 定义一个自定义样式
+# MY_SPECIAL_STYLE = "special"
+# MY_SPECIAL_FORMAT = " SPECIAL [{time:HH:mm:ss}] | {message}"
+
+# @use_log_style(style_name=MY_SPECIAL_STYLE, console_format=MY_SPECIAL_FORMAT)
+# def my_function_with_special_logs(custom_logger: LoguruLogger, x: int, y: int):
+# standard_logger.info("这是一条使用标准格式的日志")
+# custom_logger.info(f"开始执行特殊操作,参数: x={x}, y={y}")
+# result = x + y
+# custom_logger.success(f"特殊操作完成,结果: {result}")
+# standard_logger.info("标准格式日志:函数即将结束")
+# return result
+
+# @use_log_style(style_name="another_style", console_format="任务: {message}")
+# def another_task(task_logger: LoguruLogger, task_name: str):
+# standard_logger.debug("准备执行另一个任务")
+# task_logger.info(f"正在处理任务 '{task_name}'")
+# # ... 执行任务 ...
+# task_logger.warning("任务处理中遇到一个警告")
+# standard_logger.info("另一个任务的标准日志")
+
+# if __name__ == "__main__":
+# print("\n--- 调用 my_function_with_special_logs ---")
+# my_function_with_special_logs(10, 5)
+# print("\n--- 调用 another_task ---")
+# another_task("数据清理")
+# print("\n--- 单独使用标准 logger ---")
+# standard_logger.info("这是一条完全独立的标准日志")
diff --git a/src/common/logger.py b/src/common/logger.py
index 7365e34a6..b5317d58b 100644
--- a/src/common/logger.py
+++ b/src/common/logger.py
@@ -1,11 +1,61 @@
from loguru import logger
-from typing import Dict, Optional, Union, List
+from typing import Dict, Optional, Union, List, Tuple
import sys
import os
from types import ModuleType
from pathlib import Path
from dotenv import load_dotenv
-# from ..plugins.chat.config import global_config
+
+"""
+日志颜色说明:
+
+1. 主程序(Main)
+浅黄色标题 | 浅黄色消息
+
+2. 海马体(Memory)
+浅黄色标题 | 浅黄色消息
+
+3. PFC(前额叶皮质)
+浅绿色标题 | 浅绿色消息
+
+4. 心情(Mood)
+品红色标题 | 品红色消息
+
+5. 工具使用(Tool)
+品红色标题 | 品红色消息
+
+6. 关系(Relation)
+浅品红色标题 | 浅品红色消息
+
+7. 配置(Config)
+浅青色标题 | 浅青色消息
+
+8. 麦麦大脑袋
+浅绿色标题 | 浅绿色消息
+
+9. 在干嘛
+青色标题 | 青色消息
+
+10. 麦麦组织语言
+浅绿色标题 | 浅绿色消息
+
+11. 见闻(Chat)
+浅蓝色标题 | 绿色消息
+
+12. 表情包(Emoji)
+橙色标题 | 橙色消息 fg #FFD700
+
+13. 子心流
+
+14. 其他模块
+模块名标题 | 对应颜色消息
+
+
+注意:
+1. 级别颜色遵循loguru默认配置
+2. 可通过环境变量修改日志级别
+"""
+
# 加载 .env 文件
env_path = Path(__file__).resolve().parent.parent.parent / ".env"
@@ -26,12 +76,17 @@ LoguruLogger = logger.__class__
# 全局注册表:记录模块与处理器ID的映射
_handler_registry: Dict[str, List[int]] = {}
+_custom_style_handlers: Dict[Tuple[str, str], List[int]] = {} # 记录自定义样式处理器ID
# 获取日志存储根地址
current_file_path = Path(__file__).resolve()
LOG_ROOT = "logs"
-SIMPLE_OUTPUT = os.getenv("SIMPLE_OUTPUT", "false")
+SIMPLE_OUTPUT = os.getenv("SIMPLE_OUTPUT", "false").strip().lower()
+if SIMPLE_OUTPUT == "true":
+ SIMPLE_OUTPUT = True
+else:
+ SIMPLE_OUTPUT = False
print(f"SIMPLE_OUTPUT: {SIMPLE_OUTPUT}")
if not SIMPLE_OUTPUT:
@@ -42,12 +97,9 @@ if not SIMPLE_OUTPUT:
"file_level": "DEBUG",
# 格式配置
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
- "{level: <8} | "
- "{extra[module]: <12} | "
- "{message}"
+ "{time:YYYY-MM-DD HH:mm:ss} | {extra[module]: <12} | {message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | {message}",
"log_dir": LOG_ROOT,
"rotation": "00:00",
"retention": "3 days",
@@ -59,8 +111,8 @@ else:
"console_level": "INFO",
"file_level": "DEBUG",
# 格式配置
- "console_format": ("{time:MM-DD HH:mm} | {extra[module]} | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | {extra[module]} | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | {message}",
"log_dir": LOG_ROOT,
"rotation": "00:00",
"retention": "3 days",
@@ -68,59 +120,73 @@ else:
}
-# 海马体日志样式配置
-MEMORY_STYLE_CONFIG = {
+MAIN_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
- "海马体 | "
+ "主程序 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 海马体 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 主程序 | {message}",
},
"simple": {
"console_format": (
- "{time:MM-DD HH:mm} | 海马体 | {message}"
+ "{time:MM-DD HH:mm} | 主程序 | {message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 海马体 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 主程序 | {message}",
},
}
+# pfc配置
+PFC_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "PFC | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | PFC | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | PFC | {message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | PFC | {message}",
+ },
+}
# MOOD
MOOD_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
- "心情 | "
+ "心情 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 心情 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 心情 | {message} ",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}",
},
}
# tool use
TOOL_USE_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"工具使用 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 工具使用 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 工具使用 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}",
},
}
@@ -129,17 +195,16 @@ TOOL_USE_STYLE_CONFIG = {
RELATION_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"关系 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 关系 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 关系 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 关系 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 关系 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 关系 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 关系 | {message}",
},
}
@@ -147,87 +212,82 @@ RELATION_STYLE_CONFIG = {
CONFIG_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"配置 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 配置 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 配置 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 配置 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 配置 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 配置 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 配置 | {message}",
},
}
SENDER_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"消息发送 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 消息发送 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 消息发送 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 消息发送 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 消息发送 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 消息发送 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 消息发送 | {message}",
},
}
HEARTFLOW_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"麦麦大脑袋 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦大脑袋 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦大脑袋 | {message}",
},
"simple": {
"console_format": (
- "{time:MM-DD HH:mm} | 麦麦大脑袋 | {message}"
+ "{time:MM-DD HH:mm} | 麦麦大脑袋 | {message}"
), # noqa: E501
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦大脑袋 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦大脑袋 | {message}",
},
}
SCHEDULE_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"在干嘛 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 在干嘛 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 在干嘛 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 在干嘛 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 在干嘛 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 在干嘛 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 在干嘛 | {message}",
},
}
LLM_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"麦麦组织语言 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦组织语言 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦组织语言 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 麦麦组织语言 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦组织语言 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 麦麦组织语言 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦组织语言 | {message}",
},
}
@@ -236,17 +296,16 @@ LLM_STYLE_CONFIG = {
TOPIC_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"话题 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 话题 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 话题 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 主题 | {message}"),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 话题 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 主题 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 话题 | {message}",
},
}
@@ -254,65 +313,520 @@ TOPIC_STYLE_CONFIG = {
CHAT_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
- "见闻 | "
+ "见闻 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 见闻 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 见闻 | {message}",
},
"simple": {
+ "console_format": ("{time:MM-DD HH:mm} | 见闻 | {message}"), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 见闻 | {message}",
+ },
+}
+
+REMOTE_STYLE_CONFIG = {
+ "advanced": {
"console_format": (
- "{time:MM-DD HH:mm} | 见闻 | {message}"
- ), # noqa: E501
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 见闻 | {message}"),
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "远程 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 远程 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 远程| {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 远程 | {message}",
},
}
SUB_HEARTFLOW_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "麦麦水群 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦小脑袋 | {message}",
+ },
+ "simple": {
+ "console_format": ("{time:MM-DD HH:mm} | 麦麦水群 | {message}"), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦水群 | {message}",
+ },
+}
+
+SUB_HEARTFLOW_MIND_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"麦麦小脑袋 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦小脑袋 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦小脑袋 | {message}",
+ },
+ "simple": {
+ "console_format": ("{time:MM-DD HH:mm} | 麦麦小脑袋 | {message}"), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦小脑袋 | {message}",
+ },
+}
+
+SUBHEARTFLOW_MANAGER_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "麦麦水群[管理] | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦水群[管理] | {message}",
+ },
+ "simple": {
+ "console_format": ("{time:MM-DD HH:mm} | 麦麦水群[管理] | {message}"), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦水群[管理] | {message}",
+ },
+}
+
+BASE_TOOL_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "工具使用 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}",
},
"simple": {
"console_format": (
- "{time:MM-DD HH:mm} | 麦麦小脑袋 | {message}"
+ "{time:MM-DD HH:mm} | 工具使用 | {message}"
), # noqa: E501
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦小脑袋 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}",
+ },
+}
+
+CHAT_STREAM_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "聊天流 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天流 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 聊天流 | {message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天流 | {message}",
+ },
+}
+
+CHAT_MESSAGE_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "聊天消息 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天消息 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 聊天消息 | {message}"
+ ), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天消息 | {message}",
+ },
+}
+
+PERSON_INFO_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "人物信息 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 人物信息 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 人物信息 | {message}"
+ ), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 人物信息 | {message}",
+ },
+}
+
+BACKGROUND_TASKS_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "后台任务 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 后台任务 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 后台任务 | {message}"
+ ), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 后台任务 | {message}",
},
}
WILLING_STYLE_CONFIG = {
"advanced": {
"console_format": (
- "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{time:YYYY-MM-DD HH:mm:ss} | "
"{level: <8} | "
- "{extra[module]: <12} | "
"意愿 | "
"{message}"
),
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 意愿 | {message}"),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 意愿 | {message}",
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 意愿 | {message}"), # noqa: E501
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 意愿 | {message}"),
+ "console_format": "{time:MM-DD HH:mm} | 意愿 | {message} ", # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 意愿 | {message}",
+ },
+}
+
+PFC_ACTION_PLANNER_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "PFC私聊规划 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | PFC私聊规划 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | PFC私聊规划 | {message} ", # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | PFC私聊规划 | {message}",
+ },
+}
+
+# EMOJI,橙色,全着色
+EMOJI_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "表情包 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 表情包 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 表情包 | {message} ", # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 表情包 | {message}",
+ },
+}
+
+MAI_STATE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "麦麦状态 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦状态 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 麦麦状态 | {message} ", # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 麦麦状态 | {message}",
+ },
+}
+
+
+# 海马体日志样式配置
+MEMORY_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "海马体 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 海马体 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 海马体 | {message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 海马体 | {message}",
+ },
+}
+
+
+# LPMM配置
+LPMM_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "LPMM | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | LPMM | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | LPMM | {message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | LPMM | {message}",
+ },
+}
+
+OBSERVATION_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "聊天观察 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天观察 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 聊天观察 | {message}"
+ ), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天观察 | {message}",
+ },
+}
+
+CHAT_IMAGE_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "聊天图片 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天图片 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 聊天图片 | {message}"
+ ), # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 聊天图片 | {message}",
+ },
+}
+
+# HFC log
+HFC_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "专注聊天 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 专注聊天 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 专注聊天 | {message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 专注聊天 | {message}",
},
}
CONFIRM_STYLE_CONFIG = {
- "console_format": ("{message}"), # noqa: E501
- "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | EULA与PRIVACY确认 | {message}"),
+ "console_format": "{message}", # noqa: E501
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | EULA与PRIVACY确认 | {message}",
}
+# 天依蓝配置
+TIANYI_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "天依 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 天依 | {message}",
+ },
+ "simple": {
+ "console_format": (
+ "{time:MM-DD HH:mm} | 天依 | {message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 天依 | {message}",
+ },
+}
+
+# 模型日志样式配置
+MODEL_UTILS_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "模型 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 模型 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 模型 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 模型 | {message}",
+ },
+}
+
+MESSAGE_BUFFER_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "消息缓存 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 消息缓存 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 消息缓存 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 消息缓存 | {message}",
+ },
+}
+
+PROMPT_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "提示词构建 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 提示词构建 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 提示词构建 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 提示词构建 | {message}",
+ },
+}
+
+CHANGE_MOOD_TOOL_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "心情工具 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情工具 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 心情工具 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情工具 | {message}",
+ },
+}
+
+CHANGE_RELATIONSHIP_TOOL_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "关系工具 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 关系工具 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 关系工具 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 关系工具 | {message}",
+ },
+}
+
+GET_KNOWLEDGE_TOOL_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "获取知识 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 获取知识 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 获取知识 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 获取知识 | {message}",
+ },
+}
+
+GET_TIME_DATE_TOOL_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "获取时间日期 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 获取时间日期 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 获取时间日期 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 获取时间日期 | {message}",
+ },
+}
+
+LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "LPMM获取知识 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | LPMM获取知识 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | LPMM获取知识 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | LPMM获取知识 | {message}",
+ },
+}
+
+INIT_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "初始化 | "
+ "{message}"
+ ),
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 初始化 | {message}",
+ },
+ "simple": {
+ "console_format": "{time:MM-DD HH:mm} | 初始化 | {message}",
+ "file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 初始化 | {message}",
+ },
+}
+
+
# 根据SIMPLE_OUTPUT选择配置
+MAIN_STYLE_CONFIG = MAIN_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MAIN_STYLE_CONFIG["advanced"]
+EMOJI_STYLE_CONFIG = EMOJI_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else EMOJI_STYLE_CONFIG["advanced"]
+PFC_ACTION_PLANNER_STYLE_CONFIG = (
+ PFC_ACTION_PLANNER_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else PFC_ACTION_PLANNER_STYLE_CONFIG["advanced"]
+)
+REMOTE_STYLE_CONFIG = REMOTE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else REMOTE_STYLE_CONFIG["advanced"]
+BASE_TOOL_STYLE_CONFIG = BASE_TOOL_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else BASE_TOOL_STYLE_CONFIG["advanced"]
+PERSON_INFO_STYLE_CONFIG = PERSON_INFO_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else PERSON_INFO_STYLE_CONFIG["advanced"]
+SUBHEARTFLOW_MANAGER_STYLE_CONFIG = (
+ SUBHEARTFLOW_MANAGER_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else SUBHEARTFLOW_MANAGER_STYLE_CONFIG["advanced"]
+)
+BACKGROUND_TASKS_STYLE_CONFIG = (
+ BACKGROUND_TASKS_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else BACKGROUND_TASKS_STYLE_CONFIG["advanced"]
+)
MEMORY_STYLE_CONFIG = MEMORY_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MEMORY_STYLE_CONFIG["advanced"]
+CHAT_STREAM_STYLE_CONFIG = CHAT_STREAM_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_STREAM_STYLE_CONFIG["advanced"]
TOPIC_STYLE_CONFIG = TOPIC_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else TOPIC_STYLE_CONFIG["advanced"]
SENDER_STYLE_CONFIG = SENDER_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else SENDER_STYLE_CONFIG["advanced"]
LLM_STYLE_CONFIG = LLM_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else LLM_STYLE_CONFIG["advanced"]
@@ -324,9 +838,47 @@ HEARTFLOW_STYLE_CONFIG = HEARTFLOW_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else
SUB_HEARTFLOW_STYLE_CONFIG = (
SUB_HEARTFLOW_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else SUB_HEARTFLOW_STYLE_CONFIG["advanced"]
) # noqa: E501
+SUB_HEARTFLOW_MIND_STYLE_CONFIG = (
+ SUB_HEARTFLOW_MIND_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else SUB_HEARTFLOW_MIND_STYLE_CONFIG["advanced"]
+)
WILLING_STYLE_CONFIG = WILLING_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else WILLING_STYLE_CONFIG["advanced"]
+MAI_STATE_CONFIG = MAI_STATE_CONFIG["simple"] if SIMPLE_OUTPUT else MAI_STATE_CONFIG["advanced"]
CONFIG_STYLE_CONFIG = CONFIG_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CONFIG_STYLE_CONFIG["advanced"]
TOOL_USE_STYLE_CONFIG = TOOL_USE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else TOOL_USE_STYLE_CONFIG["advanced"]
+PFC_STYLE_CONFIG = PFC_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else PFC_STYLE_CONFIG["advanced"]
+LPMM_STYLE_CONFIG = LPMM_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else LPMM_STYLE_CONFIG["advanced"]
+HFC_STYLE_CONFIG = HFC_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else HFC_STYLE_CONFIG["advanced"]
+TIANYI_STYLE_CONFIG = TIANYI_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else TIANYI_STYLE_CONFIG["advanced"]
+MODEL_UTILS_STYLE_CONFIG = MODEL_UTILS_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MODEL_UTILS_STYLE_CONFIG["advanced"]
+PROMPT_STYLE_CONFIG = PROMPT_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else PROMPT_STYLE_CONFIG["advanced"]
+CHANGE_MOOD_TOOL_STYLE_CONFIG = (
+ CHANGE_MOOD_TOOL_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHANGE_MOOD_TOOL_STYLE_CONFIG["advanced"]
+)
+CHANGE_RELATIONSHIP_TOOL_STYLE_CONFIG = (
+ CHANGE_RELATIONSHIP_TOOL_STYLE_CONFIG["simple"]
+ if SIMPLE_OUTPUT
+ else CHANGE_RELATIONSHIP_TOOL_STYLE_CONFIG["advanced"]
+)
+GET_KNOWLEDGE_TOOL_STYLE_CONFIG = (
+ GET_KNOWLEDGE_TOOL_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else GET_KNOWLEDGE_TOOL_STYLE_CONFIG["advanced"]
+)
+GET_TIME_DATE_TOOL_STYLE_CONFIG = (
+ GET_TIME_DATE_TOOL_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else GET_TIME_DATE_TOOL_STYLE_CONFIG["advanced"]
+)
+LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG = (
+ LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG["simple"]
+ if SIMPLE_OUTPUT
+ else LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG["advanced"]
+)
+OBSERVATION_STYLE_CONFIG = OBSERVATION_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else OBSERVATION_STYLE_CONFIG["advanced"]
+MESSAGE_BUFFER_STYLE_CONFIG = (
+ MESSAGE_BUFFER_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MESSAGE_BUFFER_STYLE_CONFIG["advanced"]
+)
+CHAT_MESSAGE_STYLE_CONFIG = (
+ CHAT_MESSAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_MESSAGE_STYLE_CONFIG["advanced"]
+)
+CHAT_IMAGE_STYLE_CONFIG = CHAT_IMAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_IMAGE_STYLE_CONFIG["advanced"]
+INIT_STYLE_CONFIG = INIT_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else INIT_STYLE_CONFIG["advanced"]
def is_registered_module(record: dict) -> bool:
@@ -391,7 +943,7 @@ def get_module_logger(
sink=sys.stderr,
level=os.getenv("CONSOLE_LOG_LEVEL", console_level or current_config["console_level"]),
format=current_config["console_format"],
- filter=lambda record: record["extra"].get("module") == module_name,
+ filter=lambda record: record["extra"].get("module") == module_name and "custom_style" not in record["extra"],
enqueue=True,
)
handler_ids.append(console_id)
@@ -410,7 +962,7 @@ def get_module_logger(
retention=current_config["retention"],
compression=current_config["compression"],
encoding="utf-8",
- filter=lambda record: record["extra"].get("module") == module_name,
+ filter=lambda record: record["extra"].get("module") == module_name and "custom_style" not in record["extra"],
enqueue=True,
)
handler_ids.append(file_id)
@@ -427,6 +979,87 @@ def get_module_logger(
return logger.bind(module=module_name)
+def add_custom_style_handler(
+ module_name: str,
+ style_name: str,
+ console_format: str,
+ console_level: str = "INFO",
+ # file_format: Optional[str] = None, # 暂时只支持控制台
+ # file_level: str = "DEBUG",
+ # config: Optional[LogConfig] = None, # 暂时不使用全局配置
+) -> None:
+ """为指定模块和样式名添加自定义日志处理器(目前仅支持控制台)."""
+ handler_key = (module_name, style_name)
+
+ # 如果已存在该模块和样式的处理器,则不重复添加
+ if handler_key in _custom_style_handlers:
+ # print(f"Custom handler for {handler_key} already exists.")
+ return
+
+ handler_ids = []
+
+ # 添加自定义控制台处理器
+ try:
+ custom_console_id = logger.add(
+ sink=sys.stderr,
+ level=os.getenv(f"{module_name.upper()}_{style_name.upper()}_CONSOLE_LEVEL", console_level),
+ format=console_format,
+ filter=lambda record: record["extra"].get("module") == module_name
+ and record["extra"].get("custom_style") == style_name,
+ enqueue=True,
+ )
+ handler_ids.append(custom_console_id)
+ # print(f"Added custom console handler {custom_console_id} for {handler_key}")
+ except Exception as e:
+ logger.error(f"Failed to add custom console handler for {handler_key}: {e}")
+ # 如果添加失败,确保列表为空,避免记录不存在的ID
+ handler_ids = []
+
+ # # 文件处理器 (可选,按需启用)
+ # if file_format:
+ # current_config = config.config if config else DEFAULT_CONFIG
+ # log_dir = Path(current_config["log_dir"])
+ # log_dir.mkdir(parents=True, exist_ok=True)
+ # # 可以考虑将自定义样式的日志写入单独文件或模块主文件
+ # log_file = log_dir / module_name / f"{style_name}_{{time:YYYY-MM-DD}}.log"
+ # log_file.parent.mkdir(parents=True, exist_ok=True)
+ # try:
+ # custom_file_id = logger.add(
+ # sink=str(log_file),
+ # level=os.getenv(f"{module_name.upper()}_{style_name.upper()}_FILE_LEVEL", file_level),
+ # format=file_format,
+ # rotation=current_config["rotation"],
+ # retention=current_config["retention"],
+ # compression=current_config["compression"],
+ # encoding="utf-8",
+ # filter=lambda record: record["extra"].get("module") == module_name
+ # and record["extra"].get("custom_style") == style_name,
+ # enqueue=True,
+ # )
+ # handler_ids.append(custom_file_id)
+ # except Exception as e:
+ # logger.error(f"Failed to add custom file handler for {handler_key}: {e}")
+
+ # 更新自定义处理器注册表
+ if handler_ids:
+ _custom_style_handlers[handler_key] = handler_ids
+
+
+def remove_custom_style_handler(module_name: str, style_name: str) -> None:
+ """移除指定模块和样式名的自定义日志处理器."""
+ handler_key = (module_name, style_name)
+ if handler_key in _custom_style_handlers:
+ for handler_id in _custom_style_handlers[handler_key]:
+ try:
+ logger.remove(handler_id)
+ # print(f"Removed custom handler {handler_id} for {handler_key}")
+ except ValueError:
+ # 可能已经被移除或不存在
+ # print(f"Handler {handler_id} for {handler_key} already removed or invalid.")
+ pass
+ del _custom_style_handlers[handler_key]
+
+
def remove_module_logger(module_name: str) -> None:
"""清理指定模块的日志处理器"""
if module_name in _handler_registry:
@@ -459,7 +1092,7 @@ other_log_dir.mkdir(parents=True, exist_ok=True)
DEFAULT_FILE_HANDLER = logger.add(
sink=str(other_log_dir / "{time:YYYY-MM-DD}.log"),
level=os.getenv("DEFAULT_FILE_LOG_LEVEL", "DEBUG"),
- format=("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name: <15} | {message}"),
+ format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name: <15} | {message}",
rotation=DEFAULT_CONFIG["rotation"],
retention=DEFAULT_CONFIG["retention"],
compression=DEFAULT_CONFIG["compression"],
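
The new `add_custom_style_handler` / `remove_custom_style_handler` pair can also be used without the decorator from log_decorators.py; a minimal sketch (the module name "memory", style name "replay" and format string are illustrative only — the bound logger must carry both the `module` and `custom_style` extras so the filters installed above match):

from src.common.logger import (
    logger,
    add_custom_style_handler,
    remove_custom_style_handler,
)

# Register a one-off console style for the "memory" module.
add_custom_style_handler(
    module_name="memory",
    style_name="replay",
    console_format="{time:HH:mm:ss} | 海马体回放 | {message}",
)

# Only records bound with both extras are rendered by the custom handler.
replay_logger = logger.bind(module="memory", custom_style="replay")
replay_logger.info("回放一段记忆")

# Tear the handler down once the special-cased output is no longer needed.
remove_custom_style_handler("memory", "replay")
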
diff --git a/src/common/logger_manager.py b/src/common/logger_manager.py
new file mode 100644
index 000000000..5c5538385
--- /dev/null
+++ b/src/common/logger_manager.py
@@ -0,0 +1,102 @@
+from src.common.logger import get_module_logger, LogConfig
+from src.common.logger import (
+ BACKGROUND_TASKS_STYLE_CONFIG,
+ MAIN_STYLE_CONFIG,
+ MEMORY_STYLE_CONFIG,
+ PFC_STYLE_CONFIG,
+ MOOD_STYLE_CONFIG,
+ TOOL_USE_STYLE_CONFIG,
+ RELATION_STYLE_CONFIG,
+ CONFIG_STYLE_CONFIG,
+ HEARTFLOW_STYLE_CONFIG,
+ SCHEDULE_STYLE_CONFIG,
+ LLM_STYLE_CONFIG,
+ CHAT_STYLE_CONFIG,
+ EMOJI_STYLE_CONFIG,
+ SUB_HEARTFLOW_STYLE_CONFIG,
+ SUB_HEARTFLOW_MIND_STYLE_CONFIG,
+ SUBHEARTFLOW_MANAGER_STYLE_CONFIG,
+ BASE_TOOL_STYLE_CONFIG,
+ CHAT_STREAM_STYLE_CONFIG,
+ PERSON_INFO_STYLE_CONFIG,
+ WILLING_STYLE_CONFIG,
+ PFC_ACTION_PLANNER_STYLE_CONFIG,
+ MAI_STATE_CONFIG,
+ LPMM_STYLE_CONFIG,
+ HFC_STYLE_CONFIG,
+ TIANYI_STYLE_CONFIG,
+ REMOTE_STYLE_CONFIG,
+ TOPIC_STYLE_CONFIG,
+ SENDER_STYLE_CONFIG,
+ CONFIRM_STYLE_CONFIG,
+ MODEL_UTILS_STYLE_CONFIG,
+ PROMPT_STYLE_CONFIG,
+ CHANGE_MOOD_TOOL_STYLE_CONFIG,
+ CHANGE_RELATIONSHIP_TOOL_STYLE_CONFIG,
+ GET_KNOWLEDGE_TOOL_STYLE_CONFIG,
+ GET_TIME_DATE_TOOL_STYLE_CONFIG,
+ LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG,
+ OBSERVATION_STYLE_CONFIG,
+ MESSAGE_BUFFER_STYLE_CONFIG,
+ CHAT_MESSAGE_STYLE_CONFIG,
+ CHAT_IMAGE_STYLE_CONFIG,
+ INIT_STYLE_CONFIG,
+)
+
+# 可根据实际需要补充更多模块配置
+MODULE_LOGGER_CONFIGS = {
+ "background_tasks": BACKGROUND_TASKS_STYLE_CONFIG, # 后台任务
+ "main": MAIN_STYLE_CONFIG, # 主程序
+ "memory": MEMORY_STYLE_CONFIG, # 海马体
+ "pfc": PFC_STYLE_CONFIG, # PFC
+ "mood": MOOD_STYLE_CONFIG, # 心情
+ "tool_use": TOOL_USE_STYLE_CONFIG, # 工具使用
+ "relation": RELATION_STYLE_CONFIG, # 关系
+ "config": CONFIG_STYLE_CONFIG, # 配置
+ "heartflow": HEARTFLOW_STYLE_CONFIG, # 麦麦大脑袋
+ "schedule": SCHEDULE_STYLE_CONFIG, # 在干嘛
+ "llm": LLM_STYLE_CONFIG, # 麦麦组织语言
+ "chat": CHAT_STYLE_CONFIG, # 见闻
+ "emoji": EMOJI_STYLE_CONFIG, # 表情包
+ "sub_heartflow": SUB_HEARTFLOW_STYLE_CONFIG, # 麦麦水群
+ "sub_heartflow_mind": SUB_HEARTFLOW_MIND_STYLE_CONFIG, # 麦麦小脑袋
+ "subheartflow_manager": SUBHEARTFLOW_MANAGER_STYLE_CONFIG, # 麦麦水群[管理]
+ "base_tool": BASE_TOOL_STYLE_CONFIG, # 工具使用
+ "chat_stream": CHAT_STREAM_STYLE_CONFIG, # 聊天流
+ "person_info": PERSON_INFO_STYLE_CONFIG, # 人物信息
+ "willing": WILLING_STYLE_CONFIG, # 意愿
+ "pfc_action_planner": PFC_ACTION_PLANNER_STYLE_CONFIG, # PFC私聊规划
+ "mai_state": MAI_STATE_CONFIG, # 麦麦状态
+ "lpmm": LPMM_STYLE_CONFIG, # LPMM
+ "hfc": HFC_STYLE_CONFIG, # HFC
+ "tianyi": TIANYI_STYLE_CONFIG, # 天依
+ "remote": REMOTE_STYLE_CONFIG, # 远程
+ "topic": TOPIC_STYLE_CONFIG, # 话题
+ "sender": SENDER_STYLE_CONFIG, # 消息发送
+ "confirm": CONFIRM_STYLE_CONFIG, # EULA与PRIVACY确认
+ "model_utils": MODEL_UTILS_STYLE_CONFIG, # 模型工具
+ "prompt": PROMPT_STYLE_CONFIG, # 提示词
+ "change_mood_tool": CHANGE_MOOD_TOOL_STYLE_CONFIG, # 改变心情工具
+ "change_relationship": CHANGE_RELATIONSHIP_TOOL_STYLE_CONFIG, # 改变关系工具
+ "get_knowledge_tool": GET_KNOWLEDGE_TOOL_STYLE_CONFIG, # 获取知识工具
+ "get_time_date": GET_TIME_DATE_TOOL_STYLE_CONFIG, # 获取时间日期工具
+ "lpm_get_knowledge_tool": LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG, # LPMM获取知识工具
+ "observation": OBSERVATION_STYLE_CONFIG, # 聊天观察
+ "message_buffer": MESSAGE_BUFFER_STYLE_CONFIG, # 消息缓冲
+ "chat_message": CHAT_MESSAGE_STYLE_CONFIG, # 聊天消息
+ "chat_image": CHAT_IMAGE_STYLE_CONFIG, # 聊天图片
+ "init": INIT_STYLE_CONFIG, # 初始化
+ # ...如有更多模块,继续添加...
+}
+
+
+def get_logger(module_name: str):
+ style_config = MODULE_LOGGER_CONFIGS.get(module_name)
+ if style_config:
+ log_config = LogConfig(
+ console_format=style_config["console_format"],
+ file_format=style_config["file_format"],
+ )
+ return get_module_logger(module_name, config=log_config)
+ # 若无特殊样式,使用默认
+ return get_module_logger(module_name)
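
Call sites only need the module key; a short usage sketch (keys must match entries in `MODULE_LOGGER_CONFIGS`, otherwise `get_logger` silently falls back to the default module logger):

from src.common.logger_manager import get_logger

logger = get_logger("memory")   # styled with MEMORY_STYLE_CONFIG (海马体)
logger.info("开始构建记忆")

fallback = get_logger("some_new_module")  # unknown key -> default style
fallback.debug("使用默认样式输出")
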
diff --git a/src/common/message_repository.py b/src/common/message_repository.py
new file mode 100644
index 000000000..fc7b7e542
--- /dev/null
+++ b/src/common/message_repository.py
@@ -0,0 +1,75 @@
+from src.common.database import db
+from src.common.logger import get_module_logger
+import traceback
+from typing import List, Dict, Any, Optional
+
+logger = get_module_logger(__name__)
+
+
+def find_messages(
+ filter: Dict[str, Any], sort: Optional[List[tuple[str, int]]] = None, limit: int = 0, limit_mode: str = "latest"
+) -> List[Dict[str, Any]]:
+ """
+ 根据提供的过滤器、排序和限制条件查找消息。
+
+ Args:
+ filter: MongoDB 查询过滤器。
+ sort: MongoDB 排序条件列表,例如 [('time', 1)]。仅在 limit 为 0 时生效。
+ limit: 返回的最大文档数,0表示不限制。
+ limit_mode: 当 limit > 0 时生效。 'earliest' 表示获取最早的记录, 'latest' 表示获取最新的记录(结果仍按时间正序排列)。默认为 'latest'。
+
+ Returns:
+ 消息文档列表,如果出错则返回空列表。
+ """
+ try:
+ query = db.messages.find(filter)
+ results: List[Dict[str, Any]] = []
+
+ if limit > 0:
+ if limit_mode == "earliest":
+ # 获取时间最早的 limit 条记录,已经是正序
+ query = query.sort([("time", 1)]).limit(limit)
+ results = list(query)
+ else: # 默认为 'latest'
+ # 获取时间最晚的 limit 条记录
+ query = query.sort([("time", -1)]).limit(limit)
+ latest_results = list(query)
+ # 将结果按时间正序排列
+ # 假设消息文档中总是有 'time' 字段且可排序
+ results = sorted(latest_results, key=lambda msg: msg.get("time"))
+ else:
+ # limit 为 0 时,应用传入的 sort 参数
+ if sort:
+ query = query.sort(sort)
+ results = list(query)
+
+ return results
+ except Exception as e:
+ log_message = (
+ f"查找消息失败 (filter={filter}, sort={sort}, limit={limit}, limit_mode={limit_mode}): {e}\n"
+ + traceback.format_exc()
+ )
+ logger.error(log_message)
+ return []
+
+
+def count_messages(filter: Dict[str, Any]) -> int:
+ """
+ 根据提供的过滤器计算消息数量。
+
+ Args:
+ filter: MongoDB 查询过滤器。
+
+ Returns:
+ 符合条件的消息数量,如果出错则返回 0。
+ """
+ try:
+ count = db.messages.count_documents(filter)
+ return count
+ except Exception as e:
+ log_message = f"计数消息失败 (filter={filter}): {e}\n" + traceback.format_exc()
+ logger.error(log_message)
+ return 0
+
+
+# 你可以在这里添加更多与 messages 集合相关的数据库操作函数,例如 find_one_message, insert_message 等。
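
A usage sketch for the two helpers (the `chat_id` key and its value are purely illustrative — the actual message document schema is not shown in this diff):

from src.common.message_repository import find_messages, count_messages

chat_filter = {"chat_id": "123456"}  # hypothetical filter

# 20 newest messages, returned in chronological order ("latest" is the default mode).
recent = find_messages(filter=chat_filter, limit=20)

# The 5 earliest messages of the same chat.
earliest = find_messages(filter=chat_filter, limit=5, limit_mode="earliest")

# Full history, oldest first, when no limit is applied.
full_history = find_messages(filter=chat_filter, sort=[("time", 1)])

total = count_messages(chat_filter)
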
diff --git a/src/common/server.py b/src/common/server.py
index a4998a305..517996293 100644
--- a/src/common/server.py
+++ b/src/common/server.py
@@ -45,7 +45,8 @@ class Server:
async def run(self):
"""启动服务器"""
- config = Config(app=self.app, host=self._host, port=self._port)
+ # 禁用 uvicorn 默认日志和访问日志
+ config = Config(app=self.app, host=self._host, port=self._port, log_config=None, access_log=False)
self._server = UvicornServer(config=config)
try:
await self._server.serve()
diff --git a/src/plugins/config/auto_update.py b/src/config/auto_update.py
similarity index 100%
rename from src/plugins/config/auto_update.py
rename to src/config/auto_update.py
diff --git a/src/plugins/config/config.py b/src/config/config.py
similarity index 80%
rename from src/plugins/config/config.py
rename to src/config/config.py
index d0a209d35..e6cf16d4d 100644
--- a/src/plugins/config/config.py
+++ b/src/config/config.py
@@ -13,21 +13,15 @@ from packaging import version
from packaging.version import Version, InvalidVersion
from packaging.specifiers import SpecifierSet, InvalidSpecifier
-from src.common.logger import get_module_logger, CONFIG_STYLE_CONFIG, LogConfig
+from src.common.logger_manager import get_logger
-# 定义日志配置
-config_config = LogConfig(
- # 使用消息发送专用样式
- console_format=CONFIG_STYLE_CONFIG["console_format"],
- file_format=CONFIG_STYLE_CONFIG["file_format"],
-)
# 配置主程序日志格式
-logger = get_module_logger("config", config=config_config)
+logger = get_logger("config")
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = False
-mai_version_main = "0.6.2"
+mai_version_main = "0.6.3"
mai_version_fix = ""
if mai_version_fix:
@@ -44,7 +38,7 @@ else:
def update_config():
# 获取根目录路径
- root_dir = Path(__file__).parent.parent.parent.parent
+ root_dir = Path(__file__).parent.parent.parent
template_dir = root_dir / "template"
config_dir = root_dir / "config"
old_config_dir = config_dir / "old"
@@ -62,8 +56,7 @@ def update_config():
shutil.copy2(template_path, old_config_path)
logger.info(f"已创建新配置文件,请填写后重新运行: {old_config_path}")
# 如果是新创建的配置文件,直接返回
- quit()
- return
+ return quit()
# 读取旧配置文件和模板文件
with open(old_config_path, "r", encoding="utf-8") as f:
@@ -131,9 +124,6 @@ def update_config():
logger.info("配置文件更新完成")
-logger = get_module_logger("config")
-
-
@dataclass
class BotConfig:
"""机器人配置类"""
@@ -142,7 +132,7 @@ class BotConfig:
MAI_VERSION: str = mai_version # 硬编码的版本信息
# bot
- BOT_QQ: Optional[int] = 114514
+ BOT_QQ: Optional[str] = "114514"
BOT_NICKNAME: Optional[str] = None
BOT_ALIAS_NAMES: List[str] = field(default_factory=list) # 别名,可以通过这个叫它
@@ -180,27 +170,34 @@ class BotConfig:
SCHEDULE_TEMPERATURE: float = 0.5 # 日程表温度,建议0.5-1.0
TIME_ZONE: str = "Asia/Shanghai" # 时区
- # message
- MAX_CONTEXT_SIZE: int = 15 # 上下文最大消息数
- emoji_chance: float = 0.2 # 发送表情包的基础概率
- thinking_timeout: int = 120 # 思考时间
- max_response_length: int = 1024 # 最大回复长度
+ # chat
+ allow_focus_mode: bool = True # 是否允许专注聊天状态
+
+ base_normal_chat_num: int = 3 # 最多允许多少个群进行普通聊天
+ base_focused_chat_num: int = 2 # 最多允许多少个群进行专注聊天
+
+ observation_context_size: int = 12 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+
message_buffer: bool = True # 消息缓冲器
ban_words = set()
ban_msgs_regex = set()
- # heartflow
- # enable_heartflow: bool = False # 是否启用心流
- sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒
- sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
- sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
- heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒
- observation_context_size: int = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+ # focus_chat
+ reply_trigger_threshold: float = 3.0 # 心流聊天触发阈值,越低越容易触发
+ default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢
+ consecutive_no_reply_threshold = 3
+
compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除
- # willing
+ # normal_chat
+ model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
+ model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
+
+ emoji_chance: float = 0.2 # 发送表情包的基础概率
+ thinking_timeout: int = 120 # 思考时间
+
willing_mode: str = "classical" # 意愿模式
response_willing_amplifier: float = 1.0 # 回复意愿放大系数
response_interested_rate_amplifier: float = 1.0 # 回复兴趣度放大系数
@@ -209,18 +206,15 @@ class BotConfig:
mentioned_bot_inevitable_reply: bool = False # 提及 bot 必然回复
at_bot_inevitable_reply: bool = False # @bot 必然回复
- # response
- response_mode: str = "heart_flow" # 回复策略
- MODEL_R1_PROBABILITY: float = 0.8 # R1模型概率
- MODEL_V3_PROBABILITY: float = 0.1 # V3模型概率
- # MODEL_R1_DISTILL_PROBABILITY: float = 0.1 # R1蒸馏模型概率
-
# emoji
max_emoji_num: int = 200 # 表情包最大数量
max_reach_deletion: bool = True # 开启则在达到最大数量时删除表情包,关闭则不会继续收集表情包
EMOJI_CHECK_INTERVAL: int = 120 # 表情包检查间隔(分钟)
- EMOJI_REGISTER_INTERVAL: int = 10 # 表情包注册间隔(分钟)
- EMOJI_SAVE: bool = True # 偷表情包
+
+ save_pic: bool = False # 是否保存图片
+ save_emoji: bool = False # 是否保存表情包
+ steal_emoji: bool = True # 是否偷取表情包,让麦麦可以发送她保存的这些表情包
+
EMOJI_CHECK: bool = False # 是否开启过滤
EMOJI_CHECK_PROMPT: str = "符合公序良俗" # 表情包过滤要求
@@ -237,6 +231,10 @@ class BotConfig:
memory_forget_time: int = 24 # 记忆遗忘时间(小时)
memory_forget_percentage: float = 0.01 # 记忆遗忘比例
+ consolidate_memory_interval: int = 1000 # 记忆整合间隔(秒)
+ consolidation_similarity_threshold: float = 0.7 # 相似度阈值
+ consolidate_memory_percentage: float = 0.01 # 检查节点比例
+
memory_ban_words: list = field(
default_factory=lambda: ["表情包", "图片", "回复", "聊天记录"]
) # 添加新的配置项默认值
@@ -257,10 +255,13 @@ class BotConfig:
chinese_typo_word_replace_rate = 0.02 # 整词替换概率
# response_splitter
+ enable_kaomoji_protection = False # 是否启用颜文字保护
enable_response_splitter = True # 是否启用回复分割器
response_max_length = 100 # 回复允许的最大长度
response_max_sentence_num = 3 # 回复允许的最大句子数
+ model_max_output_length: int = 800 # 最大回复长度
+
# remote
remote_enable: bool = True # 是否启用远程控制
@@ -274,31 +275,16 @@ class BotConfig:
# llm_reasoning_minor: Dict[str, str] = field(default_factory=lambda: {})
llm_normal: Dict[str, str] = field(default_factory=lambda: {})
llm_topic_judge: Dict[str, str] = field(default_factory=lambda: {})
- llm_summary_by_topic: Dict[str, str] = field(default_factory=lambda: {})
- llm_emotion_judge: Dict[str, str] = field(default_factory=lambda: {})
+ llm_summary: Dict[str, str] = field(default_factory=lambda: {})
embedding: Dict[str, str] = field(default_factory=lambda: {})
vlm: Dict[str, str] = field(default_factory=lambda: {})
moderation: Dict[str, str] = field(default_factory=lambda: {})
- # 实验性
llm_observation: Dict[str, str] = field(default_factory=lambda: {})
llm_sub_heartflow: Dict[str, str] = field(default_factory=lambda: {})
llm_heartflow: Dict[str, str] = field(default_factory=lambda: {})
-
- build_memory_interval: int = 600 # 记忆构建间隔(秒)
-
- forget_memory_interval: int = 600 # 记忆遗忘间隔(秒)
- memory_forget_time: int = 24 # 记忆遗忘时间(小时)
- memory_forget_percentage: float = 0.01 # 记忆遗忘比例
- memory_compress_rate: float = 0.1 # 记忆压缩率
- build_memory_sample_num: int = 10 # 记忆构建采样数量
- build_memory_sample_length: int = 20 # 记忆构建采样长度
- memory_build_distribution: list = field(
- default_factory=lambda: [4, 2, 0.6, 24, 8, 0.4]
- ) # 记忆构建分布,参数:分布1均值,标准差,权重,分布2均值,标准差,权重
- memory_ban_words: list = field(
- default_factory=lambda: ["表情包", "图片", "回复", "聊天记录"]
- ) # 添加新的配置项默认值
+ llm_tool_use: Dict[str, str] = field(default_factory=lambda: {})
+ llm_plan: Dict[str, str] = field(default_factory=lambda: {})
api_urls: Dict[str, str] = field(default_factory=lambda: {})
@@ -306,7 +292,7 @@ class BotConfig:
def get_config_dir() -> str:
"""获取配置文件目录"""
current_dir = os.path.dirname(os.path.abspath(__file__))
- root_dir = os.path.abspath(os.path.join(current_dir, "..", "..", ".."))
+ root_dir = os.path.abspath(os.path.join(current_dir, "..", ".."))
config_dir = os.path.join(root_dir, "config")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
@@ -402,78 +388,80 @@ class BotConfig:
def emoji(parent: dict):
emoji_config = parent["emoji"]
config.EMOJI_CHECK_INTERVAL = emoji_config.get("check_interval", config.EMOJI_CHECK_INTERVAL)
- config.EMOJI_REGISTER_INTERVAL = emoji_config.get("register_interval", config.EMOJI_REGISTER_INTERVAL)
config.EMOJI_CHECK_PROMPT = emoji_config.get("check_prompt", config.EMOJI_CHECK_PROMPT)
- config.EMOJI_SAVE = emoji_config.get("auto_save", config.EMOJI_SAVE)
config.EMOJI_CHECK = emoji_config.get("enable_check", config.EMOJI_CHECK)
if config.INNER_VERSION in SpecifierSet(">=1.1.1"):
config.max_emoji_num = emoji_config.get("max_emoji_num", config.max_emoji_num)
config.max_reach_deletion = emoji_config.get("max_reach_deletion", config.max_reach_deletion)
+ if config.INNER_VERSION in SpecifierSet(">=1.4.2"):
+ config.save_pic = emoji_config.get("save_pic", config.save_pic)
+ config.save_emoji = emoji_config.get("save_emoji", config.save_emoji)
+ config.steal_emoji = emoji_config.get("steal_emoji", config.steal_emoji)
def bot(parent: dict):
# 机器人基础配置
bot_config = parent["bot"]
bot_qq = bot_config.get("qq")
- config.BOT_QQ = int(bot_qq)
+ config.BOT_QQ = str(bot_qq)
config.BOT_NICKNAME = bot_config.get("nickname", config.BOT_NICKNAME)
config.BOT_ALIAS_NAMES = bot_config.get("alias_names", config.BOT_ALIAS_NAMES)
- def response(parent: dict):
- response_config = parent["response"]
- config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
- config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
- # config.MODEL_R1_DISTILL_PROBABILITY = response_config.get(
- # "model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
- # )
- config.max_response_length = response_config.get("max_response_length", config.max_response_length)
- if config.INNER_VERSION in SpecifierSet(">=1.0.4"):
- config.response_mode = response_config.get("response_mode", config.response_mode)
+ def chat(parent: dict):
+ chat_config = parent["chat"]
+ config.allow_focus_mode = chat_config.get("allow_focus_mode", config.allow_focus_mode)
+ config.base_normal_chat_num = chat_config.get("base_normal_chat_num", config.base_normal_chat_num)
+ config.base_focused_chat_num = chat_config.get("base_focused_chat_num", config.base_focused_chat_num)
+ config.observation_context_size = chat_config.get(
+ "observation_context_size", config.observation_context_size
+ )
+ config.message_buffer = chat_config.get("message_buffer", config.message_buffer)
+ config.ban_words = chat_config.get("ban_words", config.ban_words)
+ for r in chat_config.get("ban_msgs_regex", config.ban_msgs_regex):
+ config.ban_msgs_regex.add(re.compile(r))
- def heartflow(parent: dict):
- heartflow_config = parent["heartflow"]
- config.sub_heart_flow_update_interval = heartflow_config.get(
- "sub_heart_flow_update_interval", config.sub_heart_flow_update_interval
+ def normal_chat(parent: dict):
+ normal_chat_config = parent["normal_chat"]
+ config.model_reasoning_probability = normal_chat_config.get(
+ "model_reasoning_probability", config.model_reasoning_probability
)
- config.sub_heart_flow_freeze_time = heartflow_config.get(
- "sub_heart_flow_freeze_time", config.sub_heart_flow_freeze_time
+ config.model_normal_probability = normal_chat_config.get(
+ "model_normal_probability", config.model_normal_probability
)
- config.sub_heart_flow_stop_time = heartflow_config.get(
- "sub_heart_flow_stop_time", config.sub_heart_flow_stop_time
- )
- config.heart_flow_update_interval = heartflow_config.get(
- "heart_flow_update_interval", config.heart_flow_update_interval
- )
- if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
- config.observation_context_size = heartflow_config.get(
- "observation_context_size", config.observation_context_size
- )
- config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)
- config.compress_length_limit = heartflow_config.get(
- "compress_length_limit", config.compress_length_limit
- )
+ config.emoji_chance = normal_chat_config.get("emoji_chance", config.emoji_chance)
+ config.thinking_timeout = normal_chat_config.get("thinking_timeout", config.thinking_timeout)
- def willing(parent: dict):
- willing_config = parent["willing"]
- config.willing_mode = willing_config.get("willing_mode", config.willing_mode)
+ config.willing_mode = normal_chat_config.get("willing_mode", config.willing_mode)
+ config.response_willing_amplifier = normal_chat_config.get(
+ "response_willing_amplifier", config.response_willing_amplifier
+ )
+ config.response_interested_rate_amplifier = normal_chat_config.get(
+ "response_interested_rate_amplifier", config.response_interested_rate_amplifier
+ )
+ config.down_frequency_rate = normal_chat_config.get("down_frequency_rate", config.down_frequency_rate)
+ config.emoji_response_penalty = normal_chat_config.get(
+ "emoji_response_penalty", config.emoji_response_penalty
+ )
- if config.INNER_VERSION in SpecifierSet(">=0.0.11"):
- config.response_willing_amplifier = willing_config.get(
- "response_willing_amplifier", config.response_willing_amplifier
- )
- config.response_interested_rate_amplifier = willing_config.get(
- "response_interested_rate_amplifier", config.response_interested_rate_amplifier
- )
- config.down_frequency_rate = willing_config.get("down_frequency_rate", config.down_frequency_rate)
- config.emoji_response_penalty = willing_config.get(
- "emoji_response_penalty", config.emoji_response_penalty
- )
- if config.INNER_VERSION in SpecifierSet(">=1.2.5"):
- config.mentioned_bot_inevitable_reply = willing_config.get(
- "mentioned_bot_inevitable_reply", config.mentioned_bot_inevitable_reply
- )
- config.at_bot_inevitable_reply = willing_config.get(
- "at_bot_inevitable_reply", config.at_bot_inevitable_reply
- )
+ config.mentioned_bot_inevitable_reply = normal_chat_config.get(
+ "mentioned_bot_inevitable_reply", config.mentioned_bot_inevitable_reply
+ )
+ config.at_bot_inevitable_reply = normal_chat_config.get(
+ "at_bot_inevitable_reply", config.at_bot_inevitable_reply
+ )
+
+ def focus_chat(parent: dict):
+ focus_chat_config = parent["focus_chat"]
+ config.compressed_length = focus_chat_config.get("compressed_length", config.compressed_length)
+ config.compress_length_limit = focus_chat_config.get("compress_length_limit", config.compress_length_limit)
+ config.reply_trigger_threshold = focus_chat_config.get(
+ "reply_trigger_threshold", config.reply_trigger_threshold
+ )
+ config.default_decay_rate_per_second = focus_chat_config.get(
+ "default_decay_rate_per_second", config.default_decay_rate_per_second
+ )
+ config.consecutive_no_reply_threshold = focus_chat_config.get(
+ "consecutive_no_reply_threshold", config.consecutive_no_reply_threshold
+ )
def model(parent: dict):
# 加载模型配置
@@ -484,14 +472,17 @@ class BotConfig:
# "llm_reasoning_minor",
"llm_normal",
"llm_topic_judge",
- "llm_summary_by_topic",
- "llm_emotion_judge",
+ "llm_summary",
"vlm",
"embedding",
"llm_tool_use",
"llm_observation",
"llm_sub_heartflow",
+ "llm_plan",
"llm_heartflow",
+ "llm_PFC_action_planner",
+ "llm_PFC_chat",
+ "llm_PFC_reply_checker",
]
for item in config_list:
@@ -560,26 +551,6 @@ class BotConfig:
logger.error(f"模型 {item} 在config中不存在,请检查,或尝试更新配置文件")
raise KeyError(f"模型 {item} 在config中不存在,请检查,或尝试更新配置文件")
- def message(parent: dict):
- msg_config = parent["message"]
- config.MAX_CONTEXT_SIZE = msg_config.get("max_context_size", config.MAX_CONTEXT_SIZE)
- config.emoji_chance = msg_config.get("emoji_chance", config.emoji_chance)
- config.ban_words = msg_config.get("ban_words", config.ban_words)
- config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
- config.response_willing_amplifier = msg_config.get(
- "response_willing_amplifier", config.response_willing_amplifier
- )
- config.response_interested_rate_amplifier = msg_config.get(
- "response_interested_rate_amplifier", config.response_interested_rate_amplifier
- )
- config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
- for r in msg_config.get("ban_msgs_regex", config.ban_msgs_regex):
- config.ban_msgs_regex.add(re.compile(r))
- if config.INNER_VERSION in SpecifierSet(">=0.0.11"):
- config.max_response_length = msg_config.get("max_response_length", config.max_response_length)
- if config.INNER_VERSION in SpecifierSet(">=1.1.4"):
- config.message_buffer = msg_config.get("message_buffer", config.message_buffer)
-
def memory(parent: dict):
memory_config = parent["memory"]
config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
@@ -600,6 +571,16 @@ class BotConfig:
config.build_memory_sample_length = memory_config.get(
"build_memory_sample_length", config.build_memory_sample_length
)
+ if config.INNER_VERSION in SpecifierSet(">=1.5.1"):
+ config.consolidate_memory_interval = memory_config.get(
+ "consolidate_memory_interval", config.consolidate_memory_interval
+ )
+ config.consolidation_similarity_threshold = memory_config.get(
+ "consolidation_similarity_threshold", config.consolidation_similarity_threshold
+ )
+ config.consolidate_memory_percentage = memory_config.get(
+ "consolidate_memory_percentage", config.consolidate_memory_percentage
+ )
def remote(parent: dict):
remote_config = parent["remote"]
@@ -640,12 +621,25 @@ class BotConfig:
config.response_max_sentence_num = response_splitter_config.get(
"response_max_sentence_num", config.response_max_sentence_num
)
+ if config.INNER_VERSION in SpecifierSet(">=1.4.2"):
+ config.enable_kaomoji_protection = response_splitter_config.get(
+ "enable_kaomoji_protection", config.enable_kaomoji_protection
+ )
+ if config.INNER_VERSION in SpecifierSet(">=1.6.0"):
+ config.model_max_output_length = response_splitter_config.get(
+ "model_max_output_length", config.model_max_output_length
+ )
def groups(parent: dict):
groups_config = parent["groups"]
- config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
- config.talk_frequency_down_groups = set(groups_config.get("talk_frequency_down", []))
- config.ban_user_id = set(groups_config.get("ban_user_id", []))
+ # config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
+ config.talk_allowed_groups = set(str(group) for group in groups_config.get("talk_allowed", []))
+ # config.talk_frequency_down_groups = set(groups_config.get("talk_frequency_down", []))
+ config.talk_frequency_down_groups = set(
+ str(group) for group in groups_config.get("talk_frequency_down", [])
+ )
+ # config.ban_user_id = set(groups_config.get("ban_user_id", []))
+ config.ban_user_id = set(str(user) for user in groups_config.get("ban_user_id", []))
def platforms(parent: dict):
platforms_config = parent["platforms"]
@@ -680,10 +674,7 @@ class BotConfig:
"personality": {"func": personality, "support": ">=0.0.0"},
"identity": {"func": identity, "support": ">=1.2.4"},
"schedule": {"func": schedule, "support": ">=0.0.11", "necessary": False},
- "message": {"func": message, "support": ">=0.0.0"},
- "willing": {"func": willing, "support": ">=0.0.9", "necessary": False},
"emoji": {"func": emoji, "support": ">=0.0.0"},
- "response": {"func": response, "support": ">=0.0.0"},
"model": {"func": model, "support": ">=0.0.0"},
"memory": {"func": memory, "support": ">=0.0.0", "necessary": False},
"mood": {"func": mood, "support": ">=0.0.0"},
@@ -693,7 +684,9 @@ class BotConfig:
"platforms": {"func": platforms, "support": ">=1.0.0"},
"response_splitter": {"func": response_splitter, "support": ">=0.0.11", "necessary": False},
"experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False},
- "heartflow": {"func": heartflow, "support": ">=1.0.2", "necessary": False},
+ "chat": {"func": chat, "support": ">=1.6.0", "necessary": False},
+ "normal_chat": {"func": normal_chat, "support": ">=1.6.0", "necessary": False},
+ "focus_chat": {"func": focus_chat, "support": ">=1.6.0", "necessary": False},
}
# 原地修改,将 字符串版本表达式 转换成 版本对象
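For reference, here is a minimal sketch of the version-gating pattern the loaders above rely on: a key is only read when the config file's INNER_VERSION satisfies the SpecifierSet guard, otherwise the existing default is kept. Illustrative only; the helper name and the defaults dict are assumptions, not part of this patch.

from packaging.specifiers import SpecifierSet
from packaging.version import Version

def load_chat_section(inner_version: str, parent: dict, defaults: dict) -> dict:
    # Hypothetical helper mirroring the "if config.INNER_VERSION in SpecifierSet(...)" checks above.
    chat = parent.get("chat", {})
    result = dict(defaults)
    # Keys present in every supported config version
    result["ban_words"] = chat.get("ban_words", defaults.get("ban_words", []))
    # Keys introduced later are guarded by a version specifier
    if Version(inner_version) in SpecifierSet(">=1.6.0"):
        result["message_buffer"] = chat.get("message_buffer", defaults.get("message_buffer", True))
    return result

print(load_chat_section("1.6.0", {"chat": {"ban_words": ["spam"]}}, {"message_buffer": True}))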
diff --git a/src/do_tool/tool_can_use/change_mood.py b/src/do_tool/not_used/change_mood.py
similarity index 71%
rename from src/do_tool/tool_can_use/change_mood.py
rename to src/do_tool/not_used/change_mood.py
index 53410068f..430561a26 100644
--- a/src/do_tool/tool_can_use/change_mood.py
+++ b/src/do_tool/not_used/change_mood.py
@@ -1,12 +1,11 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
-from src.plugins.config.config import global_config
-from src.common.logger import get_module_logger
+from src.config.config import global_config
+from src.common.logger_manager import get_logger
from src.plugins.moods.moods import MoodManager
-from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
from typing import Dict, Any
-logger = get_module_logger("change_mood_tool")
+logger = get_logger("change_mood_tool")
class ChangeMoodTool(BaseTool):
@@ -23,29 +22,29 @@ class ChangeMoodTool(BaseTool):
"required": ["text", "response_set"],
}
- async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]:
+ async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行心情改变
Args:
function_args: 工具参数
- message_processed_plain_text: 原始消息文本
- response_set: 原始消息文本
+ message_txt: 原始消息文本
Returns:
Dict: 工具执行结果
"""
try:
response_set = function_args.get("response_set")
- message_processed_plain_text = function_args.get("text")
+ _message_processed_plain_text = function_args.get("text")
mood_manager = MoodManager.get_instance()
- gpt = ResponseGenerator()
+ # gpt = ResponseGenerator()
if response_set is None:
response_set = ["你还没有回复"]
- ori_response = ",".join(response_set)
- _stance, emotion = await gpt._get_emotion_tags(ori_response, message_processed_plain_text)
+ _ori_response = ",".join(response_set)
+ # _stance, emotion = await gpt._get_emotion_tags(ori_response, message_processed_plain_text)
+ emotion = "平静"
mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
return {"name": "change_mood", "content": f"你的心情刚刚变化了,现在的心情是: {emotion}"}
except Exception as e:
diff --git a/src/do_tool/tool_can_use/change_relationship.py b/src/do_tool/not_used/change_relationship.py
similarity index 63%
rename from src/do_tool/tool_can_use/change_relationship.py
rename to src/do_tool/not_used/change_relationship.py
index 9aa084855..4af32fb8c 100644
--- a/src/do_tool/tool_can_use/change_relationship.py
+++ b/src/do_tool/not_used/change_relationship.py
@@ -1,10 +1,9 @@
-# from src.plugins.person_info.relationship_manager import relationship_manager
-from src.common.logger import get_module_logger
+from typing import Dict, Any
+from src.common.logger_manager import get_logger
from src.do_tool.tool_can_use.base_tool import BaseTool
-# from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
-logger = get_module_logger("relationship_tool")
+logger = get_logger("relationship_tool")
class RelationshipTool(BaseTool):
@@ -20,22 +19,20 @@ class RelationshipTool(BaseTool):
"required": ["text", "changed_value", "reason"],
}
- async def execute(self, args: dict, message_txt: str) -> dict:
+ async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> dict:
"""执行工具功能
Args:
- args: 包含工具参数的字典
- text: 原始消息文本
- changed_value: 变更值
- reason: 变更原因
+ function_args: 包含工具参数的字典
+ message_txt: 原始消息文本
Returns:
dict: 包含执行结果的字典
"""
try:
- text = args.get("text")
- changed_value = args.get("changed_value")
- reason = args.get("reason")
+ text = function_args.get("text")
+ changed_value = function_args.get("changed_value")
+ reason = function_args.get("reason")
return {"content": f"因为你刚刚因为{reason},所以你和发[{text}]这条消息的人的关系值变化为{changed_value}"}
diff --git a/src/do_tool/tool_can_use/get_current_task.py b/src/do_tool/not_used/get_current_task.py
similarity index 100%
rename from src/do_tool/tool_can_use/get_current_task.py
rename to src/do_tool/not_used/get_current_task.py
diff --git a/src/do_tool/tool_can_use/mid_chat_mem.py b/src/do_tool/not_used/mid_chat_mem.py
similarity index 86%
rename from src/do_tool/tool_can_use/mid_chat_mem.py
rename to src/do_tool/not_used/mid_chat_mem.py
index 26d26704a..71726a57f 100644
--- a/src/do_tool/tool_can_use/mid_chat_mem.py
+++ b/src/do_tool/not_used/mid_chat_mem.py
@@ -9,11 +9,11 @@ class GetMidMemoryTool(BaseTool):
"""从记忆系统中获取相关记忆的工具"""
name = "mid_chat_mem"
- description = "之前的聊天内容中获取具体信息,当最新消息提到,或者你需要回复的消息中提到,你可以使用这个工具"
+    description = "从之前的聊天内容概述id中获取具体信息,如果没有聊天内容概述id,就不要使用"
parameters = {
"type": "object",
"properties": {
- "id": {"type": "integer", "description": "要查询的聊天记录id"},
+ "id": {"type": "integer", "description": "要查询的聊天记录概述id"},
},
"required": ["id"],
}
diff --git a/src/do_tool/tool_can_use/send_emoji.py b/src/do_tool/not_used/send_emoji.py
similarity index 95%
rename from src/do_tool/tool_can_use/send_emoji.py
rename to src/do_tool/not_used/send_emoji.py
index 9cd48f0e4..3c6c8a3f1 100644
--- a/src/do_tool/tool_can_use/send_emoji.py
+++ b/src/do_tool/not_used/send_emoji.py
@@ -17,7 +17,7 @@ class SendEmojiTool(BaseTool):
"required": ["text"],
}
- async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]:
+ async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
text = function_args.get("text", message_txt)
return {
"name": "send_emoji",
diff --git a/src/do_tool/tool_can_use/base_tool.py b/src/do_tool/tool_can_use/base_tool.py
index b1edf8055..1dd15bafc 100644
--- a/src/do_tool/tool_can_use/base_tool.py
+++ b/src/do_tool/tool_can_use/base_tool.py
@@ -3,9 +3,9 @@ import inspect
import importlib
import pkgutil
import os
-from src.common.logger import get_module_logger
+from src.common.logger_manager import get_logger
-logger = get_module_logger("base_tool")
+logger = get_logger("base_tool")
# 工具注册表
TOOL_REGISTRY = {}
@@ -36,12 +36,11 @@ class BaseTool:
"function": {"name": cls.name, "description": cls.description, "parameters": cls.parameters},
}
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
"""执行工具函数
Args:
function_args: 工具调用参数
- message_txt: 原始消息文本
Returns:
Dict: 工具执行结果
@@ -63,7 +62,7 @@ def register_tool(tool_class: Type[BaseTool]):
raise ValueError(f"工具类 {tool_class.__name__} 没有定义 name 属性")
TOOL_REGISTRY[tool_name] = tool_class
- logger.info(f"已注册工具: {tool_name}")
+ logger.info(f"已注册: {tool_name}")
def discover_tools():
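For reference, a hedged sketch of how a new tool would plug into this registry under the updated execute(self, function_args) signature (message_txt is no longer passed). EchoTool is a hypothetical example, not part of this patch; it assumes the BaseTool and register_tool interfaces shown above.

from typing import Any, Dict
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool

class EchoTool(BaseTool):
    # Hypothetical demo tool: echoes back the text it is given.
    name = "echo"
    description = "原样返回传入的文本,用于演示工具注册流程"
    parameters = {
        "type": "object",
        "properties": {"text": {"type": "string", "description": "要回显的文本"}},
        "required": ["text"],
    }

    async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
        # Tools return a dict carrying their name and a content string
        return {"name": self.name, "content": function_args.get("text", "")}

register_tool(EchoTool)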
diff --git a/src/do_tool/tool_can_use/compare_numbers_tool.py b/src/do_tool/tool_can_use/compare_numbers_tool.py
index 48cee5157..1fbd812a0 100644
--- a/src/do_tool/tool_can_use/compare_numbers_tool.py
+++ b/src/do_tool/tool_can_use/compare_numbers_tool.py
@@ -19,7 +19,7 @@ class CompareNumbersTool(BaseTool):
"required": ["num1", "num2"],
}
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
"""执行比较两个数的大小
Args:
diff --git a/src/do_tool/tool_can_use/get_knowledge.py b/src/do_tool/tool_can_use/get_knowledge.py
index b78c07750..bd4ce86b4 100644
--- a/src/do_tool/tool_can_use/get_knowledge.py
+++ b/src/do_tool/tool_can_use/get_knowledge.py
@@ -1,10 +1,10 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.chat.utils import get_embedding
from src.common.database import db
-from src.common.logger import get_module_logger
+from src.common.logger_manager import get_logger
from typing import Dict, Any, Union
-logger = get_module_logger("get_knowledge_tool")
+logger = get_logger("get_knowledge_tool")
class SearchKnowledgeTool(BaseTool):
@@ -21,7 +21,7 @@ class SearchKnowledgeTool(BaseTool):
"required": ["query"],
}
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
"""执行知识库搜索
Args:
@@ -32,7 +32,7 @@ class SearchKnowledgeTool(BaseTool):
Dict: 工具执行结果
"""
try:
- query = function_args.get("query", message_txt)
+ query = function_args.get("query")
threshold = function_args.get("threshold", 0.4)
# 调用知识库搜索
@@ -49,8 +49,9 @@ class SearchKnowledgeTool(BaseTool):
logger.error(f"知识库搜索工具执行失败: {str(e)}")
return {"name": "search_knowledge", "content": f"知识库搜索失败: {str(e)}"}
+ @staticmethod
def get_info_from_db(
- self, query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False
+ query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False
) -> Union[str, list]:
"""从数据库中获取相关信息
diff --git a/src/do_tool/tool_can_use/get_memory.py b/src/do_tool/tool_can_use/get_memory.py
index 6a3c1c391..b38423ed0 100644
--- a/src/do_tool/tool_can_use/get_memory.py
+++ b/src/do_tool/tool_can_use/get_memory.py
@@ -9,18 +9,18 @@ logger = get_module_logger("mid_chat_mem_tool")
class GetMemoryTool(BaseTool):
"""从记忆系统中获取相关记忆的工具"""
- name = "mid_chat_mem"
+ name = "get_memory"
description = "从记忆系统中获取相关记忆"
parameters = {
"type": "object",
"properties": {
- "text": {"type": "string", "description": "要查询的相关文本"},
+ "topic": {"type": "string", "description": "要查询的相关主题,用逗号隔开"},
"max_memory_num": {"type": "integer", "description": "最大返回记忆数量"},
},
- "required": ["text"],
+ "required": ["topic"],
}
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
"""执行记忆获取
Args:
@@ -31,12 +31,15 @@ class GetMemoryTool(BaseTool):
Dict: 工具执行结果
"""
try:
- text = function_args.get("text", message_txt)
+ topic = function_args.get("topic")
max_memory_num = function_args.get("max_memory_num", 2)
+ # 将主题字符串转换为列表
+ topic_list = topic.split(",")
+
# 调用记忆系统
- related_memory = await HippocampusManager.get_instance().get_memory_from_text(
- text=text, max_memory_num=max_memory_num, max_memory_length=2, max_depth=3, fast_retrieval=False
+ related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
+ valid_keywords=topic_list, max_memory_num=max_memory_num, max_memory_length=2, max_depth=3
)
memory_info = ""
@@ -45,14 +48,16 @@ class GetMemoryTool(BaseTool):
memory_info += memory[1] + "\n"
if memory_info:
- content = f"你记得这些事情: {memory_info}"
- else:
- content = f"你不太记得有关{text}的记忆,你对此不太了解"
+ content = f"你记得这些事情: {memory_info}\n"
+ content += "以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n"
- return {"name": "mid_chat_mem", "content": content}
+ else:
+ content = f"{topic}的记忆,你记不太清"
+
+ return {"name": "get_memory", "content": content}
except Exception as e:
logger.error(f"记忆获取工具执行失败: {str(e)}")
- return {"name": "mid_chat_mem", "content": f"记忆获取失败: {str(e)}"}
+ return {"name": "get_memory", "content": f"记忆获取失败: {str(e)}"}
# 注册工具
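One caveat worth noting: GetMemoryTool splits the topic string with topic.split(","), while the parameter description only says "用逗号隔开", so a model that emits fullwidth commas or stray whitespace would produce odd keywords. A more tolerant parser could look like the sketch below (illustrative only, not part of this patch).

import re

def parse_topics(raw: str) -> list:
    # Split on ASCII commas, fullwidth commas, or 顿号, and drop empty entries.
    return [t.strip() for t in re.split(r"[,,、]", raw or "") if t.strip()]

print(parse_topics("天气, 编程,麦麦"))  # ['天气', '编程', '麦麦']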
diff --git a/src/do_tool/tool_can_use/get_time_date.py b/src/do_tool/tool_can_use/get_time_date.py
index c3c9c8376..6104026ef 100644
--- a/src/do_tool/tool_can_use/get_time_date.py
+++ b/src/do_tool/tool_can_use/get_time_date.py
@@ -1,9 +1,9 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
-from src.common.logger import get_module_logger
+from src.common.logger_manager import get_logger
from typing import Dict, Any
from datetime import datetime
-logger = get_module_logger("get_time_date")
+logger = get_logger("get_time_date")
class GetCurrentDateTimeTool(BaseTool):
@@ -17,7 +17,7 @@ class GetCurrentDateTimeTool(BaseTool):
"required": [],
}
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
"""执行获取当前时间、日期、年份和星期
Args:
diff --git a/src/do_tool/tool_can_use/lpmm_get_knowledge.py b/src/do_tool/tool_can_use/lpmm_get_knowledge.py
new file mode 100644
index 000000000..4dba1bc71
--- /dev/null
+++ b/src/do_tool/tool_can_use/lpmm_get_knowledge.py
@@ -0,0 +1,139 @@
+from src.do_tool.tool_can_use.base_tool import BaseTool
+from src.plugins.chat.utils import get_embedding
+
+# from src.common.database import db
+from src.common.logger_manager import get_logger
+from typing import Dict, Any
+from src.plugins.knowledge.knowledge_lib import qa_manager
+
+
+logger = get_logger("lpmm_get_knowledge_tool")
+
+
+class SearchKnowledgeFromLPMMTool(BaseTool):
+ """从LPMM知识库中搜索相关信息的工具"""
+
+ name = "lpmm_search_knowledge"
+ description = "从知识库中搜索相关信息,如果你需要知识,就使用这个工具"
+ parameters = {
+ "type": "object",
+ "properties": {
+ "query": {"type": "string", "description": "搜索查询关键词"},
+ "threshold": {"type": "number", "description": "相似度阈值,0.0到1.0之间"},
+ },
+ "required": ["query"],
+ }
+
+ async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
+ """执行知识库搜索
+
+ Args:
+            function_args: 工具参数,包含 query 与可选的 threshold
+
+ Returns:
+ Dict: 工具执行结果
+ """
+ try:
+ query = function_args.get("query")
+ # threshold = function_args.get("threshold", 0.4)
+
+ # 调用知识库搜索
+ embedding = await get_embedding(query, request_type="info_retrieval")
+ if embedding:
+ knowledge_info = qa_manager.get_knowledge(query)
+ logger.debug(f"知识库查询结果: {knowledge_info}")
+ if knowledge_info:
+ content = f"你知道这些知识: {knowledge_info}"
+ else:
+ content = f"你不太了解有关{query}的知识"
+ return {"name": "search_knowledge", "content": content}
+ return {"name": "search_knowledge", "content": f"无法获取关于'{query}'的嵌入向量"}
+ except Exception as e:
+ logger.error(f"知识库搜索工具执行失败: {str(e)}")
+ return {"name": "search_knowledge", "content": f"知识库搜索失败: {str(e)}"}
+
+ # def get_info_from_db(
+ # self, query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False
+ # ) -> Union[str, list]:
+ # """从数据库中获取相关信息
+
+ # Args:
+ # query_embedding: 查询的嵌入向量
+ # limit: 最大返回结果数
+ # threshold: 相似度阈值
+ # return_raw: 是否返回原始结果
+
+ # Returns:
+ # Union[str, list]: 格式化的信息字符串或原始结果列表
+ # """
+ # if not query_embedding:
+ # return "" if not return_raw else []
+
+ # # 使用余弦相似度计算
+ # pipeline = [
+ # {
+ # "$addFields": {
+ # "dotProduct": {
+ # "$reduce": {
+ # "input": {"$range": [0, {"$size": "$embedding"}]},
+ # "initialValue": 0,
+ # "in": {
+ # "$add": [
+ # "$$value",
+ # {
+ # "$multiply": [
+ # {"$arrayElemAt": ["$embedding", "$$this"]},
+ # {"$arrayElemAt": [query_embedding, "$$this"]},
+ # ]
+ # },
+ # ]
+ # },
+ # }
+ # },
+ # "magnitude1": {
+ # "$sqrt": {
+ # "$reduce": {
+ # "input": "$embedding",
+ # "initialValue": 0,
+ # "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
+ # }
+ # }
+ # },
+ # "magnitude2": {
+ # "$sqrt": {
+ # "$reduce": {
+ # "input": query_embedding,
+ # "initialValue": 0,
+ # "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
+ # }
+ # }
+ # },
+ # }
+ # },
+ # {"$addFields": {"similarity": {"$divide": ["$dotProduct", {"$multiply": ["$magnitude1", "$magnitude2"]}]}}},
+ # {
+ # "$match": {
+ # "similarity": {"$gte": threshold} # 只保留相似度大于等于阈值的结果
+ # }
+ # },
+ # {"$sort": {"similarity": -1}},
+ # {"$limit": limit},
+ # {"$project": {"content": 1, "similarity": 1}},
+ # ]
+
+ # results = list(db.knowledges.aggregate(pipeline))
+ # logger.debug(f"知识库查询结果数量: {len(results)}")
+
+ # if not results:
+ # return "" if not return_raw else []
+
+ # if return_raw:
+ # return results
+ # else:
+ # # 返回所有找到的内容,用换行分隔
+ # return "\n".join(str(result["content"]) for result in results)
+
+
+# 注册工具
+# register_tool(SearchKnowledgeTool)
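Since the register_tool call stays commented out here, SearchKnowledgeFromLPMMTool is not auto-registered and would have to be instantiated directly. A hedged usage sketch (it only runs inside the project environment, with an LPMM knowledge base and embedding model configured):

import asyncio
from src.do_tool.tool_can_use.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool

async def demo():
    tool = SearchKnowledgeFromLPMMTool()
    result = await tool.execute({"query": "麦麦是谁"})
    print(result["content"])

asyncio.run(demo())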
diff --git a/src/do_tool/tool_use.py b/src/do_tool/tool_use.py
index b14927be8..88289fe01 100644
--- a/src/do_tool/tool_use.py
+++ b/src/do_tool/tool_use.py
@@ -1,66 +1,60 @@
-from src.plugins.models.utils_model import LLM_request
-from src.plugins.config.config import global_config
-from src.plugins.chat.chat_stream import ChatStream
-from src.common.database import db
-import time
+from src.plugins.models.utils_model import LLMRequest
+from src.config.config import global_config
import json
-from src.common.logger import get_module_logger, TOOL_USE_STYLE_CONFIG, LogConfig
+from src.common.logger_manager import get_logger
from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance
-from src.heart_flow.sub_heartflow import SubHeartflow
+import traceback
+from src.plugins.person_info.relationship_manager import relationship_manager
+from src.plugins.chat.utils import parse_text_timestamps
+from src.plugins.chat.chat_stream import ChatStream
+from src.heart_flow.observation import ChattingObservation
-tool_use_config = LogConfig(
- # 使用消息发送专用样式
- console_format=TOOL_USE_STYLE_CONFIG["console_format"],
- file_format=TOOL_USE_STYLE_CONFIG["file_format"],
-)
-logger = get_module_logger("tool_use", config=tool_use_config)
+logger = get_logger("tool_use")
class ToolUser:
def __init__(self):
- self.llm_model_tool = LLM_request(
+ self.llm_model_tool = LLMRequest(
model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
)
+ @staticmethod
async def _build_tool_prompt(
- self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
+ message_txt: str, chat_stream: ChatStream = None, observation: ChattingObservation = None
):
"""构建工具使用的提示词
Args:
message_txt: 用户消息文本
- sender_name: 发送者名称
- chat_stream: 聊天流对象
+            chat_stream: 聊天流对象(可选)
+            observation: 观察对象(可选)
Returns:
str: 构建好的提示词
"""
- if subheartflow:
- mid_memory_info = subheartflow.observations[0].mid_memory_info
- # print(f"intol111111111111111111111111111111111222222222222mid_memory_info:{mid_memory_info}")
- else:
- mid_memory_info = ""
- new_messages = list(
- db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15)
- )
- new_messages_str = ""
- for msg in new_messages:
- if "detailed_plain_text" in msg:
- new_messages_str += f"{msg['detailed_plain_text']}"
+ if observation:
+ mid_memory_info = observation.mid_memory_info
# 这些信息应该从调用者传入,而不是从self获取
bot_name = global_config.BOT_NICKNAME
prompt = ""
prompt += mid_memory_info
prompt += "你正在思考如何回复群里的消息。\n"
- prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
- prompt += f"注意你就是{bot_name},{bot_name}指的就是你。"
+ prompt += "之前群里进行了如下讨论:\n"
+ prompt += message_txt
+ # prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
+ prompt += f"注意你就是{bot_name},{bot_name}是你的名字。根据之前的聊天记录补充问题信息,搜索时避开你的名字。\n"
+ # prompt += "必须调用 'lpmm_get_knowledge' 工具来获取知识。\n"
+ prompt += "你现在需要对群里的聊天内容进行回复,请你思考应该使用什么工具,然后选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"
+
+ prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+ prompt = parse_text_timestamps(prompt, mode="lite")
- prompt += "你现在需要对群里的聊天内容进行回复,现在选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"
return prompt
- def _define_tools(self):
+ @staticmethod
+ def _define_tools():
"""获取所有已注册工具的定义
Returns:
@@ -68,7 +62,8 @@ class ToolUser:
"""
return get_all_tool_definitions()
- async def _execute_tool_call(self, tool_call, message_txt: str):
+ @staticmethod
+ async def _execute_tool_call(tool_call):
"""执行特定的工具调用
Args:
@@ -89,7 +84,7 @@ class ToolUser:
return None
# 执行工具
- result = await tool_instance.execute(function_args, message_txt)
+ result = await tool_instance.execute(function_args)
if result:
# 直接使用 function_name 作为 tool_type
tool_type = function_name
@@ -106,22 +101,25 @@ class ToolUser:
logger.error(f"执行工具调用时发生错误: {str(e)}")
return None
- async def use_tool(
- self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
- ):
+ async def use_tool(self, message_txt: str, chat_stream: ChatStream = None, observation: ChattingObservation = None):
"""使用工具辅助思考,判断是否需要额外信息
Args:
message_txt: 用户消息文本
-            sender_name: 发送者名称
chat_stream: 聊天流对象
+ observation: 观察对象(可选)
Returns:
dict: 工具使用结果,包含结构化的信息
"""
try:
# 构建提示词
- prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream, subheartflow)
+ prompt = await self._build_tool_prompt(
+ message_txt=message_txt,
+ chat_stream=chat_stream,
+ observation=observation,
+ )
# 定义可用工具
tools = self._define_tools()
@@ -131,7 +129,6 @@ class ToolUser:
payload = {
"model": self.llm_model_tool.model_name,
"messages": [{"role": "user", "content": prompt}],
- "max_tokens": global_config.max_response_length,
"tools": tools,
"temperature": 0.2,
}
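For reference, a hedged sketch of the request/response shape ToolUser works with: an OpenAI-style function-calling payload like the one built above goes out with the registered tool definitions, and each returned tool_call carries a function name plus a JSON-encoded arguments string, which _execute_tool_call decodes before dispatching. The model name below is a placeholder, and get_tool_instance is assumed to look tools up by their registered name.

import json
from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance

payload = {
    "model": "any-function-calling-model",  # placeholder, not a project setting
    "messages": [{"role": "user", "content": "你还记得关于天气的事吗?"}],
    "tools": get_all_tool_definitions(),    # [{"type": "function", "function": {...}}, ...]
    "temperature": 0.2,
}

# A tool_call in the response is expected to look roughly like this:
tool_call = {"function": {"name": "get_memory", "arguments": json.dumps({"topic": "天气"})}}

async def run_tool_call(call: dict):
    # Simplified mirror of _execute_tool_call: decode arguments, look up the tool, execute it.
    function_name = call["function"]["name"]
    function_args = json.loads(call["function"]["arguments"])
    tool = get_tool_instance(function_name)  # assumed lookup-by-name helper
    return await tool.execute(function_args) if tool else None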
@@ -156,13 +153,15 @@ class ToolUser:
tool_calls_str = ""
for tool_call in tool_calls:
tool_calls_str += f"{tool_call['function']['name']}\n"
- logger.info(f"根据:\n{prompt}\n模型请求调用{len(tool_calls)}个工具: {tool_calls_str}")
+ logger.info(
+ f"根据:\n{prompt}\n\n内容:{content}\n\n模型请求调用{len(tool_calls)}个工具: {tool_calls_str}"
+ )
tool_results = []
structured_info = {} # 动态生成键
# 执行所有工具调用
for tool_call in tool_calls:
- result = await self._execute_tool_call(tool_call, message_txt)
+ result = await self._execute_tool_call(tool_call)
if result:
tool_results.append(result)
# 使用工具名称作为键
@@ -173,7 +172,7 @@ class ToolUser:
# 如果有工具结果,返回结构化的信息
if structured_info:
- logger.info(f"工具调用收集到结构化信息: {json.dumps(structured_info, ensure_ascii=False)}")
+ logger.debug(f"工具调用收集到结构化信息: {json.dumps(structured_info, ensure_ascii=False)}")
return {"used_tools": True, "structured_info": structured_info}
else:
# 没有工具调用
@@ -187,6 +186,7 @@ class ToolUser:
except Exception as e:
logger.error(f"工具调用过程中出错: {str(e)}")
+ logger.error(f"工具调用过程中出错: {traceback.format_exc()}")
return {
"used_tools": False,
"error": str(e),
diff --git a/src/gui/logger_gui.py b/src/gui/logger_gui.py
deleted file mode 100644
index ad6edafb8..000000000
--- a/src/gui/logger_gui.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# import customtkinter as ctk
-# import subprocess
-# import threading
-# import queue
-# import re
-# import os
-# import signal
-# from collections import deque
-# import sys
-
-# # 设置应用的外观模式和默认颜色主题
-# ctk.set_appearance_mode("dark")
-# ctk.set_default_color_theme("blue")
-
-
-# class LogViewerApp(ctk.CTk):
-# """日志查看器应用的主类,继承自customtkinter的CTk类"""
-
-# def __init__(self):
-# """初始化日志查看器应用的界面和状态"""
-# super().__init__()
-# self.title("日志查看器")
-# self.geometry("1200x800")
-
-# # 标记GUI是否运行中
-# self.is_running = True
-
-# # 程序关闭时的清理操作
-# self.protocol("WM_DELETE_WINDOW", self._on_closing)
-
-# # 初始化进程、日志队列、日志数据等变量
-# self.process = None
-# self.log_queue = queue.Queue()
-# self.log_data = deque(maxlen=10000) # 使用固定长度队列
-# self.available_levels = set()
-# self.available_modules = set()
-# self.sorted_modules = []
-# self.module_checkboxes = {} # 存储模块复选框的字典
-
-# # 日志颜色配置
-# self.color_config = {
-# "time": "#888888",
-# "DEBUG": "#2196F3",
-# "INFO": "#4CAF50",
-# "WARNING": "#FF9800",
-# "ERROR": "#F44336",
-# "module": "#D4D0AB",
-# "default": "#FFFFFF",
-# }
-
-# # 列可见性配置
-# self.column_visibility = {"show_time": True, "show_level": True, "show_module": True}
-
-# # 选中的日志等级和模块
-# self.selected_levels = set()
-# self.selected_modules = set()
-
-# # 创建界面组件并启动日志队列处理
-# self.create_widgets()
-# self.after(100, self.process_log_queue)
-
-# def create_widgets(self):
-# """创建应用界面的各个组件"""
-# self.grid_columnconfigure(0, weight=1)
-# self.grid_rowconfigure(1, weight=1)
-
-# # 控制面板
-# control_frame = ctk.CTkFrame(self)
-# control_frame.grid(row=0, column=0, sticky="ew", padx=10, pady=5)
-
-# self.start_btn = ctk.CTkButton(control_frame, text="启动", command=self.start_process)
-# self.start_btn.pack(side="left", padx=5)
-
-# self.stop_btn = ctk.CTkButton(control_frame, text="停止", command=self.stop_process, state="disabled")
-# self.stop_btn.pack(side="left", padx=5)
-
-# self.clear_btn = ctk.CTkButton(control_frame, text="清屏", command=self.clear_logs)
-# self.clear_btn.pack(side="left", padx=5)
-
-# column_filter_frame = ctk.CTkFrame(control_frame)
-# column_filter_frame.pack(side="left", padx=20)
-
-# self.time_check = ctk.CTkCheckBox(column_filter_frame, text="显示时间", command=self.refresh_logs)
-# self.time_check.pack(side="left", padx=5)
-# self.time_check.select()
-
-# self.level_check = ctk.CTkCheckBox(column_filter_frame, text="显示等级", command=self.refresh_logs)
-# self.level_check.pack(side="left", padx=5)
-# self.level_check.select()
-
-# self.module_check = ctk.CTkCheckBox(column_filter_frame, text="显示模块", command=self.refresh_logs)
-# self.module_check.pack(side="left", padx=5)
-# self.module_check.select()
-
-# # 筛选面板
-# filter_frame = ctk.CTkFrame(self)
-# filter_frame.grid(row=0, column=1, rowspan=2, sticky="ns", padx=5)
-
-# ctk.CTkLabel(filter_frame, text="日志等级筛选").pack(pady=5)
-# self.level_scroll = ctk.CTkScrollableFrame(filter_frame, width=150, height=200)
-# self.level_scroll.pack(fill="both", expand=True, padx=5)
-
-# ctk.CTkLabel(filter_frame, text="模块筛选").pack(pady=5)
-# self.module_filter_entry = ctk.CTkEntry(filter_frame, placeholder_text="输入模块过滤词")
-# self.module_filter_entry.pack(pady=5)
-# self.module_filter_entry.bind("", self.update_module_filter)
-
-# self.module_scroll = ctk.CTkScrollableFrame(filter_frame, width=300, height=200)
-# self.module_scroll.pack(fill="both", expand=True, padx=5)
-
-# self.log_text = ctk.CTkTextbox(self, wrap="word")
-# self.log_text.grid(row=1, column=0, sticky="nsew", padx=10, pady=5)
-
-# self.init_text_tags()
-
-# def update_module_filter(self, event):
-# """根据模块过滤词更新模块复选框的显示"""
-# filter_text = self.module_filter_entry.get().strip().lower()
-# for module, checkbox in self.module_checkboxes.items():
-# if filter_text in module.lower():
-# checkbox.pack(anchor="w", padx=5, pady=2)
-# else:
-# checkbox.pack_forget()
-
-# def update_filters(self, level, module):
-# """更新日志等级和模块的筛选器"""
-# if level not in self.available_levels:
-# self.available_levels.add(level)
-# self.add_checkbox(self.level_scroll, level, "level")
-
-# module_key = self.get_module_key(module)
-# if module_key not in self.available_modules:
-# self.available_modules.add(module_key)
-# self.sorted_modules = sorted(self.available_modules, key=lambda x: x.lower())
-# self.rebuild_module_checkboxes()
-
-# def rebuild_module_checkboxes(self):
-# """重新构建模块复选框"""
-# # 清空现有复选框
-# for widget in self.module_scroll.winfo_children():
-# widget.destroy()
-# self.module_checkboxes.clear()
-
-# # 重建排序后的复选框
-# for module in self.sorted_modules:
-# self.add_checkbox(self.module_scroll, module, "module")
-
-# def add_checkbox(self, parent, text, type_):
-# """在指定父组件中添加复选框"""
-
-# def update_filter():
-# current = cb.get()
-# if type_ == "level":
-# (self.selected_levels.add if current else self.selected_levels.discard)(text)
-# else:
-# (self.selected_modules.add if current else self.selected_modules.discard)(text)
-# self.refresh_logs()
-
-# cb = ctk.CTkCheckBox(parent, text=text, command=update_filter)
-# cb.select() # 初始选中
-
-# # 手动同步初始状态到集合(关键修复)
-# if type_ == "level":
-# self.selected_levels.add(text)
-# else:
-# self.selected_modules.add(text)
-
-# if type_ == "module":
-# self.module_checkboxes[text] = cb
-# cb.pack(anchor="w", padx=5, pady=2)
-# return cb
-
-# def check_filter(self, entry):
-# """检查日志条目是否符合当前筛选条件"""
-# level_ok = not self.selected_levels or entry["level"] in self.selected_levels
-# module_key = self.get_module_key(entry["module"])
-# module_ok = not self.selected_modules or module_key in self.selected_modules
-# return level_ok and module_ok
-
-# def init_text_tags(self):
-# """初始化日志文本的颜色标签"""
-# for tag, color in self.color_config.items():
-# self.log_text.tag_config(tag, foreground=color)
-# self.log_text.tag_config("default", foreground=self.color_config["default"])
-
-# def start_process(self):
-# """启动日志进程并开始读取输出"""
-# self.process = subprocess.Popen(
-# ["nb", "run"],
-# stdout=subprocess.PIPE,
-# stderr=subprocess.STDOUT,
-# text=True,
-# bufsize=1,
-# encoding="utf-8",
-# errors="ignore",
-# )
-# self.start_btn.configure(state="disabled")
-# self.stop_btn.configure(state="normal")
-# threading.Thread(target=self.read_output, daemon=True).start()
-
-# def stop_process(self):
-# """停止日志进程并清理相关资源"""
-# if self.process:
-# try:
-# if hasattr(self.process, "pid"):
-# if os.name == "nt":
-# subprocess.run(
-# ["taskkill", "/F", "/T", "/PID", str(self.process.pid)], check=True, capture_output=True
-# )
-# else:
-# os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
-# except (subprocess.CalledProcessError, ProcessLookupError, OSError) as e:
-# print(f"终止进程失败: {e}")
-# finally:
-# self.process = None
-# self.log_queue.queue.clear()
-# self.start_btn.configure(state="normal")
-# self.stop_btn.configure(state="disabled")
-# self.refresh_logs()
-
-# def read_output(self):
-# """读取日志进程的输出并放入队列"""
-# try:
-# while self.process and self.process.poll() is None and self.is_running:
-# line = self.process.stdout.readline()
-# if line:
-# self.log_queue.put(line)
-# else:
-# break # 避免空循环
-# self.process.stdout.close() # 确保关闭文件描述符
-# except ValueError: # 处理可能的I/O操作异常
-# pass
-
-# def process_log_queue(self):
-# """处理日志队列中的日志条目"""
-# while not self.log_queue.empty():
-# line = self.log_queue.get()
-# self.process_log_line(line)
-
-# # 仅在GUI仍在运行时继续处理队列
-# if self.is_running:
-# self.after(100, self.process_log_queue)
-
-# def process_log_line(self, line):
-# """解析单行日志并更新日志数据和筛选器"""
-# match = re.match(
-# r"""^
-# (?:(?P