diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
index 931624fb1..9c8cba5dc 100644
--- a/.github/workflows/ruff.yml
+++ b/.github/workflows/ruff.yml
@@ -9,6 +9,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+ ref: ${{ github.head_ref || github.ref_name }}
- uses: astral-sh/ruff-action@v3
- run: ruff check --fix
- run: ruff format
diff --git a/.gitignore b/.gitignore
index c2fb389ec..3e9b98685 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,8 @@ message_queue_window.bat
message_queue_window.txt
queue_update.txt
memory_graph.gml
+/src/do_tool/tool_can_use/auto_create_tool.py
+/src/do_tool/tool_can_use/execute_python_code_tool.py
.env
.env.*
.cursor
@@ -28,6 +30,9 @@ config/bot_config.toml
config/bot_config.toml.bak
src/plugins/remote/client_uuid.json
run_none.bat
+(测试版)麦麦生成人格.bat
+(临时版)麦麦开始学习.bat
+src/plugins/utils/statistic.py
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -237,3 +242,4 @@ logs
/config/*
run_none.bat
config/old/bot_config_20250405_212257.toml
+
diff --git a/README.md b/README.md
index f2ab0b75d..325e3ad22 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@
-## 新版0.6.0部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
+## 新版0.6.x部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
## 📝 项目简介
@@ -62,7 +62,7 @@
### 📢 版本信息
-**最新版本: v0.6.0** ([查看更新日志](changelogs/changelog.md))
+**最新版本: v0.6.2** ([查看更新日志](changelogs/changelog.md))
> [!WARNING]
> 请阅读教程后更新!!!!!!!
> 请阅读教程后更新!!!!!!!
@@ -86,7 +86,7 @@
### ⚠️ 重要提示
-- 升级到v0.6.0版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
+- 升级到v0.6.x版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
- 本版本基于MaiCore重构,通过nonebot插件与QQ平台交互
- 项目处于活跃开发阶段,功能和API可能随时调整
@@ -108,28 +108,29 @@
- [📚 核心Wiki文档](https://docs.mai-mai.org) - 项目最全面的文档中心,你可以了解麦麦有关的一切
### 最新版本部署教程(MaiCore版本)
-- [🚀 最新版本部署教程](https://docs.mai-mai.org/manual/deployment/mmc_deploy.html) - 基于MaiCore的新版本部署方式(与旧版本不兼容)
+- [🚀 最新版本部署教程](https://docs.mai-mai.org/manual/deployment/mmc_deploy_windows.html) - 基于MaiCore的新版本部署方式(与旧版本不兼容)
## 🎯 功能介绍
| 模块 | 主要功能 | 特点 |
|------|---------|------|
-| 💬 聊天系统 | • 思维流/推理聊天<br>• 关键词主动发言<br>• 多模型支持<br>• 动态prompt构建<br>• 私聊功能(PFC) | 拟人化交互 |
-| 🧠 思维流系统 | • 实时思考生成<br>• 自动启停机制<br>• 日程系统联动 | 智能化决策 |
-| 🧠 记忆系统 2.0 | • 优化记忆抽取<br>• 海马体记忆机制<br>• 聊天记录概括 | 持久化记忆 |
-| 😊 表情包系统 | • 情绪匹配发送<br>• GIF支持<br>• 自动收集与审查 | 丰富表达 |
+| 💬 聊天系统 | • 心流/推理聊天<br>• 关键词主动发言<br>• 多模型支持<br>• 动态prompt构建<br>• 私聊功能(PFC) | 拟人化交互 |
+| 🧠 心流系统 | • 实时思考生成<br>• 自动启停机制<br>• 日程系统联动<br>• 工具调用能力 | 智能化决策 |
+| 🧠 记忆系统 | • 优化记忆抽取<br>• 海马体记忆机制<br>• 聊天记录概括 | 持久化记忆 |
+| 😊 表情系统 | • 情绪匹配发送<br>• GIF支持<br>• 自动收集与审查 | 丰富表达 |
| 📅 日程系统 | • 动态日程生成<br>• 自定义想象力<br>• 思维流联动 | 智能规划 |
-| 👥 关系系统 2.0 | • 关系管理优化<br>• 丰富接口支持<br>• 个性化交互 | 深度社交 |
+| 👥 关系系统 | • 关系管理优化<br>• 丰富接口支持<br>• 个性化交互 | 深度社交 |
| 📊 统计系统 | • 使用数据统计<br>• LLM调用记录<br>• 实时控制台显示 | 数据可视 |
| 🔧 系统功能 | • 优雅关闭机制<br>• 自动数据保存<br>• 异常处理完善 | 稳定可靠 |
+| 🛠️ 工具系统 | • 知识获取工具<br>• 自动注册机制<br>• 多工具支持 | 扩展功能 |
## 📐 项目架构
```mermaid
graph TD
A[MaiCore] --> B[对话系统]
- A --> C[思维流系统]
+ A --> C[心流系统]
A --> D[记忆系统]
A --> E[情感系统]
B --> F[多模型支持]
diff --git a/changelogs/changelog.md b/changelogs/changelog.md
index 6b9898b5c..0ddb486bf 100644
--- a/changelogs/changelog.md
+++ b/changelogs/changelog.md
@@ -1,5 +1,58 @@
# Changelog
+## [0.6.2] - 2025-4-14
+
+### 摘要
+- MaiBot 0.6.2 版本发布!
+- 优化了心流的观察系统,优化提示词和表现,现在心流表现更好!
+- 新增工具调用能力,可以更好地获取信息
+- 本次更新主要围绕工具系统、心流系统、消息处理和代码优化展开,新增多个工具类,优化了心流系统的逻辑,改进了消息处理流程,并修复了多个问题。
+
+### 🌟 核心功能增强
+#### 工具系统
+- 新增了知识获取工具系统,支持通过心流调用获取多种知识
+- 新增了工具系统使用指南,详细说明工具结构、自动注册机制和添加步骤
+- 新增了多个实用工具类,包括心情调整工具`ChangeMoodTool`、关系查询工具`RelationshipTool`、数值比较工具`CompareNumbersTool`、日程获取工具`GetCurrentTaskTool`、上下文压缩工具`CompressContextTool`和知识获取工具`GetKnowledgeTool`
+- 更新了`ToolUser`类,支持自动获取已注册工具定义并调用`execute`方法
+- 需要配置支持工具调用的模型才能使用完整功能
+
+#### 心流系统
+- 新增了上下文压缩缓存功能,可以有更持久的记忆
+- 新增了心流系统的README.md文件,详细介绍了系统架构、主要功能和工作流程。
+- 优化了心流系统的逻辑,包括子心流自动清理和合理配置更新间隔。
+- 改进了心流观察系统,优化了提示词设计和系统表现,使心流运行更加稳定高效。
+- 更新了`Heartflow`类的方法和属性,支持异步生成提示词并提升生成质量。
+
+#### 消息处理
+- 改进了消息处理流程,包括回复检查、消息生成和发送逻辑。
+- 新增了`ReplyGenerator`类,用于根据观察信息和对话信息生成回复。
+- 优化了消息队列管理系统,支持按时间顺序处理消息。
+
+#### 现在可以启用更好的表情包发送系统
+
+### 💻 系统架构优化
+
+#### 部署支持
+- 更新了Docker部署文档,优化了服务配置和挂载路径。
+- 完善了Linux和Windows脚本支持。
+
+### 🐛 问题修复
+- 修复了消息处理器中的正则表达式匹配问题。
+- 修复了图像处理中的帧大小和拼接问题。
+- 修复了私聊时产生`reply`消息的bug。
+- 修复了配置文件加载时的版本兼容性问题。
+
+### 📚 文档更新
+- 更新了`README.md`文件,包括Python版本要求和协议信息。
+- 新增了工具系统和心流系统的详细文档。
+- 优化了部署相关文档的完整性。
+
+### 🔧 其他改进
+- 新增了崩溃日志记录器,记录崩溃信息到日志文件。
+- 优化了统计信息输出,在控制台显示详细统计信息。
+- 改进了异常处理机制,提升系统稳定性。
+- 现可配置部分模型的temp参数
+
## [0.6.0] - 2025-4-4
### 摘要
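
上面 changelog 里提到的工具系统,其公共写法可以在本次 diff 新增的 src/do_tool/tool_can_use/ 各文件中看到:每个工具都是 BaseTool 的子类,提供 name、description、parameters(JSON Schema)和异步 execute 方法,返回 {"name", "content"} 形式的结果,并由 base_tool 的自动注册机制收集(changelog 中称为"自动注册机制",各文件末尾的 register_tool 调用因此被注释掉)。下面给出一个最小示意,类名与返回内容均为假设,并非仓库中的实际文件:

```python
# 仅为示意:按本次 diff 中各工具的共同写法勾勒一个最小工具类,
# 类名 ExampleTool 与返回内容为假设。
from typing import Any, Dict

from src.do_tool.tool_can_use.base_tool import BaseTool
from src.common.logger import get_module_logger

logger = get_module_logger("example_tool")


class ExampleTool(BaseTool):
    """示例工具:演示 name / description / parameters / execute 的基本结构"""

    name = "example_tool"
    description = "示例工具,说明工具类的基本结构"
    parameters = {
        "type": "object",
        "properties": {
            "text": {"type": "string", "description": "输入文本"},
        },
        "required": ["text"],
    }

    async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
        # 统一返回 {"name": ..., "content": ...},供 ToolUser 按工具名汇总
        try:
            text = function_args.get("text", message_txt)
            return {"name": self.name, "content": f"收到:{text}"}
        except Exception as e:
            logger.error(f"示例工具执行失败: {str(e)}")
            return {"name": self.name, "content": f"执行失败: {str(e)}"}
```
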
diff --git a/changelogs/changelog_config.md b/changelogs/changelog_config.md
index 32912f691..e438ea31e 100644
--- a/changelogs/changelog_config.md
+++ b/changelogs/changelog_config.md
@@ -22,7 +22,7 @@
## [0.0.11] - 2025-3-12
### Added
- 新增了 `schedule` 配置项,用于配置日程表生成功能
-- 新增了 `response_spliter` 配置项,用于控制回复分割
+- 新增了 `response_splitter` 配置项,用于控制回复分割
- 新增了 `experimental` 配置项,用于实验性功能开关
- 新增了 `llm_observation` 和 `llm_sub_heartflow` 模型配置
- 新增了 `llm_heartflow` 模型配置
diff --git a/requirements.txt b/requirements.txt
index 0fcb31f83..45fb7e6e5 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/scripts/run.sh b/scripts/run.sh
index 342a23feb..b7ecbc849 100644
--- a/scripts/run.sh
+++ b/scripts/run.sh
@@ -1,10 +1,10 @@
#!/bin/bash
-# MaiCore & Nonebot adapter一键安装脚本 by Cookie_987
+# MaiCore & NapCat Adapter一键安装脚本 by Cookie_987
# 适用于Arch/Ubuntu 24.10/Debian 12/CentOS 9
# 请小心使用任何一键脚本!
-INSTALLER_VERSION="0.0.2-refactor"
+INSTALLER_VERSION="0.0.3-refactor"
LANG=C.UTF-8
# 如无法访问GitHub请修改此处镜像地址
@@ -31,7 +31,7 @@ DEFAULT_INSTALL_DIR="/opt/maicore"
# 服务名称
SERVICE_NAME="maicore"
SERVICE_NAME_WEB="maicore-web"
-SERVICE_NAME_NBADAPTER="maicore-nonebot-adapter"
+SERVICE_NAME_NBADAPTER="maibot-napcat-adapter"
IS_INSTALL_MONGODB=false
IS_INSTALL_NAPCAT=false
@@ -59,9 +59,9 @@ show_menu() {
"1" "启动MaiCore" \
"2" "停止MaiCore" \
"3" "重启MaiCore" \
- "4" "启动Nonebot adapter" \
- "5" "停止Nonebot adapter" \
- "6" "重启Nonebot adapter" \
+ "4" "启动NapCat Adapter" \
+ "5" "停止NapCat Adapter" \
+ "6" "重启NapCat Adapter" \
"7" "拉取最新MaiCore仓库" \
"8" "切换分支" \
"9" "退出" 3>&1 1>&2 2>&3)
@@ -83,15 +83,15 @@ show_menu() {
;;
4)
systemctl start ${SERVICE_NAME_NBADAPTER}
- whiptail --msgbox "✅Nonebot adapter已启动" 10 60
+ whiptail --msgbox "✅NapCat Adapter已启动" 10 60
;;
5)
systemctl stop ${SERVICE_NAME_NBADAPTER}
- whiptail --msgbox "🛑Nonebot adapter已停止" 10 60
+ whiptail --msgbox "🛑NapCat Adapter已停止" 10 60
;;
6)
systemctl restart ${SERVICE_NAME_NBADAPTER}
- whiptail --msgbox "🔄Nonebot adapter已重启" 10 60
+ whiptail --msgbox "🔄NapCat Adapter已重启" 10 60
;;
7)
update_dependencies
@@ -357,8 +357,8 @@ run_installation() {
# Python版本检查
check_python() {
PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
- if ! python3 -c "import sys; exit(0) if sys.version_info >= (3,9) else exit(1)"; then
- whiptail --title "⚠️ [4/6] Python 版本过低" --msgbox "检测到 Python 版本为 $PYTHON_VERSION,需要 3.9 或以上!\n请升级 Python 后重新运行本脚本。" 10 60
+ if ! python3 -c "import sys; exit(0) if sys.version_info >= (3,10) else exit(1)"; then
+ whiptail --title "⚠️ [4/6] Python 版本过低" --msgbox "检测到 Python 版本为 $PYTHON_VERSION,需要 3.10 或以上!\n请升级 Python 后重新运行本脚本。" 10 60
exit 1
fi
}
@@ -410,7 +410,7 @@ run_installation() {
# 确认安装
confirm_install() {
local confirm_msg="请确认以下更改:\n\n"
- confirm_msg+="📂 安装MaiCore、Nonebot Adapter到: $INSTALL_DIR\n"
+ confirm_msg+="📂 安装MaiCore、NapCat Adapter到: $INSTALL_DIR\n"
confirm_msg+="🔀 分支: $BRANCH\n"
[[ $IS_INSTALL_DEPENDENCIES == true ]] && confirm_msg+="📦 安装依赖:${missing_packages[@]}\n"
[[ $IS_INSTALL_MONGODB == true || $IS_INSTALL_NAPCAT == true ]] && confirm_msg+="📦 安装额外组件:\n"
@@ -499,50 +499,28 @@ EOF
}
echo -e "${GREEN}克隆 nonebot-plugin-maibot-adapters 仓库...${RESET}"
- git clone $GITHUB_REPO/MaiM-with-u/nonebot-plugin-maibot-adapters.git || {
- echo -e "${RED}克隆 nonebot-plugin-maibot-adapters 仓库失败!${RESET}"
+ git clone $GITHUB_REPO/MaiM-with-u/MaiBot-Napcat-Adapter.git || {
+ echo -e "${RED}克隆 MaiBot-Napcat-Adapter.git 仓库失败!${RESET}"
exit 1
}
echo -e "${GREEN}安装Python依赖...${RESET}"
pip install -r MaiBot/requirements.txt
- pip install nb-cli
- pip install nonebot-adapter-onebot
- pip install 'nonebot2[fastapi]'
+ cd MaiBot
+ pip install uv
+ uv pip install -i https://mirrors.aliyun.com/pypi/simple -r requirements.txt
+ cd ..
echo -e "${GREEN}安装maim_message依赖...${RESET}"
cd maim_message
- pip install -e .
+ uv pip install -i https://mirrors.aliyun.com/pypi/simple -e .
cd ..
- echo -e "${GREEN}部署Nonebot adapter...${RESET}"
- cd MaiBot
- mkdir nonebot-maibot-adapter
- cd nonebot-maibot-adapter
- cat > pyproject.toml <=3.9, <4.0"
-
-[tool.nonebot]
-adapters = [
- { name = "OneBot V11", module_name = "nonebot.adapters.onebot.v11" }
-]
-plugins = []
-plugin_dirs = ["src/plugins"]
-builtin_plugins = []
-EOF
-
- echo "Manually created by run.sh" > README.md
- mkdir src
- cp -r ../../nonebot-plugin-maibot-adapters/nonebot_plugin_maibot_adapters src/plugins/nonebot_plugin_maibot_adapters
+ echo -e "${GREEN}部署MaiBot Napcat Adapter...${RESET}"
+ cd MaiBot-Napcat-Adapter
+ uv pip install -i https://mirrors.aliyun.com/pypi/simple -r requirements.txt
cd ..
- cd ..
-
echo -e "${GREEN}同意协议...${RESET}"
@@ -590,13 +568,13 @@ EOF
cat > /etc/systemd/system/${SERVICE_NAME_NBADAPTER}.service < /etc/maicore_install.conf
diff --git a/src/common/logger.py b/src/common/logger.py
index 0a8839d2f..7365e34a6 100644
--- a/src/common/logger.py
+++ b/src/common/logger.py
@@ -102,10 +102,28 @@ MOOD_STYLE_CONFIG = {
"file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"),
},
"simple": {
- "console_format": ("{time:MM-DD HH:mm} | 心情 | {message}"),
+ "console_format": ("{time:MM-DD HH:mm} | 心情 | {message}"),
"file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"),
},
}
+# tool use
+TOOL_USE_STYLE_CONFIG = {
+ "advanced": {
+ "console_format": (
+ "{time:YYYY-MM-DD HH:mm:ss} | "
+ "{level: <8} | "
+ "{extra[module]: <12} | "
+ "工具使用 | "
+ "{message}"
+ ),
+ "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}"),
+ },
+ "simple": {
+ "console_format": ("{time:MM-DD HH:mm} | 工具使用 | {message}"),
+ "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}"),
+ },
+}
+
# relationship
RELATION_STYLE_CONFIG = {
@@ -308,6 +326,7 @@ SUB_HEARTFLOW_STYLE_CONFIG = (
) # noqa: E501
WILLING_STYLE_CONFIG = WILLING_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else WILLING_STYLE_CONFIG["advanced"]
CONFIG_STYLE_CONFIG = CONFIG_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CONFIG_STYLE_CONFIG["advanced"]
+TOOL_USE_STYLE_CONFIG = TOOL_USE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else TOOL_USE_STYLE_CONFIG["advanced"]
def is_registered_module(record: dict) -> bool:
diff --git a/src/do_tool/tool_can_use/change_mood.py b/src/do_tool/tool_can_use/change_mood.py
new file mode 100644
index 000000000..53410068f
--- /dev/null
+++ b/src/do_tool/tool_can_use/change_mood.py
@@ -0,0 +1,57 @@
+from src.do_tool.tool_can_use.base_tool import BaseTool
+from src.plugins.config.config import global_config
+from src.common.logger import get_module_logger
+from src.plugins.moods.moods import MoodManager
+from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
+
+from typing import Dict, Any
+
+logger = get_module_logger("change_mood_tool")
+
+
+class ChangeMoodTool(BaseTool):
+ """改变心情的工具"""
+
+ name = "change_mood"
+ description = "根据收到的内容和自身回复的内容,改变心情,当你回复了别人的消息,你可以使用这个工具"
+ parameters = {
+ "type": "object",
+ "properties": {
+ "text": {"type": "string", "description": "引起你改变心情的文本"},
+ "response_set": {"type": "list", "description": "你对文本的回复"},
+ },
+ "required": ["text", "response_set"],
+ }
+
+ async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]:
+ """执行心情改变
+
+ Args:
+ function_args: 工具参数,包含 text 与 response_set
+ message_txt: 原始消息文本
+
+ Returns:
+ Dict: 工具执行结果
+ """
+ try:
+ response_set = function_args.get("response_set")
+ message_processed_plain_text = function_args.get("text")
+
+ mood_manager = MoodManager.get_instance()
+ gpt = ResponseGenerator()
+
+ if response_set is None:
+ response_set = ["你还没有回复"]
+
+ ori_response = ",".join(response_set)
+ _stance, emotion = await gpt._get_emotion_tags(ori_response, message_processed_plain_text)
+ mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
+ return {"name": "change_mood", "content": f"你的心情刚刚变化了,现在的心情是: {emotion}"}
+ except Exception as e:
+ logger.error(f"心情改变工具执行失败: {str(e)}")
+ return {"name": "change_mood", "content": f"心情改变失败: {str(e)}"}
+
+
+# 注册工具
+# register_tool(ChangeMoodTool)
diff --git a/src/do_tool/tool_can_use/change_relationship.py b/src/do_tool/tool_can_use/change_relationship.py
new file mode 100644
index 000000000..9aa084855
--- /dev/null
+++ b/src/do_tool/tool_can_use/change_relationship.py
@@ -0,0 +1,44 @@
+# from src.plugins.person_info.relationship_manager import relationship_manager
+from src.common.logger import get_module_logger
+from src.do_tool.tool_can_use.base_tool import BaseTool
+# from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
+
+
+logger = get_module_logger("relationship_tool")
+
+
+class RelationshipTool(BaseTool):
+ name = "change_relationship"
+ description = "根据收到的文本和回复内容,修改与特定用户的关系值,当你回复了别人的消息,你可以使用这个工具"
+ parameters = {
+ "type": "object",
+ "properties": {
+ "text": {"type": "string", "description": "收到的文本"},
+ "changed_value": {"type": "number", "description": "变更值"},
+ "reason": {"type": "string", "description": "变更原因"},
+ },
+ "required": ["text", "changed_value", "reason"],
+ }
+
+ async def execute(self, args: dict, message_txt: str) -> dict:
+ """执行工具功能
+
+ Args:
+ args: 包含工具参数的字典(text、changed_value、reason)
+ message_txt: 原始消息文本
+
+ Returns:
+ dict: 包含执行结果的字典
+ """
+ try:
+ text = args.get("text")
+ changed_value = args.get("changed_value")
+ reason = args.get("reason")
+
+ return {"content": f"因为你刚刚因为{reason},所以你和发[{text}]这条消息的人的关系值变化为{changed_value}"}
+
+ except Exception as e:
+ logger.error(f"修改关系值时发生错误: {str(e)}")
+ return {"content": f"修改关系值失败: {str(e)}"}
diff --git a/src/do_tool/tool_can_use/compare_numbers_tool.py b/src/do_tool/tool_can_use/compare_numbers_tool.py
new file mode 100644
index 000000000..48cee5157
--- /dev/null
+++ b/src/do_tool/tool_can_use/compare_numbers_tool.py
@@ -0,0 +1,50 @@
+from src.do_tool.tool_can_use.base_tool import BaseTool
+from src.common.logger import get_module_logger
+from typing import Dict, Any
+
+logger = get_module_logger("compare_numbers_tool")
+
+
+class CompareNumbersTool(BaseTool):
+ """比较两个数大小的工具"""
+
+ name = "compare_numbers"
+ description = "比较两个数的大小,返回较大的数"
+ parameters = {
+ "type": "object",
+ "properties": {
+ "num1": {"type": "number", "description": "第一个数字"},
+ "num2": {"type": "number", "description": "第二个数字"},
+ },
+ "required": ["num1", "num2"],
+ }
+
+ async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ """执行比较两个数的大小
+
+ Args:
+ function_args: 工具参数
+ message_txt: 原始消息文本
+
+ Returns:
+ Dict: 工具执行结果
+ """
+ try:
+ num1 = function_args.get("num1")
+ num2 = function_args.get("num2")
+
+ if num1 > num2:
+ result = f"{num1} 大于 {num2}"
+ elif num1 < num2:
+ result = f"{num1} 小于 {num2}"
+ else:
+ result = f"{num1} 等于 {num2}"
+
+ return {"name": self.name, "content": result}
+ except Exception as e:
+ logger.error(f"比较数字失败: {str(e)}")
+ return {"name": self.name, "content": f"比较数字失败: {str(e)}"}
+
+
+# 注册工具
+# register_tool(CompareNumbersTool)
diff --git a/src/do_tool/tool_can_use/fibonacci_sequence_tool.py b/src/do_tool/tool_can_use/fibonacci_sequence_tool.py
deleted file mode 100644
index 4609b18a0..000000000
--- a/src/do_tool/tool_can_use/fibonacci_sequence_tool.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
-from src.common.logger import get_module_logger
-from typing import Dict, Any
-
-logger = get_module_logger("fibonacci_sequence_tool")
-
-
-class FibonacciSequenceTool(BaseTool):
- """生成斐波那契数列的工具"""
-
- name = "fibonacci_sequence"
- description = "生成指定长度的斐波那契数列"
- parameters = {
- "type": "object",
- "properties": {"n": {"type": "integer", "description": "斐波那契数列的长度", "minimum": 1}},
- "required": ["n"],
- }
-
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
- """执行工具功能
-
- Args:
- function_args: 工具参数
- message_txt: 原始消息文本
-
- Returns:
- Dict: 工具执行结果
- """
- try:
- n = function_args.get("n")
- if n <= 0:
- raise ValueError("参数n必须大于0")
-
- sequence = []
- a, b = 0, 1
- for _ in range(n):
- sequence.append(a)
- a, b = b, a + b
-
- return {"name": self.name, "content": sequence}
- except Exception as e:
- logger.error(f"fibonacci_sequence工具执行失败: {str(e)}")
- return {"name": self.name, "content": f"执行失败: {str(e)}"}
-
-
-# 注册工具
-register_tool(FibonacciSequenceTool)
diff --git a/src/do_tool/tool_can_use/generate_buddha_emoji_tool.py b/src/do_tool/tool_can_use/generate_buddha_emoji_tool.py
deleted file mode 100644
index e704b6015..000000000
--- a/src/do_tool/tool_can_use/generate_buddha_emoji_tool.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
-from src.common.logger import get_module_logger
-from typing import Dict, Any
-
-logger = get_module_logger("generate_buddha_emoji_tool")
-
-
-class GenerateBuddhaEmojiTool(BaseTool):
- """生成佛祖颜文字的工具类"""
-
- name = "generate_buddha_emoji"
- description = "生成一个佛祖的颜文字表情"
- parameters = {
- "type": "object",
- "properties": {
- # 无参数
- },
- "required": [],
- }
-
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
- """执行工具功能,生成佛祖颜文字
-
- Args:
- function_args: 工具参数
- message_txt: 原始消息文本
-
- Returns:
- Dict: 工具执行结果
- """
- try:
- buddha_emoji = "这是一个佛祖emoji:༼ つ ◕_◕ ༽つ"
-
- return {"name": self.name, "content": buddha_emoji}
- except Exception as e:
- logger.error(f"generate_buddha_emoji工具执行失败: {str(e)}")
- return {"name": self.name, "content": f"执行失败: {str(e)}"}
-
-
-# 注册工具
-register_tool(GenerateBuddhaEmojiTool)
diff --git a/src/do_tool/tool_can_use/generate_cmd_tutorial_tool.py b/src/do_tool/tool_can_use/generate_cmd_tutorial_tool.py
deleted file mode 100644
index 3a9f9bba1..000000000
--- a/src/do_tool/tool_can_use/generate_cmd_tutorial_tool.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
-from src.common.logger import get_module_logger
-from typing import Dict, Any
-
-logger = get_module_logger("generate_cmd_tutorial_tool")
-
-
-class GenerateCmdTutorialTool(BaseTool):
- """生成Windows CMD基本操作教程的工具"""
-
- name = "generate_cmd_tutorial"
- description = "生成关于Windows命令提示符(CMD)的基本操作教程,包括常用命令和使用方法"
- parameters = {"type": "object", "properties": {}, "required": []}
-
- async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
- """执行工具功能
-
- Args:
- function_args: 工具参数
- message_txt: 原始消息文本
-
- Returns:
- Dict: 工具执行结果
- """
- try:
- tutorial_content = """
-# Windows CMD 基本操作教程
-
-## 1. 基本导航命令
-- `dir`: 列出当前目录下的文件和文件夹
-- `cd <目录名>`: 进入指定目录
-- `cd..`: 返回上一级目录
-- `cd\\`: 返回根目录
-
-## 2. 文件操作命令
-- `copy <源文件> <目标位置>`: 复制文件
-- `move <源文件> <目标位置>`: 移动文件
-- `del <文件名>`: 删除文件
-- `ren <旧文件名> <新文件名>`: 重命名文件
-
-## 3. 系统信息命令
-- `systeminfo`: 显示系统配置信息
-- `hostname`: 显示计算机名称
-- `ver`: 显示Windows版本
-
-## 4. 网络相关命令
-- `ipconfig`: 显示网络配置信息
-- `ping <主机名或IP>`: 测试网络连接
-- `tracert <主机名或IP>`: 跟踪网络路径
-
-## 5. 实用技巧
-- 按Tab键可以自动补全文件名或目录名
-- 使用`> <文件名>`可以将命令输出重定向到文件
-- 使用`| more`可以分页显示长输出
-
-注意:使用命令时要小心,特别是删除操作。
-"""
-
- return {"name": self.name, "content": tutorial_content}
- except Exception as e:
- logger.error(f"generate_cmd_tutorial工具执行失败: {str(e)}")
- return {"name": self.name, "content": f"执行失败: {str(e)}"}
-
-
-# 注册工具
-register_tool(GenerateCmdTutorialTool)
diff --git a/src/do_tool/tool_can_use/get_current_task.py b/src/do_tool/tool_can_use/get_current_task.py
index 1975c40b0..d5660f6ab 100644
--- a/src/do_tool/tool_can_use/get_current_task.py
+++ b/src/do_tool/tool_can_use/get_current_task.py
@@ -1,7 +1,8 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
+from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.schedule.schedule_generator import bot_schedule
from src.common.logger import get_module_logger
from typing import Dict, Any
+from datetime import datetime
logger = get_module_logger("get_current_task_tool")
@@ -9,19 +10,19 @@ logger = get_module_logger("get_current_task_tool")
class GetCurrentTaskTool(BaseTool):
"""获取当前正在做的事情/最近的任务工具"""
- name = "get_current_task"
- description = "获取当前正在做的事情/最近的任务"
+ name = "get_schedule"
+ description = "获取当前正在做的事情,或者某个时间点/时间段的日程信息"
parameters = {
"type": "object",
"properties": {
- "num": {"type": "integer", "description": "要获取的任务数量"},
- "time_info": {"type": "boolean", "description": "是否包含时间信息"},
+ "start_time": {"type": "string", "description": "开始时间,格式为'HH:MM',填写current则获取当前任务"},
+ "end_time": {"type": "string", "description": "结束时间,格式为'HH:MM',填写current则获取当前任务"},
},
- "required": [],
+ "required": ["start_time", "end_time"],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
- """执行获取当前任务
+ """执行获取当前任务或指定时间段的日程信息
Args:
function_args: 工具参数
@@ -30,25 +31,29 @@ class GetCurrentTaskTool(BaseTool):
Returns:
Dict: 工具执行结果
"""
- try:
- # 获取参数,如果没有提供则使用默认值
- num = function_args.get("num", 1)
- time_info = function_args.get("time_info", False)
+ start_time = function_args.get("start_time")
+ end_time = function_args.get("end_time")
- # 调用日程系统获取当前任务
- current_task = bot_schedule.get_current_num_task(num=num, time_info=time_info)
-
- # 格式化返回结果
+ # 如果 start_time 或 end_time 为 "current",则获取当前任务
+ if start_time == "current" or end_time == "current":
+ current_task = bot_schedule.get_current_num_task(num=1, time_info=True)
+ current_time = datetime.now().strftime("%H:%M:%S")
+ current_date = datetime.now().strftime("%Y-%m-%d")
if current_task:
- task_info = current_task
+ task_info = f"{current_date} {current_time},你在{current_task}"
else:
- task_info = "当前没有正在进行的任务"
+ task_info = f"{current_time} {current_date},没在做任何事情"
+ # 如果提供了时间范围,则获取该时间段的日程信息
+ elif start_time and end_time:
+ tasks = await bot_schedule.get_task_from_time_to_time(start_time, end_time)
+ if tasks:
+ task_list = []
+ for task in tasks:
+ task_time = task[0].strftime("%H:%M")
+ task_content = task[1]
+ task_list.append(f"{task_time}时,{task_content}")
+ task_info = "\n".join(task_list)
+ else:
+ task_info = f"在 {start_time} 到 {end_time} 之间没有找到日程信息"
- return {"name": "get_current_task", "content": f"当前任务信息: {task_info}"}
- except Exception as e:
- logger.error(f"获取当前任务工具执行失败: {str(e)}")
- return {"name": "get_current_task", "content": f"获取当前任务失败: {str(e)}"}
-
-
-# 注册工具
-register_tool(GetCurrentTaskTool)
+ return {"name": "get_current_task", "content": f"日程信息: {task_info}"}
diff --git a/src/do_tool/tool_can_use/get_knowledge.py b/src/do_tool/tool_can_use/get_knowledge.py
index 0b492f11a..b78c07750 100644
--- a/src/do_tool/tool_can_use/get_knowledge.py
+++ b/src/do_tool/tool_can_use/get_knowledge.py
@@ -1,4 +1,4 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
+from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.chat.utils import get_embedding
from src.common.database import db
from src.common.logger import get_module_logger
@@ -132,4 +132,4 @@ class SearchKnowledgeTool(BaseTool):
# 注册工具
-register_tool(SearchKnowledgeTool)
+# register_tool(SearchKnowledgeTool)
diff --git a/src/do_tool/tool_can_use/get_memory.py b/src/do_tool/tool_can_use/get_memory.py
index 16af4c644..6a3c1c391 100644
--- a/src/do_tool/tool_can_use/get_memory.py
+++ b/src/do_tool/tool_can_use/get_memory.py
@@ -1,15 +1,15 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
+from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.memory_system.Hippocampus import HippocampusManager
from src.common.logger import get_module_logger
from typing import Dict, Any
-logger = get_module_logger("get_memory_tool")
+logger = get_module_logger("mid_chat_mem_tool")
class GetMemoryTool(BaseTool):
"""从记忆系统中获取相关记忆的工具"""
- name = "get_memory"
+ name = "mid_chat_mem"
description = "从记忆系统中获取相关记忆"
parameters = {
"type": "object",
@@ -49,11 +49,11 @@ class GetMemoryTool(BaseTool):
else:
content = f"你不太记得有关{text}的记忆,你对此不太了解"
- return {"name": "get_memory", "content": content}
+ return {"name": "mid_chat_mem", "content": content}
except Exception as e:
logger.error(f"记忆获取工具执行失败: {str(e)}")
- return {"name": "get_memory", "content": f"记忆获取失败: {str(e)}"}
+ return {"name": "mid_chat_mem", "content": f"记忆获取失败: {str(e)}"}
# 注册工具
-register_tool(GetMemoryTool)
+# register_tool(GetMemoryTool)
diff --git a/src/do_tool/tool_can_use/get_time_date.py b/src/do_tool/tool_can_use/get_time_date.py
new file mode 100644
index 000000000..c3c9c8376
--- /dev/null
+++ b/src/do_tool/tool_can_use/get_time_date.py
@@ -0,0 +1,38 @@
+from src.do_tool.tool_can_use.base_tool import BaseTool
+from src.common.logger import get_module_logger
+from typing import Dict, Any
+from datetime import datetime
+
+logger = get_module_logger("get_time_date")
+
+
+class GetCurrentDateTimeTool(BaseTool):
+ """获取当前时间、日期、年份和星期的工具"""
+
+ name = "get_current_date_time"
+ description = "当有人询问或者涉及到具体时间或者日期的时候,必须使用这个工具"
+ parameters = {
+ "type": "object",
+ "properties": {},
+ "required": [],
+ }
+
+ async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ """执行获取当前时间、日期、年份和星期
+
+ Args:
+ function_args: 工具参数(此工具不使用)
+ message_txt: 原始消息文本(此工具不使用)
+
+ Returns:
+ Dict: 工具执行结果
+ """
+ current_time = datetime.now().strftime("%H:%M:%S")
+ current_date = datetime.now().strftime("%Y-%m-%d")
+ current_year = datetime.now().strftime("%Y")
+ current_weekday = datetime.now().strftime("%A")
+
+ return {
+ "name": "get_current_date_time",
+ "content": f"当前时间: {current_time}, 日期: {current_date}, 年份: {current_year}, 星期: {current_weekday}",
+ }
diff --git a/src/do_tool/tool_can_use/mid_chat_mem.py b/src/do_tool/tool_can_use/mid_chat_mem.py
new file mode 100644
index 000000000..26d26704a
--- /dev/null
+++ b/src/do_tool/tool_can_use/mid_chat_mem.py
@@ -0,0 +1,40 @@
+from src.do_tool.tool_can_use.base_tool import BaseTool
+from src.common.logger import get_module_logger
+from typing import Dict, Any
+
+logger = get_module_logger("get_mid_memory_tool")
+
+
+class GetMidMemoryTool(BaseTool):
+ """从记忆系统中获取相关记忆的工具"""
+
+ name = "mid_chat_mem"
+ description = "之前的聊天内容中获取具体信息,当最新消息提到,或者你需要回复的消息中提到,你可以使用这个工具"
+ parameters = {
+ "type": "object",
+ "properties": {
+ "id": {"type": "integer", "description": "要查询的聊天记录id"},
+ },
+ "required": ["id"],
+ }
+
+ async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
+ """执行记忆获取
+
+ Args:
+ function_args: 工具参数
+ message_txt: 原始消息文本
+
+ Returns:
+ Dict: 工具执行结果
+ """
+ try:
+ id = function_args.get("id")
+ return {"name": "mid_chat_mem", "content": str(id)}
+ except Exception as e:
+ logger.error(f"聊天记录获取工具执行失败: {str(e)}")
+ return {"name": "mid_chat_mem", "content": f"聊天记录获取失败: {str(e)}"}
+
+
+# 注册工具
+# register_tool(GetMidMemoryTool)
diff --git a/src/do_tool/tool_can_use/send_emoji.py b/src/do_tool/tool_can_use/send_emoji.py
new file mode 100644
index 000000000..9cd48f0e4
--- /dev/null
+++ b/src/do_tool/tool_can_use/send_emoji.py
@@ -0,0 +1,25 @@
+from src.do_tool.tool_can_use.base_tool import BaseTool
+from src.common.logger import get_module_logger
+
+from typing import Dict, Any
+
+logger = get_module_logger("send_emoji_tool")
+
+
+class SendEmojiTool(BaseTool):
+ """发送表情包的工具"""
+
+ name = "send_emoji"
+ description = "当你觉得需要表达情感,或者帮助表达,可以使用这个工具发送表情包"
+ parameters = {
+ "type": "object",
+ "properties": {"text": {"type": "string", "description": "要发送的表情包描述"}},
+ "required": ["text"],
+ }
+
+ async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]:
+ text = function_args.get("text", message_txt)
+ return {
+ "name": "send_emoji",
+ "content": text,
+ }
diff --git a/src/do_tool/tool_use.py b/src/do_tool/tool_use.py
index 51bc37568..b14927be8 100644
--- a/src/do_tool/tool_use.py
+++ b/src/do_tool/tool_use.py
@@ -4,19 +4,27 @@ from src.plugins.chat.chat_stream import ChatStream
from src.common.database import db
import time
import json
-from src.common.logger import get_module_logger
+from src.common.logger import get_module_logger, TOOL_USE_STYLE_CONFIG, LogConfig
from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance
+from src.heart_flow.sub_heartflow import SubHeartflow
-logger = get_module_logger("tool_use")
+tool_use_config = LogConfig(
+ # 使用消息发送专用样式
+ console_format=TOOL_USE_STYLE_CONFIG["console_format"],
+ file_format=TOOL_USE_STYLE_CONFIG["file_format"],
+)
+logger = get_module_logger("tool_use", config=tool_use_config)
class ToolUser:
def __init__(self):
self.llm_model_tool = LLM_request(
- model=global_config.llm_heartflow, temperature=0.2, max_tokens=1000, request_type="tool_use"
+ model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
)
- async def _build_tool_prompt(self, message_txt: str, sender_name: str, chat_stream: ChatStream):
+ async def _build_tool_prompt(
+ self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
+ ):
"""构建工具使用的提示词
Args:
@@ -27,6 +35,12 @@ class ToolUser:
Returns:
str: 构建好的提示词
"""
+ if subheartflow:
+ mid_memory_info = subheartflow.observations[0].mid_memory_info
+ # print(f"intol111111111111111111111111111111111222222222222mid_memory_info:{mid_memory_info}")
+ else:
+ mid_memory_info = ""
+
new_messages = list(
db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15)
)
@@ -38,11 +52,12 @@ class ToolUser:
# 这些信息应该从调用者传入,而不是从self获取
bot_name = global_config.BOT_NICKNAME
prompt = ""
+ prompt += mid_memory_info
prompt += "你正在思考如何回复群里的消息。\n"
prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
prompt += f"注意你就是{bot_name},{bot_name}指的就是你。"
- prompt += "你现在需要对群里的聊天内容进行回复,现在请你思考,你是否需要额外的信息,或者一些工具来帮你回复,比如回忆或者搜寻已有的知识,或者了解你现在正在做什么,请输出你需要的工具,或者你需要的额外信息。"
+ prompt += "你现在需要对群里的聊天内容进行回复,现在选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"
return prompt
def _define_tools(self):
@@ -76,10 +91,14 @@ class ToolUser:
# 执行工具
result = await tool_instance.execute(function_args, message_txt)
if result:
+ # 直接使用 function_name 作为 tool_type
+ tool_type = function_name
+
return {
"tool_call_id": tool_call["id"],
"role": "tool",
"name": function_name,
+ "type": tool_type,
"content": result["content"],
}
return None
@@ -87,7 +106,9 @@ class ToolUser:
logger.error(f"执行工具调用时发生错误: {str(e)}")
return None
- async def use_tool(self, message_txt: str, sender_name: str, chat_stream: ChatStream):
+ async def use_tool(
+ self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
+ ):
"""使用工具辅助思考,判断是否需要额外信息
Args:
@@ -96,14 +117,15 @@ class ToolUser:
chat_stream: 聊天流对象
Returns:
- dict: 工具使用结果
+ dict: 工具使用结果,包含结构化的信息
"""
try:
# 构建提示词
- prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream)
+ prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream, subheartflow)
# 定义可用工具
tools = self._define_tools()
+ logger.trace(f"工具定义: {tools}")
# 使用llm_model_tool发送带工具定义的请求
payload = {
@@ -114,7 +136,7 @@ class ToolUser:
"temperature": 0.2,
}
- logger.debug(f"发送工具调用请求,模型: {self.llm_model_tool.model_name}")
+ logger.trace(f"发送工具调用请求,模型: {self.llm_model_tool.model_name}")
# 发送请求获取模型是否需要调用工具
response = await self.llm_model_tool._execute_request(
endpoint="/chat/completions", payload=payload, prompt=prompt
@@ -123,36 +145,40 @@ class ToolUser:
# 根据返回值数量判断是否有工具调用
if len(response) == 3:
content, reasoning_content, tool_calls = response
- logger.info(f"工具思考: {tool_calls}")
+ # logger.info(f"工具思考: {tool_calls}")
+ # logger.debug(f"工具思考: {content}")
# 检查响应中工具调用是否有效
if not tool_calls:
- logger.info("模型返回了空的tool_calls列表")
+ logger.debug("模型返回了空的tool_calls列表")
return {"used_tools": False}
- logger.info(f"模型请求调用{len(tool_calls)}个工具")
+ tool_calls_str = ""
+ for tool_call in tool_calls:
+ tool_calls_str += f"{tool_call['function']['name']}\n"
+ logger.info(f"根据:\n{prompt}\n模型请求调用{len(tool_calls)}个工具: {tool_calls_str}")
tool_results = []
- collected_info = ""
+ structured_info = {} # 动态生成键
# 执行所有工具调用
for tool_call in tool_calls:
result = await self._execute_tool_call(tool_call, message_txt)
if result:
tool_results.append(result)
- # 将工具结果添加到收集的信息中
- collected_info += f"\n{result['name']}返回结果: {result['content']}\n"
+ # 使用工具名称作为键
+ tool_name = result["name"]
+ if tool_name not in structured_info:
+ structured_info[tool_name] = []
+ structured_info[tool_name].append({"name": result["name"], "content": result["content"]})
- # 如果有工具结果,直接返回收集的信息
- if collected_info:
- logger.info(f"工具调用收集到信息: {collected_info}")
- return {
- "used_tools": True,
- "collected_info": collected_info,
- }
+ # 如果有工具结果,返回结构化的信息
+ if structured_info:
+ logger.info(f"工具调用收集到结构化信息: {json.dumps(structured_info, ensure_ascii=False)}")
+ return {"used_tools": True, "structured_info": structured_info}
else:
# 没有工具调用
content, reasoning_content = response
- logger.info("模型没有请求调用任何工具")
+ logger.debug("模型没有请求调用任何工具")
# 如果没有工具调用或处理失败,直接返回原始思考
return {
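
tool_use.py 的这处改动把 use_tool 的返回从拼接字符串 collected_info 换成了按工具名组织的 structured_info。下面是一个假设性的示意(键名取自本次 diff 中的工具,content 为虚构数据),以及下游把它拼进提示词的方式:

```python
# 仅为示意:use_tool 返回的 structured_info 以工具名为键,
# 每个值是若干 {"name", "content"} 结果;具体内容取决于实际执行的工具。
structured_info = {
    "get_current_date_time": [
        {"name": "get_current_date_time", "content": "当前时间: 20:30:00, 日期: 2025-04-14, ..."},
    ],
    "mid_chat_mem": [
        {"name": "mid_chat_mem", "content": "1744600000"},
    ],
}

# SubHeartflow.do_thinking_before_reply 中以同样的结构拼出 extra_info_prompt
# (见后面 sub_heartflow.py 的改动):
extra_info_prompt = ""
for tool_name, tool_data in structured_info.items():
    extra_info_prompt += f"{tool_name} 相关信息:\n"
    for item in tool_data:
        extra_info_prompt += f"- {item['name']}: {item['content']}\n"
```
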
diff --git a/src/heart_flow/README.md b/src/heart_flow/README.md
new file mode 100644
index 000000000..5e442d8f7
--- /dev/null
+++ b/src/heart_flow/README.md
@@ -0,0 +1,82 @@
+# 心流系统 (Heart Flow System)
+
+心流系统是一个模拟AI机器人内心思考和情感流动的核心系统。它通过多层次的心流结构,使AI能够对外界信息进行观察、思考和情感反应,从而产生更自然的对话和行为。
+
+## 系统架构
+
+### 1. 主心流 (Heartflow)
+- 位于 `heartflow.py`
+- 作为整个系统的主控制器
+- 负责管理和协调多个子心流
+- 维护AI的整体思维状态
+- 定期进行全局思考更新
+
+### 2. 子心流 (SubHeartflow)
+- 位于 `sub_heartflow.py`
+- 处理具体的对话场景(如群聊)
+- 维护特定场景下的思维状态
+- 通过观察者模式接收和处理信息
+- 能够进行独立的思考和回复判断
+
+### 3. 观察系统 (Observation)
+- 位于 `observation.py`
+- 负责收集和处理外部信息
+- 支持多种观察类型(如聊天观察)
+- 对信息进行实时总结和更新
+
+## 主要功能
+
+### 思维系统
+- 定期进行思维更新
+- 维护短期记忆和思维连续性
+- 支持多层次的思维处理
+
+### 情感系统
+- 情绪状态管理
+- 回复意愿判断
+- 情感因素影响决策
+
+### 交互系统
+- 群聊消息处理
+- 多场景并行处理
+- 智能回复生成
+
+## 工作流程
+
+1. 主心流启动并创建必要的子心流
+2. 子心流通过观察者接收外部信息
+3. 系统进行信息处理和思维更新
+4. 根据情感状态和思维结果决定是否回复
+5. 生成合适的回复并更新思维状态
+
+## 使用说明
+
+### 创建新的子心流
+```python
+heartflow = Heartflow()
+subheartflow = heartflow.create_subheartflow(chat_id)
+```
+
+### 添加观察者
+```python
+observation = ChattingObservation(chat_id)
+subheartflow.add_observation(observation)
+```
+
+### 启动心流系统
+```python
+await heartflow.heartflow_start_working()
+```
+
+## 配置说明
+
+系统的主要配置参数:
+- `sub_heart_flow_stop_time`: 子心流停止时间
+- `sub_heart_flow_freeze_time`: 子心流冻结时间
+- `heart_flow_update_interval`: 心流更新间隔
+
+## 注意事项
+
+1. 子心流会在长时间不活跃后自动清理
+2. 需要合理配置更新间隔以平衡性能和响应速度
+3. 观察系统会限制消息处理数量以避免过载
\ No newline at end of file
diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py
index aef23f964..cc225be8f 100644
--- a/src/heart_flow/observation.py
+++ b/src/heart_flow/observation.py
@@ -4,6 +4,10 @@ from datetime import datetime
from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config
from src.common.database import db
+from src.common.logger import get_module_logger
+import traceback
+
+logger = get_module_logger("observation")
# 所有观察的基类
@@ -27,30 +31,59 @@ class ChattingObservation(Observation):
self.name = global_config.BOT_NICKNAME
self.nick_name = global_config.BOT_ALIAS_NAMES
- self.observe_times = 0
+ self.max_now_obs_len = global_config.observation_context_size
+ self.overlap_len = global_config.compressed_length
+ self.mid_memorys = []
+ self.max_mid_memory_len = global_config.compress_length_limit
+ self.mid_memory_info = ""
+ self.now_message_info = ""
- self.summary_count = 0 # 30秒内的更新次数
- self.max_update_in_30s = 2 # 30秒内最多更新2次
- self.last_summary_time = 0 # 上次更新summary的时间
-
- self.sub_observe = None
+ self.updating_old = False
self.llm_summary = LLM_request(
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
# 进行一次观察 返回观察结果observe_info
+ def get_observe_info(self, ids=None):
+ if ids:
+ mid_memory_str = ""
+ for id in ids:
+ print(f"id:{id}")
+ try:
+ for mid_memory in self.mid_memorys:
+ if mid_memory["id"] == id:
+ mid_memory_by_id = mid_memory
+ msg_str = ""
+ for msg in mid_memory_by_id["messages"]:
+ msg_str += f"{msg['detailed_plain_text']}"
+ time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
+ # mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
+ mid_memory_str += f"{msg_str}\n"
+ except Exception as e:
+ logger.error(f"获取mid_memory_id失败: {e}")
+ traceback.print_exc()
+ # print(f"获取mid_memory_id失败: {e}")
+ return self.now_message_info
+
+ return mid_memory_str + "现在群里正在聊:\n" + self.now_message_info
+
+ else:
+ return self.now_message_info
+
async def observe(self):
- # 查找新消息,限制最多30条
+ # 查找新消息
new_messages = list(
- db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}})
- .sort("time", 1)
- .limit(15)
- ) # 按时间正序排列,最多15条
+ db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}}).sort("time", 1)
+ ) # 按时间正序排列
if not new_messages:
return self.observe_info # 没有新消息,返回上次观察结果
+ self.last_observe_time = new_messages[-1]["time"]
+
+ self.talking_message.extend(new_messages)
+
# 将新消息转换为字符串格式
new_messages_str = ""
for msg in new_messages:
@@ -60,83 +93,56 @@ class ChattingObservation(Observation):
# print(f"new_messages_str:{new_messages_str}")
# 将新消息添加到talking_message,同时保持列表长度不超过20条
- self.talking_message.extend(new_messages)
- if len(self.talking_message) > 15:
- self.talking_message = self.talking_message[-15:] # 只保留最新的15条
- self.translate_message_list_to_str()
- # 更新观察次数
- # self.observe_times += 1
- self.last_observe_time = new_messages[-1]["time"]
+ if len(self.talking_message) > self.max_now_obs_len and not self.updating_old:
+ self.updating_old = True
+ # 计算需要保留的消息数量
+ keep_messages_count = self.max_now_obs_len - self.overlap_len
+ # 提取所有超出保留数量的最老消息
+ oldest_messages = self.talking_message[:-keep_messages_count]
+ self.talking_message = self.talking_message[-keep_messages_count:]
+ oldest_messages_str = "\n".join([msg["detailed_plain_text"] for msg in oldest_messages])
+ oldest_timestamps = [msg["time"] for msg in oldest_messages]
- # 检查是否需要更新summary
- # current_time = int(datetime.now().timestamp())
- # if current_time - self.last_summary_time >= 30: # 如果超过30秒,重置计数
- # self.summary_count = 0
- # self.last_summary_time = current_time
+ # 调用 LLM 总结主题
+ prompt = f"请总结以下聊天记录的主题:\n{oldest_messages_str}\n主题,用一句话概括包括人物事件和主要信息,不要分点:"
+ try:
+ summary, _ = await self.llm_summary.generate_response_async(prompt)
+ except Exception as e:
+ print(f"总结主题失败: {e}")
+ summary = "无法总结主题"
- # if self.summary_count < self.max_update_in_30s: # 如果30秒内更新次数小于2次
- # await self.update_talking_summary(new_messages_str)
- # print(f"更新聊天总结:{self.observe_info}11111111111111")
- # self.summary_count += 1
- updated_observe_info = await self.update_talking_summary(new_messages_str)
- print(f"更新聊天总结:{updated_observe_info}11111111111111")
- self.observe_info = updated_observe_info
+ mid_memory = {
+ "id": str(int(datetime.now().timestamp())),
+ "theme": summary,
+ "messages": oldest_messages,
+ "timestamps": oldest_timestamps,
+ "chat_id": self.chat_id,
+ "created_at": datetime.now().timestamp(),
+ }
+ # print(f"mid_memory:{mid_memory}")
+ # 存入内存中的 mid_memorys
+ self.mid_memorys.append(mid_memory)
+ if len(self.mid_memorys) > self.max_mid_memory_len:
+ self.mid_memorys.pop(0)
- return updated_observe_info
+ mid_memory_str = "之前聊天的内容概括是:\n"
+ for mid_memory in self.mid_memorys:
+ time_diff = int((datetime.now().timestamp() - mid_memory["created_at"]) / 60)
+ mid_memory_str += f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory['id']}):{mid_memory['theme']}\n"
+ self.mid_memory_info = mid_memory_str
- async def carefully_observe(self):
- # 查找新消息,限制最多40条
- new_messages = list(
- db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}})
- .sort("time", 1)
- .limit(30)
- ) # 按时间正序排列,最多30条
+ self.updating_old = False
- if not new_messages:
- return self.observe_info # 没有新消息,返回上次观察结果
+ # print(f"处理后self.talking_message:{self.talking_message}")
- # 将新消息转换为字符串格式
- new_messages_str = ""
- for msg in new_messages:
- if "detailed_plain_text" in msg:
- new_messages_str += f"{msg['detailed_plain_text']}\n"
+ now_message_str = ""
+ now_message_str += self.translate_message_list_to_str(talking_message=self.talking_message)
+ self.now_message_info = now_message_str
- # 将新消息添加到talking_message,同时保持列表长度不超过30条
- self.talking_message.extend(new_messages)
- if len(self.talking_message) > 30:
- self.talking_message = self.talking_message[-30:] # 只保留最新的30条
- self.translate_message_list_to_str()
-
- # 更新观察次数
- self.observe_times += 1
- self.last_observe_time = new_messages[-1]["time"]
-
- updated_observe_info = await self.update_talking_summary(new_messages_str)
- self.observe_info = updated_observe_info
- return updated_observe_info
+ logger.debug(f"压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.now_message_info}")
async def update_talking_summary(self, new_messages_str):
- # 基于已经有的talking_summary,和新的talking_message,生成一个summary
- # print(f"更新聊天总结:{self.talking_summary}")
- # 开始构建prompt
- # prompt_personality = "你"
- # # person
- # individuality = Individuality.get_instance()
-
- # personality_core = individuality.personality.personality_core
- # prompt_personality += personality_core
-
- # personality_sides = individuality.personality.personality_sides
- # random.shuffle(personality_sides)
- # prompt_personality += f",{personality_sides[0]}"
-
- # identity_detail = individuality.identity.identity_detail
- # random.shuffle(identity_detail)
- # prompt_personality += f",{identity_detail[0]}"
-
- # personality_info = prompt_personality
-
prompt = ""
# prompt += f"{personality_info}"
prompt += f"你的名字叫:{self.name}\n,标识'{self.name}'的都是你自己说的话"
@@ -155,7 +161,9 @@ class ChattingObservation(Observation):
# print(f"prompt:{prompt}")
# print(f"self.observe_info:{self.observe_info}")
- def translate_message_list_to_str(self):
- self.talking_message_str = ""
- for message in self.talking_message:
- self.talking_message_str += message["detailed_plain_text"]
+ def translate_message_list_to_str(self, talking_message):
+ talking_message_str = ""
+ for message in talking_message:
+ talking_message_str += message["detailed_plain_text"]
+
+ return talking_message_str
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index ce1dd10a1..c7ff4524f 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -1,10 +1,11 @@
-from .observation import Observation
+from .observation import Observation, ChattingObservation
import asyncio
from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config
-import re
import time
+from src.plugins.chat.message import UserInfo
+from src.plugins.chat.utils import parse_text_timestamps
# from src.plugins.schedule.schedule_generator import bot_schedule
# from src.plugins.memory_system.Hippocampus import HippocampusManager
@@ -18,7 +19,6 @@ import random
from src.plugins.chat.chat_stream import ChatStream
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.chat.utils import get_recent_group_speaker
-from src.do_tool.tool_use import ToolUser
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
subheartflow_config = LogConfig(
@@ -32,23 +32,25 @@ logger = get_module_logger("subheartflow", config=subheartflow_config)
def init_prompt():
prompt = ""
# prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
- prompt += "{collected_info}\n"
+ prompt += "{extra_info}\n"
+ # prompt += "{prompt_schedule}\n"
prompt += "{relation_prompt_all}\n"
prompt += "{prompt_personality}\n"
- prompt += "刚刚你的想法是{current_thinking_info}。如果有新的内容,记得转换话题\n"
+ prompt += "刚刚你的想法是{current_thinking_info}。可以适当转换话题\n"
prompt += "-----------------------------------\n"
- prompt += "现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
+ prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n"
prompt += "你现在{mood_info}\n"
prompt += "你注意到{sender_name}刚刚说:{message_txt}\n"
prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白"
- prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话\n"
+ prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话。如果你要回复,最好只回复一个人的一个话题\n"
prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{bot_name},{bot_name}指的就是你。"
Prompt(prompt, "sub_heartflow_prompt_before")
prompt = ""
# prompt += f"你现在正在做的事情是:{schedule_info}\n"
+ prompt += "{extra_info}\n"
prompt += "{prompt_personality}\n"
- prompt += "现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
+ prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n"
prompt += "刚刚你的想法是{current_thinking_info}。"
prompt += "你现在看到了网友们发的新消息:{message_new_info}\n"
prompt += "你刚刚回复了群友们:{reply_info}"
@@ -78,7 +80,10 @@ class SubHeartflow:
self.past_mind = []
self.current_state: CurrentState = CurrentState()
self.llm_model = LLM_request(
- model=global_config.llm_sub_heartflow, temperature=0.2, max_tokens=600, request_type="sub_heart_flow"
+ model=global_config.llm_sub_heartflow,
+ temperature=global_config.llm_sub_heartflow["temp"],
+ max_tokens=600,
+ request_type="sub_heart_flow",
)
self.main_heartflow_info = ""
@@ -91,14 +96,12 @@ class SubHeartflow:
self.is_active = False
- self.observations: list[Observation] = []
+ self.observations: list[ChattingObservation] = []
self.running_knowledges = []
self.bot_name = global_config.BOT_NICKNAME
- self.tool_user = ToolUser()
-
def add_observation(self, observation: Observation):
"""添加一个新的observation对象到列表中,如果已存在相同id的observation则不添加"""
# 查找是否存在相同id的observation
@@ -151,25 +154,24 @@ class SubHeartflow:
observation = self.observations[0]
await observation.observe()
- async def do_thinking_before_reply(self, message_txt: str, sender_name: str, chat_stream: ChatStream):
+ async def do_thinking_before_reply(
+ self, message_txt: str, sender_info: UserInfo, chat_stream: ChatStream, extra_info: str, obs_id: int = None
+ ):
current_thinking_info = self.current_mind
mood_info = self.current_state.mood
# mood_info = "你很生气,很愤怒"
observation = self.observations[0]
- chat_observe_info = observation.observe_info
- # print(f"chat_observe_info:{chat_observe_info}")
+ if obs_id:
+ print(f"11111111111有id,开始获取观察信息{obs_id}")
+ chat_observe_info = observation.get_observe_info(obs_id)
+ else:
+ chat_observe_info = observation.get_observe_info()
- # 首先尝试使用工具获取更多信息
- tool_result = await self.tool_user.use_tool(message_txt, sender_name, chat_stream)
-
- # 如果工具被使用且获得了结果,将收集到的信息合并到思考中
- collected_info = ""
- if tool_result.get("used_tools", False):
- logger.info("使用工具收集了信息")
-
- # 如果有收集到的信息,将其添加到当前思考中
- if "collected_info" in tool_result:
- collected_info = tool_result["collected_info"]
+ extra_info_prompt = ""
+ for tool_name, tool_data in extra_info.items():
+ extra_info_prompt += f"{tool_name} 相关信息:\n"
+ for item in tool_data:
+ extra_info_prompt += f"- {item['name']}: {item['content']}\n"
# 开始构建prompt
prompt_personality = f"你的名字是{self.bot_name},你"
@@ -206,8 +208,10 @@ class SubHeartflow:
# f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
# )
relation_prompt_all = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format(
- relation_prompt, sender_name
+ relation_prompt, sender_info.user_nickname
)
+
+ sender_name_sign = f"<{chat_stream.platform}:{sender_info.user_id}:{sender_info.user_nickname}:{sender_info.user_cardname}>"
# prompt = ""
# # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
@@ -225,17 +229,24 @@ class SubHeartflow:
# prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
# prompt += f"记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{self.bot_name},{self.bot_name}指的就是你。"
+ time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
- collected_info,
+ extra_info_prompt,
+ # prompt_schedule,
relation_prompt_all,
prompt_personality,
current_thinking_info,
+ time_now,
chat_observe_info,
mood_info,
- sender_name,
+ sender_name_sign,
message_txt,
self.bot_name,
)
+
+ prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+ prompt = parse_text_timestamps(prompt, mode="lite")
try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
@@ -250,7 +261,7 @@ class SubHeartflow:
logger.info(f"麦麦的思考前脑内状态:{self.current_mind}")
return self.current_mind, self.past_mind
- async def do_thinking_after_reply(self, reply_content, chat_talking_prompt):
+ async def do_thinking_after_reply(self, reply_content, chat_talking_prompt, extra_info):
# print("麦麦回复之后脑袋转起来了")
# 开始构建prompt
@@ -261,6 +272,12 @@ class SubHeartflow:
personality_core = individuality.personality.personality_core
prompt_personality += personality_core
+ extra_info_prompt = ""
+ for tool_name, tool_data in extra_info.items():
+ extra_info_prompt += f"{tool_name} 相关信息:\n"
+ for item in tool_data:
+ extra_info_prompt += f"- {item['name']}: {item['content']}\n"
+
personality_sides = individuality.personality.personality_sides
random.shuffle(personality_sides)
prompt_personality += f",{personality_sides[0]}"
@@ -277,21 +294,22 @@ class SubHeartflow:
message_new_info = chat_talking_prompt
reply_info = reply_content
- # schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False)
+
+ time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
- # prompt = ""
- # # prompt += f"你现在正在做的事情是:{schedule_info}\n"
- # prompt += f"{prompt_personality}\n"
- # prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
- # prompt += f"刚刚你的想法是{current_thinking_info}。"
- # prompt += f"你现在看到了网友们发的新消息:{message_new_info}\n"
- # prompt += f"你刚刚回复了群友们:{reply_info}"
- # prompt += f"你现在{mood_info}"
- # prompt += "现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白"
- # prompt += "不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,关注你回复的内容,不要思考太多:"
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after")).format(
- prompt_personality, chat_observe_info, current_thinking_info, message_new_info, reply_info, mood_info
+ extra_info_prompt,
+ prompt_personality,
+ time_now,
+ chat_observe_info,
+ current_thinking_info,
+ message_new_info,
+ reply_info,
+ mood_info,
)
+
+ prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+ prompt = parse_text_timestamps(prompt, mode="lite")
try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
@@ -305,48 +323,6 @@ class SubHeartflow:
self.last_reply_time = time.time()
- async def judge_willing(self):
- # 开始构建prompt
- prompt_personality = "你"
- # person
- individuality = Individuality.get_instance()
-
- personality_core = individuality.personality.personality_core
- prompt_personality += personality_core
-
- personality_sides = individuality.personality.personality_sides
- random.shuffle(personality_sides)
- prompt_personality += f",{personality_sides[0]}"
-
- identity_detail = individuality.identity.identity_detail
- random.shuffle(identity_detail)
- prompt_personality += f",{identity_detail[0]}"
-
- # print("麦麦闹情绪了1")
- current_thinking_info = self.current_mind
- mood_info = self.current_state.mood
- # print("麦麦闹情绪了2")
- prompt = ""
- prompt += f"{prompt_personality}\n"
- prompt += "现在你正在上网,和qq群里的网友们聊天"
- prompt += f"你现在的想法是{current_thinking_info}。"
- prompt += f"你现在{mood_info}。"
- prompt += "现在请你思考,你想不想发言或者回复,请你输出一个数字,1-10,1表示非常不想,10表示非常想。"
- prompt += "请你用<>包裹你的回复意愿,输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"
- try:
- response, reasoning_content = await self.llm_model.generate_response_async(prompt)
- # 解析willing值
- willing_match = re.search(r"<(\d+)>", response)
- except Exception as e:
- logger.error(f"意愿判断获取失败: {e}")
- willing_match = None
- if willing_match:
- self.current_state.willing = int(willing_match.group(1))
- else:
- self.current_state.willing = 0
-
- return self.current_state.willing
-
def update_current_mind(self, response):
self.past_mind.append(self.current_mind)
self.current_mind = response
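
配合上面 tool_use.py 的改动可以看出,工具调用从 SubHeartflow 内部移到了调用方:SubHeartflow 不再持有 ToolUser,而是由外部先调用 use_tool,再把 structured_info 作为 extra_info 传入 do_thinking_before_reply。调用侧(如 think_flow_chat)不在本次 diff 中,下面只按新签名给出一个假设性的串联草图:

```python
# 仅为示意:按本次 diff 中的新签名推测的调用侧串联方式,
# 变量来源与取值均为假设,实际调用代码不在本 diff 内。
tool_user = ToolUser()
tool_result = await tool_user.use_tool(message_txt, sender_name, chat_stream, subheartflow)

extra_info = tool_result.get("structured_info", {}) if tool_result.get("used_tools") else {}

current_mind, past_mind = await subheartflow.do_thinking_before_reply(
    message_txt=message_txt,
    sender_info=sender_info,   # UserInfo 对象,替代原来的 sender_name 字符串
    chat_stream=chat_stream,
    extra_info=extra_info,     # 即上面 use_tool 返回的 structured_info
    obs_id=None,               # 需要回看早期聊天时传入 mid_memory 的 id(列表)
)
```
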
diff --git a/src/plugins/PFC/action_planner.py b/src/plugins/PFC/action_planner.py
index 61afc1bd3..cc904662d 100644
--- a/src/plugins/PFC/action_planner.py
+++ b/src/plugins/PFC/action_planner.py
@@ -24,7 +24,10 @@ class ActionPlanner:
def __init__(self, stream_id: str):
self.llm = LLM_request(
- model=global_config.llm_normal, temperature=0.2, max_tokens=1000, request_type="action_planning"
+ model=global_config.llm_normal,
+ temperature=global_config.llm_normal["temp"],
+ max_tokens=1000,
+ request_type="action_planning",
)
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME
diff --git a/src/plugins/PFC/chat_states.py b/src/plugins/PFC/chat_states.py
index 0253ea6dd..dc7b728b0 100644
--- a/src/plugins/PFC/chat_states.py
+++ b/src/plugins/PFC/chat_states.py
@@ -100,15 +100,15 @@ class NotificationManager:
"""
print(1145145511114445551111444)
if target not in self._handlers:
- print("没11有target")
+ # print("没11有target")
self._handlers[target] = {}
if notification_type not in self._handlers[target]:
- print("没11有notification_type")
+ # print("没11有notification_type")
self._handlers[target][notification_type] = []
- print(self._handlers[target][notification_type])
- print(f"注册1111111111111111111111处理器: {target} {notification_type} {handler}")
+ # print(self._handlers[target][notification_type])
+ # print(f"注册1111111111111111111111处理器: {target} {notification_type} {handler}")
self._handlers[target][notification_type].append(handler)
- print(self._handlers[target][notification_type])
+ # print(self._handlers[target][notification_type])
def unregister_handler(self, target: str, notification_type: NotificationType, handler: NotificationHandler):
"""注销通知处理器
diff --git a/src/plugins/PFC/message_sender.py b/src/plugins/PFC/message_sender.py
index 76b07945f..bc4499ed9 100644
--- a/src/plugins/PFC/message_sender.py
+++ b/src/plugins/PFC/message_sender.py
@@ -3,7 +3,8 @@ from src.common.logger import get_module_logger
from ..chat.chat_stream import ChatStream
from ..chat.message import Message
from ..message.message_base import Seg
-from src.plugins.chat.message import MessageSending
+from src.plugins.chat.message import MessageSending, MessageSet
+from src.plugins.chat.message_sender import message_manager
logger = get_module_logger("message_sender")
@@ -39,9 +40,11 @@ class DirectMessageSender:
message_sending = MessageSending(segments=segments)
# 发送消息
- await chat_stream.send_message(message_sending)
- logger.info(f"消息已发送: {content}")
+ message_set = MessageSet(chat_stream, message_sending.message_id)
+ message_set.add_message(message_sending)
+ message_manager.add_message(message_set)
+ logger.info(f"PFC消息已发送: {content}")
except Exception as e:
- logger.error(f"发送消息失败: {str(e)}")
+ logger.error(f"PFC消息发送失败: {str(e)}")
raise
diff --git a/src/plugins/PFC/message_storage.py b/src/plugins/PFC/message_storage.py
index 55bccb14e..b57f5d2b5 100644
--- a/src/plugins/PFC/message_storage.py
+++ b/src/plugins/PFC/message_storage.py
@@ -50,21 +50,18 @@ class MessageStorage(ABC):
class MongoDBMessageStorage(MessageStorage):
"""MongoDB消息存储实现"""
- def __init__(self):
- self.db = db
-
async def get_messages_after(self, chat_id: str, message_time: float) -> List[Dict[str, Any]]:
query = {"chat_id": chat_id}
# print(f"storage_check_message: {message_time}")
query["time"] = {"$gt": message_time}
- return list(self.db.messages.find(query).sort("time", 1))
+ return list(db.messages.find(query).sort("time", 1))
async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
query = {"chat_id": chat_id, "time": {"$lt": time_point}}
- messages = list(self.db.messages.find(query).sort("time", -1).limit(limit))
+ messages = list(db.messages.find(query).sort("time", -1).limit(limit))
# 将消息按时间正序排列
messages.reverse()
@@ -73,7 +70,7 @@ class MongoDBMessageStorage(MessageStorage):
async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
query = {"chat_id": chat_id, "time": {"$gt": after_time}}
- return self.db.messages.find_one(query) is not None
+ return db.messages.find_one(query) is not None
# # 创建一个内存消息存储实现,用于测试
diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py
index 727a8f1ba..a53258888 100644
--- a/src/plugins/PFC/pfc.py
+++ b/src/plugins/PFC/pfc.py
@@ -299,6 +299,12 @@ class DirectMessageSender:
self.logger = get_module_logger("direct_sender")
self.storage = MessageStorage()
+ async def send_via_ws(self, message: MessageSending) -> None:
+ try:
+ await global_api.send_message(message)
+ except Exception as e:
+ raise ValueError(f"未找到平台:{message.message_info.platform} 的url配置,请检查配置文件") from e
+
async def send_message(
self,
chat_stream: ChatStream,
@@ -335,21 +341,22 @@ class DirectMessageSender:
# 处理消息
await message.process()
+ message_json = message.to_dict()
+
# 发送消息
try:
- message_json = message.to_dict()
- end_point = global_config.api_urls.get(chat_stream.platform, None)
-
- if not end_point:
- raise ValueError(f"未找到平台:{chat_stream.platform} 的url配置")
-
- await global_api.send_message_REST(end_point, message_json)
-
- # 存储消息
- await self.storage.store_message(message, message.chat_stream)
-
- self.logger.info(f"直接发送消息成功: {content[:30]}...")
-
+ end_point = global_config.api_urls.get(message.message_info.platform, None)
+ if end_point:
+ # logger.info(f"发送消息到{end_point}")
+ # logger.info(message_json)
+ try:
+ await global_api.send_message_REST(end_point, message_json)
+ except Exception as e:
+ logger.error(f"REST方式发送失败,出现错误: {str(e)}")
+ logger.info("尝试使用ws发送")
+ await self.send_via_ws(message)
+ else:
+ await self.send_via_ws(message)
+ logger.success(f"PFC消息已发送: {content}")
except Exception as e:
- self.logger.error(f"直接发送消息失败: {str(e)}")
- raise
+ logger.error(f"PFC消息发送失败: {str(e)}")
diff --git a/src/plugins/PFC/pfc_KnowledgeFetcher.py b/src/plugins/PFC/pfc_KnowledgeFetcher.py
index b4041bb34..9c5c55076 100644
--- a/src/plugins/PFC/pfc_KnowledgeFetcher.py
+++ b/src/plugins/PFC/pfc_KnowledgeFetcher.py
@@ -13,7 +13,10 @@ class KnowledgeFetcher:
def __init__(self):
self.llm = LLM_request(
- model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="knowledge_fetch"
+ model=global_config.llm_normal,
+ temperature=global_config.llm_normal["temp"],
+ max_tokens=1000,
+ request_type="knowledge_fetch",
)
async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:
diff --git a/src/plugins/PFC/reply_generator.py b/src/plugins/PFC/reply_generator.py
index e65b64014..85a067d23 100644
--- a/src/plugins/PFC/reply_generator.py
+++ b/src/plugins/PFC/reply_generator.py
@@ -16,7 +16,10 @@ class ReplyGenerator:
def __init__(self, stream_id: str):
self.llm = LLM_request(
- model=global_config.llm_normal, temperature=0.2, max_tokens=300, request_type="reply_generation"
+ model=global_config.llm_normal,
+ temperature=global_config.llm_normal["temp"],
+ max_tokens=300,
+ request_type="reply_generation",
)
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME
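Both PFC components now take their sampling temperature from the model's own config entry (`global_config.llm_normal["temp"]`) instead of a hard-coded constant; the `temp` field itself is introduced in the config.py hunk later in this diff. A small sketch of building such a request with a per-model temperature — the `LLM_request` signature is taken as shown in the diff, and the `.get` fallback is an assumption:

```python
from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config


def build_reply_llm(max_tokens: int = 300) -> LLM_request:
    model_cfg = global_config.llm_normal  # dict-like model config with an optional "temp" key
    return LLM_request(
        model=model_cfg,
        temperature=model_cfg.get("temp", 0.7),  # 0.7 matches the default added in config.py
        max_tokens=max_tokens,
        request_type="reply_generation",
    )
```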
diff --git a/src/plugins/chat/__init__.py b/src/plugins/chat/__init__.py
index 6d2455202..8d9aa1f8e 100644
--- a/src/plugins/chat/__init__.py
+++ b/src/plugins/chat/__init__.py
@@ -3,7 +3,6 @@ from ..person_info.relationship_manager import relationship_manager
from .chat_stream import chat_manager
from .message_sender import message_manager
from ..storage.storage import MessageStorage
-from .auto_speak import auto_speak_manager
__all__ = [
@@ -12,5 +11,4 @@ __all__ = [
"chat_manager",
"message_manager",
"MessageStorage",
- "auto_speak_manager",
]
diff --git a/src/plugins/chat/auto_speak.py b/src/plugins/chat/auto_speak.py
deleted file mode 100644
index ac76a2714..000000000
--- a/src/plugins/chat/auto_speak.py
+++ /dev/null
@@ -1,184 +0,0 @@
-import time
-import asyncio
-import random
-from random import random as random_float
-from typing import Dict
-from ..config.config import global_config
-from .message import MessageSending, MessageThinking, MessageSet, MessageRecv
-from ..message.message_base import UserInfo, Seg
-from .message_sender import message_manager
-from ..moods.moods import MoodManager
-from ..chat_module.reasoning_chat.reasoning_generator import ResponseGenerator
-from src.common.logger import get_module_logger
-from src.heart_flow.heartflow import heartflow
-from ...common.database import db
-
-logger = get_module_logger("auto_speak")
-
-
-class AutoSpeakManager:
- def __init__(self):
- self._last_auto_speak_time: Dict[str, float] = {} # 记录每个聊天流上次自主发言的时间
- self.mood_manager = MoodManager.get_instance()
- self.gpt = ResponseGenerator() # 添加gpt实例
- self._started = False
- self._check_task = None
- self.db = db
-
- async def get_chat_info(self, chat_id: str) -> dict:
- """从数据库获取聊天流信息"""
- chat_info = await self.db.chat_streams.find_one({"stream_id": chat_id})
- return chat_info
-
- async def start_auto_speak_check(self):
- """启动自动发言检查任务"""
- if not self._started:
- self._check_task = asyncio.create_task(self._periodic_check())
- self._started = True
- logger.success("自动发言检查任务已启动")
-
- async def _periodic_check(self):
- """定期检查是否需要自主发言"""
- while True and global_config.enable_think_flow:
- # 获取所有活跃的子心流
- active_subheartflows = []
- for chat_id, subheartflow in heartflow._subheartflows.items():
- if (
- subheartflow.is_active and subheartflow.current_state.willing > 0
- ): # 只考虑活跃且意愿值大于0.5的子心流
- active_subheartflows.append((chat_id, subheartflow))
- logger.debug(
- f"发现活跃子心流 - 聊天ID: {chat_id}, 意愿值: {subheartflow.current_state.willing:.2f}"
- )
-
- if not active_subheartflows:
- logger.debug("当前没有活跃的子心流")
- await asyncio.sleep(20) # 添加异步等待
- continue
-
- # 随机选择一个活跃的子心流
- chat_id, subheartflow = random.choice(active_subheartflows)
- logger.info(f"随机选择子心流 - 聊天ID: {chat_id}, 意愿值: {subheartflow.current_state.willing:.2f}")
-
- # 检查是否应该自主发言
- if await self.check_auto_speak(subheartflow):
- logger.info(f"准备自主发言 - 聊天ID: {chat_id}")
- # 生成自主发言
- bot_user_info = UserInfo(
- user_id=global_config.BOT_QQ,
- user_nickname=global_config.BOT_NICKNAME,
- platform="qq", # 默认使用qq平台
- )
-
- # 创建一个空的MessageRecv对象作为上下文
- message = MessageRecv(
- {
- "message_info": {
- "user_info": {"user_id": chat_id, "user_nickname": "", "platform": "qq"},
- "group_info": None,
- "platform": "qq",
- "time": time.time(),
- },
- "processed_plain_text": "",
- "raw_message": "",
- "is_emoji": False,
- }
- )
-
- await self.generate_auto_speak(
- subheartflow, message, bot_user_info, message.message_info["user_info"], message.message_info
- )
- else:
- logger.debug(f"不满足自主发言条件 - 聊天ID: {chat_id}")
-
- # 每分钟检查一次
- await asyncio.sleep(20)
-
- # await asyncio.sleep(5) # 发生错误时等待5秒再继续
-
- async def check_auto_speak(self, subheartflow) -> bool:
- """检查是否应该自主发言"""
- if not subheartflow:
- return False
-
- current_time = time.time()
- chat_id = subheartflow.observe_chat_id
-
- # 获取上次自主发言时间
- if chat_id not in self._last_auto_speak_time:
- self._last_auto_speak_time[chat_id] = 0
- last_speak_time = self._last_auto_speak_time.get(chat_id, 0)
-
- # 如果距离上次自主发言不到5分钟,不发言
- if current_time - last_speak_time < 30:
- logger.debug(
- f"距离上次发言时间太短 - 聊天ID: {chat_id}, 剩余时间: {30 - (current_time - last_speak_time):.1f}秒"
- )
- return False
-
- # 获取当前意愿值
- current_willing = subheartflow.current_state.willing
-
- if current_willing > 0.1 and random_float() < 0.5:
- self._last_auto_speak_time[chat_id] = current_time
- logger.info(f"满足自主发言条件 - 聊天ID: {chat_id}, 意愿值: {current_willing:.2f}")
- return True
-
- logger.debug(f"不满足自主发言条件 - 聊天ID: {chat_id}, 意愿值: {current_willing:.2f}")
- return False
-
- async def generate_auto_speak(self, subheartflow, message, bot_user_info: UserInfo, userinfo, messageinfo):
- """生成自主发言内容"""
- thinking_time_point = round(time.time(), 2)
- think_id = "mt" + str(thinking_time_point)
- thinking_message = MessageThinking(
- message_id=think_id,
- chat_stream=None, # 不需要chat_stream
- bot_user_info=bot_user_info,
- reply=message,
- thinking_start_time=thinking_time_point,
- )
-
- message_manager.add_message(thinking_message)
-
- # 生成自主发言内容
- try:
- response, raw_content = await self.gpt.generate_response(message)
- except Exception as e:
- logger.error(f"生成自主发言内容时发生错误: {e}")
- return False
-
- if response:
- message_set = MessageSet(None, think_id) # 不需要chat_stream
- mark_head = False
-
- for msg in response:
- message_segment = Seg(type="text", data=msg)
- bot_message = MessageSending(
- message_id=think_id,
- chat_stream=None, # 不需要chat_stream
- bot_user_info=bot_user_info,
- sender_info=userinfo,
- message_segment=message_segment,
- reply=message,
- is_head=not mark_head,
- is_emoji=False,
- thinking_start_time=thinking_time_point,
- )
- if not mark_head:
- mark_head = True
- message_set.add_message(bot_message)
-
- message_manager.add_message(message_set)
-
- # 更新情绪和关系
- stance, emotion = await self.gpt._get_emotion_tags(raw_content, message.processed_plain_text)
- self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
-
- return True
-
- return False
-
-
-# 创建全局AutoSpeakManager实例
-auto_speak_manager = AutoSpeakManager()
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index c2126eee2..3dc732274 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -38,7 +38,7 @@ class ChatBot:
async def _ensure_started(self):
"""确保所有任务已启动"""
if not self._started:
- logger.info("确保ChatBot所有任务已启动")
+ logger.trace("确保ChatBot所有任务已启动")
self._started = True
@@ -65,10 +65,6 @@ class ChatBot:
- 没有思维流相关的状态管理
- 更简单直接的回复逻辑
- 3. pfc_chatting模式:仅进行消息处理
- - 不进行任何回复
- - 只处理和存储消息
-
所有模式都包含:
- 消息过滤
- 记忆激活
@@ -84,7 +80,7 @@ class ChatBot:
message = MessageRecv(message_data)
groupinfo = message.message_info.group_info
userinfo = message.message_info.user_info
- logger.debug(f"处理消息:{str(message_data)[:120]}...")
+ logger.trace(f"处理消息:{str(message_data)[:120]}...")
if userinfo.user_id in global_config.ban_user_id:
logger.debug(f"用户{userinfo.user_id}被禁止回复")
diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py
index 5dc688c03..9f55b5741 100644
--- a/src/plugins/chat/message.py
+++ b/src/plugins/chat/message.py
@@ -142,14 +142,18 @@ class MessageRecv(Message):
def _generate_detailed_text(self) -> str:
"""生成详细文本,包含时间和用户信息"""
- time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+ # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+ msg_time = self.message_info.time  # 避免局部变量遮蔽time模块
user_info = self.message_info.user_info
+ # name = (
+ # f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
+ # if user_info.user_cardname != None
+ # else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ # )
name = (
- f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
- if user_info.user_cardname != None
- else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
)
- return f"[{time_str}] {name}: {self.processed_plain_text}\n"
+ return f"[{time}] {name}: {self.processed_plain_text}\n"
@dataclass
@@ -239,14 +243,18 @@ class MessageProcessBase(Message):
def _generate_detailed_text(self) -> str:
"""生成详细文本,包含时间和用户信息"""
- time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+ # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+ msg_time = self.message_info.time  # 避免局部变量遮蔽time模块
user_info = self.message_info.user_info
+ # name = (
+ # f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
+ # if user_info.user_cardname != None
+ # else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ # )
name = (
- f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
- if user_info.user_cardname != None
- else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
)
- return f"[{time_str}] {name}: {self.processed_plain_text}\n"
+ return f"[{time}] {name}: {self.processed_plain_text}\n"
@dataclass
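`_generate_detailed_text` now emits a machine-friendly line: a raw timestamp in brackets plus a `<platform:user_id:nickname:cardname>` person sign instead of prose, so later prompt-building steps can swap the sign for a chosen name (`convert_all_person_sign_to_person_name`) and rewrite the timestamps (`parse_text_timestamps`). A small sketch of producing and reading that format; the parsing regex is a hypothetical helper, not code from the repo:

```python
import re
from typing import Optional, Tuple


def detailed_line(platform: str, user_id: str, nickname: str, cardname: str,
                  timestamp: float, text: str) -> str:
    # Same shape as the new _generate_detailed_text output: "[<unix time>] <sign>: <text>"
    name = f"<{platform}:{user_id}:{nickname}:{cardname}>"
    return f"[{timestamp}] {name}: {text}\n"


SIGN_RE = re.compile(r"<([^:>]+):([^:>]+):([^:>]*):([^>]*)>")


def parse_sign(line: str) -> Optional[Tuple[str, str, str, str]]:
    # Returns (platform, user_id, nickname, cardname) or None if no sign is present.
    m = SIGN_RE.search(line)
    return m.groups() if m else None
```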
diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py
index f62e015b4..21e490433 100644
--- a/src/plugins/chat/message_buffer.py
+++ b/src/plugins/chat/message_buffer.py
@@ -153,11 +153,11 @@ class MessageBuffer:
# 更新当前消息的processed_plain_text
if combined_text and combined_text[0] != message.processed_plain_text and is_update:
if type == "text":
- message.processed_plain_text = "".join(combined_text)
+ message.processed_plain_text = ",".join(combined_text)
logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容到当前消息")
elif type == "emoji":
combined_text.pop()
- message.processed_plain_text = "".join(combined_text)
+ message.processed_plain_text = ",".join(combined_text)
message.is_emoji = False
logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容,覆盖当前emoji消息")
diff --git a/src/plugins/chat/message_sender.py b/src/plugins/chat/message_sender.py
index 9f547ed10..c223bbe4d 100644
--- a/src/plugins/chat/message_sender.py
+++ b/src/plugins/chat/message_sender.py
@@ -70,9 +70,9 @@ class Message_Sender:
thinking_start_time=message.thinking_start_time,
is_emoji=message.is_emoji,
)
- logger.debug(f"{message.processed_plain_text},{typing_time},计算输入时间结束")
+ logger.trace(f"{message.processed_plain_text},{typing_time},计算输入时间结束")
await asyncio.sleep(typing_time)
- logger.debug(f"{message.processed_plain_text},{typing_time},等待输入时间结束")
+ logger.trace(f"{message.processed_plain_text},{typing_time},等待输入时间结束")
message_json = message.to_dict()
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 5c0c4df8d..0172289ff 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -334,27 +334,35 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> List[str]:
- # processed_response = process_text_with_typos(content)
- # 对西文字符段落的回复长度设置为汉字字符的两倍
- max_length = global_config.response_max_length
+ # 提取被 () 或 [] 包裹的内容
+ pattern = re.compile(r"[\(\[].*?[\)\]]")
+ _extracted_contents = pattern.findall(text)
+ # 去除 () 和 [] 及其包裹的内容
+ cleaned_text = pattern.sub("", text)
+ logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")
+
+ # 对清理后的文本进行进一步处理
+ max_length = global_config.response_max_length * 2
max_sentence_num = global_config.response_max_sentence_num
- if len(text) > max_length and not is_western_paragraph(text):
- logger.warning(f"回复过长 ({len(text)} 字符),返回默认回复")
+ if len(cleaned_text) > max_length and not is_western_paragraph(cleaned_text):
+ logger.warning(f"回复过长 ({len(cleaned_text)} 字符),返回默认回复")
return ["懒得说"]
- elif len(text) > 200:
- logger.warning(f"回复过长 ({len(text)} 字符),返回默认回复")
+ elif len(cleaned_text) > 200:
+ logger.warning(f"回复过长 ({len(cleaned_text)} 字符),返回默认回复")
return ["懒得说"]
- # 处理长消息
+
typo_generator = ChineseTypoGenerator(
error_rate=global_config.chinese_typo_error_rate,
min_freq=global_config.chinese_typo_min_freq,
tone_error_rate=global_config.chinese_typo_tone_error_rate,
word_replace_rate=global_config.chinese_typo_word_replace_rate,
)
- if global_config.enable_response_spliter:
- split_sentences = split_into_sentences_w_remove_punctuation(text)
+
+ if global_config.enable_response_splitter:
+ split_sentences = split_into_sentences_w_remove_punctuation(cleaned_text)
else:
- split_sentences = [text]
+ split_sentences = [cleaned_text]
+
sentences = []
for sentence in split_sentences:
if global_config.chinese_typo_enable:
@@ -364,12 +372,13 @@ def process_llm_response(text: str) -> List[str]:
sentences.append(typo_corrections)
else:
sentences.append(sentence)
- # 检查分割后的消息数量是否过多(超过3条)
if len(sentences) > max_sentence_num:
logger.warning(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
return [f"{global_config.BOT_NICKNAME}不知道哦"]
+ # sentences.extend(extracted_contents)
+
return sentences
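The reworked `process_llm_response` first strips any ASCII `(...)` or `[...]` spans (stage directions, bracketed asides) before the length checks and sentence splitting. That cleaning step in isolation, using the same regex as the diff:

```python
import re
from typing import List, Tuple

BRACKET_RE = re.compile(r"[\(\[].*?[\)\]]")  # non-greedy spans opened by ( or [ and closed by ) or ]


def strip_bracketed(text: str) -> Tuple[str, List[str]]:
    """Return the text with bracketed spans removed, plus the spans that were removed."""
    extracted = BRACKET_RE.findall(text)
    cleaned = BRACKET_RE.sub("", text)
    return cleaned, extracted


# strip_bracketed("好啊(笑)明天见[doge]") -> ("好啊明天见", ["(笑)", "[doge]"])
```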
@@ -630,3 +639,141 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
except Exception as e:
logger.error(f"计算消息数量时出错: {str(e)}")
return 0, 0
+
+
+def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
+ """将时间戳转换为人类可读的时间格式
+
+ Args:
+ timestamp: 时间戳
+ mode: 转换模式,"normal"为标准格式,"relative"为相对时间格式
+
+ Returns:
+ str: 格式化后的时间字符串
+ """
+ if mode == "normal":
+ return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
+ elif mode == "relative":
+ now = time.time()
+ diff = now - timestamp
+
+ if diff < 20:
+ return "刚刚:"
+ elif diff < 60:
+ return f"{int(diff)}秒前:"
+ elif diff < 1800:
+ return f"{int(diff / 60)}分钟前:"
+ elif diff < 3600:
+ return f"{int(diff / 60)}分钟前:\n"
+ elif diff < 86400:
+ return f"{int(diff / 3600)}小时前:\n"
+ elif diff < 604800:
+ return f"{int(diff / 86400)}天前:\n"
+ else:
+ return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":"
+
+def parse_text_timestamps(text: str, mode: str = "normal") -> str:
+ """解析文本中的时间戳并转换为可读时间格式
+
+ Args:
+ text: 包含时间戳的文本,时间戳应以[]包裹
+ mode: 转换模式,"normal"为全部转换,"lite"为按时间段选择性转换
+
+ Returns:
+ str: 替换后的文本
+
+ 转换规则:
+ - normal模式: 将文本中所有时间戳转换为可读格式
+ - lite模式:
+ - 第一个和最后一个时间戳必须转换
+ - 以15秒为间隔划分时间段,每段最多转换一个时间戳
+ - 不转换的时间戳替换为空字符串
+ """
+ # 匹配[数字]或[数字.数字]格式的时间戳
+ pattern = r'\[(\d+(?:\.\d+)?)\]'
+
+ # 找出所有匹配的时间戳
+ matches = list(re.finditer(pattern, text))
+
+ if not matches:
+ return text
+
+ # normal模式: 直接转换所有时间戳
+ if mode == "normal":
+ result_text = text
+ for match in matches:
+ timestamp = float(match.group(1))
+ readable_time = translate_timestamp_to_human_readable(timestamp, "normal")
+ # 由于替换会改变文本长度,需要使用正则替换而非直接替换
+ pattern_instance = re.escape(match.group(0))
+ result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
+ return result_text
+ else:
+ # lite模式: 按15秒间隔划分并选择性转换
+ result_text = text
+
+ # 提取所有时间戳及其位置
+ timestamps = [(float(m.group(1)), m) for m in matches]
+ timestamps.sort(key=lambda x: x[0]) # 按时间戳升序排序
+
+ if not timestamps:
+ return text
+
+ # 获取第一个和最后一个时间戳
+ first_timestamp, first_match = timestamps[0]
+ last_timestamp, last_match = timestamps[-1]
+
+ # 将时间范围划分成15秒间隔的时间段
+ time_segments = {}
+
+ # 对所有时间戳按15秒间隔分组
+ for ts, match in timestamps:
+ segment_key = int(ts // 15) # 将时间戳除以15取整,作为时间段的键
+ if segment_key not in time_segments:
+ time_segments[segment_key] = []
+ time_segments[segment_key].append((ts, match))
+
+ # 记录需要转换的时间戳
+ to_convert = []
+
+ # 从每个时间段中选择一个时间戳进行转换
+ for segment, segment_timestamps in time_segments.items():
+ # 选择这个时间段中的第一个时间戳
+ to_convert.append(segment_timestamps[0])
+
+ # 确保第一个和最后一个时间戳在转换列表中
+ first_in_list = False
+ last_in_list = False
+
+ for ts, match in to_convert:
+ if ts == first_timestamp:
+ first_in_list = True
+ if ts == last_timestamp:
+ last_in_list = True
+
+ if not first_in_list:
+ to_convert.append((first_timestamp, first_match))
+ if not last_in_list:
+ to_convert.append((last_timestamp, last_match))
+
+ # 创建需要转换的时间戳集合,用于快速查找
+ to_convert_set = {match.group(0) for _, match in to_convert}
+
+ # 首先替换所有不需要转换的时间戳为空字符串
+ for ts, match in timestamps:
+ if match.group(0) not in to_convert_set:
+ pattern_instance = re.escape(match.group(0))
+ result_text = re.sub(pattern_instance, "", result_text, count=1)
+
+ # 按照时间戳原始顺序排序,避免替换时位置错误
+ to_convert.sort(key=lambda x: x[1].start())
+
+ # 执行替换
+ # 由于替换会改变文本长度,从后向前替换
+ to_convert.reverse()
+ for ts, match in to_convert:
+ readable_time = translate_timestamp_to_human_readable(ts, "relative")
+ pattern_instance = re.escape(match.group(0))
+ result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
+
+ return result_text
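A short usage sketch of the two helpers added above; exact output depends on the current clock, so the comments only indicate the shape of the result (imports assume the functions live in `src/plugins/chat/utils.py` as in this diff):

```python
import time

from src.plugins.chat.utils import translate_timestamp_to_human_readable, parse_text_timestamps

now = time.time()
line = f"[{now - 90}] <qq:12345:Alice:Ali>: 早上好\n[{now - 5}] <qq:67890:Bob:>: 在吗"

print(translate_timestamp_to_human_readable(now - 90, mode="relative"))  # e.g. "1分钟前:"
print(parse_text_timestamps(line, mode="normal"))  # every [ts] becomes "YYYY-MM-DD HH:MM:SS"
print(parse_text_timestamps(line, mode="lite"))    # first/last kept; at most one timestamp per 15s bucket
```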
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index eea1cc8b8..2ce218a6f 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -19,6 +19,7 @@ from ...chat.chat_stream import chat_manager
from ...person_info.relationship_manager import relationship_manager
from ...chat.message_buffer import message_buffer
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+from ...utils.timer_calculater import Timer
# 定义日志配置
chat_config = LogConfig(
@@ -173,12 +174,10 @@ class ReasoningChat:
await self.storage.store_message(message, chat)
# 记忆激活
- timer1 = time.time()
- interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
- message.processed_plain_text, fast_retrieval=True
- )
- timer2 = time.time()
- timing_results["记忆激活"] = timer2 - timer1
+ with Timer("记忆激活", timing_results):
+ interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
+ message.processed_plain_text, fast_retrieval=True
+ )
# 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message)
@@ -228,10 +227,8 @@ class ReasoningChat:
await willing_manager.before_generate_reply_handle(message.message_info.message_id)
# 创建思考消息
- timer1 = time.time()
- thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
- timer2 = time.time()
- timing_results["创建思考消息"] = timer2 - timer1
+ with Timer("创建思考消息", timing_results):
+ thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
logger.debug(f"创建捕捉器,thinking_id:{thinking_id}")
@@ -239,11 +236,9 @@ class ReasoningChat:
info_catcher.catch_decide_to_response(message)
# 生成回复
- timer1 = time.time()
try:
- response_set = await self.gpt.generate_response(message, thinking_id)
- timer2 = time.time()
- timing_results["生成回复"] = timer2 - timer1
+ with Timer("生成回复", timing_results):
+ response_set = await self.gpt.generate_response(message, thinking_id)
info_catcher.catch_after_generate_response(timing_results["生成回复"])
except Exception as e:
@@ -255,26 +250,20 @@ class ReasoningChat:
return
# 发送消息
- timer1 = time.time()
- first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
- timer2 = time.time()
- timing_results["发送消息"] = timer2 - timer1
+ with Timer("发送消息", timing_results):
+ first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg)
info_catcher.done_catch()
# 处理表情包
- timer1 = time.time()
- await self._handle_emoji(message, chat, response_set)
- timer2 = time.time()
- timing_results["处理表情包"] = timer2 - timer1
+ with Timer("处理表情包", timing_results):
+ await self._handle_emoji(message, chat, response_set)
# 更新关系情绪
- timer1 = time.time()
- await self._update_relationship(message, response_set)
- timer2 = time.time()
- timing_results["更新关系情绪"] = timer2 - timer1
+ with Timer("更新关系情绪", timing_results):
+ await self._update_relationship(message, response_set)
# 回复后处理
await willing_manager.after_generate_reply_handle(message.message_info.message_id)
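The manual `timer1`/`timer2` bookkeeping above is replaced by a `Timer` context manager from `...utils.timer_calculater`, used either as `with Timer("步骤名", timing_results):` to record into a dict, or bare (`with Timer() as t:`) with a `human_readable` property, as the generator diffs below show. That module is not included in this diff, so the following is a hypothetical stand-in with the same surface, not the project's actual implementation:

```python
import time
from typing import Dict, Optional


class Timer:
    """Hypothetical sketch matching the usage in the diff: optional name plus a results dict."""

    def __init__(self, name: Optional[str] = None, results: Optional[Dict[str, float]] = None):
        self.name = name
        self.results = results
        self.elapsed = 0.0

    def __enter__(self) -> "Timer":
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc, tb) -> bool:
        self.elapsed = time.perf_counter() - self._start
        if self.results is not None and self.name is not None:
            self.results[self.name] = self.elapsed
        return False  # never swallow exceptions from the timed block

    @property
    def human_readable(self) -> str:
        return f"{self.elapsed:.2f}秒"


# Usage mirroring the diff:
timing_results: Dict[str, float] = {}
with Timer("记忆激活", timing_results):
    pass  # the awaited work goes here
```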
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
index 83abe71cf..46602b5d7 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
@@ -1,4 +1,3 @@
-import time
from typing import List, Optional, Tuple, Union
import random
@@ -7,6 +6,7 @@ from ...config.config import global_config
from ...chat.message import MessageThinking
from .reasoning_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response
+from ...utils.timer_calculater import Timer
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
@@ -29,7 +29,10 @@ class ResponseGenerator:
request_type="response_reasoning",
)
self.model_normal = LLM_request(
- model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_reasoning"
+ model=global_config.llm_normal,
+ temperature=global_config.llm_normal["temp"],
+ max_tokens=256,
+ request_type="response_reasoning",
)
self.model_sum = LLM_request(
@@ -82,15 +85,14 @@ class ResponseGenerator:
logger.debug("开始使用生成回复-2")
# 构建prompt
- timer1 = time.time()
- prompt = await prompt_builder._build_prompt(
- message.chat_stream,
- message_txt=message.processed_plain_text,
- sender_name=sender_name,
- stream_id=message.chat_stream.stream_id,
- )
- timer2 = time.time()
- logger.info(f"构建prompt时间: {timer2 - timer1}秒")
+ with Timer() as t_build_prompt:
+ prompt = await prompt_builder._build_prompt(
+ message.chat_stream,
+ message_txt=message.processed_plain_text,
+ sender_name=sender_name,
+ stream_id=message.chat_stream.stream_id,
+ )
+ logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py
index 5c356af7f..4bdc307b7 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_prompt_builder.py
@@ -175,7 +175,7 @@ class PromptBuilder:
# moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
# 涉及政治敏感以及违法违规的内容请规避。"""
- logger.info("开始构建prompt")
+ logger.debug("开始构建prompt")
# prompt = f"""
# {relation_prompt_all}
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
index 34c9860f0..1e8e844eb 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
@@ -20,6 +20,8 @@ from ...chat.chat_stream import chat_manager
from ...person_info.relationship_manager import relationship_manager
from ...chat.message_buffer import message_buffer
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+from ...utils.timer_calculater import Timer
+from src.do_tool.tool_use import ToolUser
# 定义日志配置
chat_config = LogConfig(
@@ -36,6 +38,7 @@ class ThinkFlowChat:
self.gpt = ResponseGenerator()
self.mood_manager = MoodManager.get_instance()
self.mood_manager.start_mood_update()
+ self.tool_user = ToolUser()
async def _create_thinking_message(self, message, chat, userinfo, messageinfo):
"""创建思考消息"""
@@ -105,49 +108,35 @@ class ThinkFlowChat:
message_manager.add_message(message_set)
return first_bot_msg
- async def _handle_emoji(self, message, chat, response):
+ async def _handle_emoji(self, message, chat, response, send_emoji=""):
"""处理表情包"""
- if random() < global_config.emoji_chance:
+ if send_emoji:
+ emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji)
+ else:
emoji_raw = await emoji_manager.get_emoji_for_text(response)
- # print("11111111111111")
- # logger.info(emoji_raw)
- if emoji_raw:
- emoji_path, description = emoji_raw
- emoji_cq = image_path_to_base64(emoji_path)
+ if emoji_raw:
+ emoji_path, description = emoji_raw
+ emoji_cq = image_path_to_base64(emoji_path)
- # logger.info(emoji_cq)
+ thinking_time_point = round(message.message_info.time, 2)
- thinking_time_point = round(message.message_info.time, 2)
-
- message_segment = Seg(type="emoji", data=emoji_cq)
- bot_message = MessageSending(
- message_id="mt" + str(thinking_time_point),
- chat_stream=chat,
- bot_user_info=UserInfo(
- user_id=global_config.BOT_QQ,
- user_nickname=global_config.BOT_NICKNAME,
- platform=message.message_info.platform,
- ),
- sender_info=message.message_info.user_info,
- message_segment=message_segment,
- reply=message,
- is_head=False,
- is_emoji=True,
- )
-
- # logger.info("22222222222222")
- message_manager.add_message(bot_message)
-
- async def _update_using_response(self, message, response_set):
- """更新心流状态"""
- stream_id = message.chat_stream.stream_id
- chat_talking_prompt = ""
- if stream_id:
- chat_talking_prompt = get_recent_group_detailed_plain_text(
- stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+ message_segment = Seg(type="emoji", data=emoji_cq)
+ bot_message = MessageSending(
+ message_id="mt" + str(thinking_time_point),
+ chat_stream=chat,
+ bot_user_info=UserInfo(
+ user_id=global_config.BOT_QQ,
+ user_nickname=global_config.BOT_NICKNAME,
+ platform=message.message_info.platform,
+ ),
+ sender_info=message.message_info.user_info,
+ message_segment=message_segment,
+ reply=message,
+ is_head=False,
+ is_emoji=True,
)
- await heartflow.get_subheartflow(stream_id).do_thinking_after_reply(response_set, chat_talking_prompt)
+ message_manager.add_message(bot_message)
async def _update_relationship(self, message: MessageRecv, response_set):
"""更新关系情绪"""
@@ -183,26 +172,24 @@ class ThinkFlowChat:
heartflow.create_subheartflow(chat.stream_id)
await message.process()
- logger.debug(f"消息处理成功{message.processed_plain_text}")
+ logger.trace(f"消息处理成功{message.processed_plain_text}")
# 过滤词/正则表达式过滤
if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
message.raw_message, chat, userinfo
):
return
- logger.debug(f"过滤词/正则表达式过滤成功{message.processed_plain_text}")
+ logger.trace(f"过滤词/正则表达式过滤成功{message.processed_plain_text}")
await self.storage.store_message(message, chat)
- logger.debug(f"存储成功{message.processed_plain_text}")
+ logger.trace(f"存储成功{message.processed_plain_text}")
# 记忆激活
- timer1 = time.time()
- interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
- message.processed_plain_text, fast_retrieval=True
- )
- timer2 = time.time()
- timing_results["记忆激活"] = timer2 - timer1
- logger.debug(f"记忆激活: {interested_rate}")
+ with Timer("记忆激活", timing_results):
+ interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
+ message.processed_plain_text, fast_retrieval=True
+ )
+ logger.trace(f"记忆激活: {interested_rate}")
# 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message)
@@ -225,13 +212,6 @@ class ThinkFlowChat:
logger.info("触发缓冲,已炸飞消息列")
return
- # 计算回复意愿
- # current_willing_old = willing_manager.get_willing(chat_stream=chat)
- # # current_willing_new = (heartflow.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4
- # # current_willing = (current_willing_old + current_willing_new) / 2
- # # 有点bug
- # current_willing = current_willing_old
-
# 获取回复概率
is_willing = False
if reply_probability != 1:
@@ -255,6 +235,7 @@ class ThinkFlowChat:
do_reply = False
if random() < reply_probability:
try:
+
do_reply = True
# 回复前处理
@@ -262,49 +243,110 @@ class ThinkFlowChat:
# 创建思考消息
try:
- timer1 = time.time()
- thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
- timer2 = time.time()
- timing_results["创建思考消息"] = timer2 - timer1
+ with Timer("创建思考消息", timing_results):
+ thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
except Exception as e:
logger.error(f"心流创建思考消息失败: {e}")
- logger.debug(f"创建捕捉器,thinking_id:{thinking_id}")
+ logger.trace(f"创建捕捉器,thinking_id:{thinking_id}")
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
info_catcher.catch_decide_to_response(message)
+ # 观察
try:
- # 观察
- timer1 = time.time()
- await heartflow.get_subheartflow(chat.stream_id).do_observe()
- timer2 = time.time()
- timing_results["观察"] = timer2 - timer1
+ with Timer("观察", timing_results):
+ await heartflow.get_subheartflow(chat.stream_id).do_observe()
except Exception as e:
logger.error(f"心流观察失败: {e}")
+ logger.error(traceback.format_exc())
info_catcher.catch_after_observe(timing_results["观察"])
+ # 思考前使用工具
+ update_relationship = ""
+ get_mid_memory_id = []
+ tool_result_info = {}
+ send_emoji = ""
+ try:
+ with Timer("思考前使用工具", timing_results):
+ tool_result = await self.tool_user.use_tool(
+ message.processed_plain_text,
+ message.message_info.user_info.user_nickname,
+ chat,
+ heartflow.get_subheartflow(chat.stream_id),
+ )
+ # 如果工具被使用且获得了结果,将收集到的信息合并到思考中
+ # collected_info = ""
+ if tool_result.get("used_tools", False):
+ if "structured_info" in tool_result:
+ tool_result_info = tool_result["structured_info"]
+ # collected_info = ""
+ get_mid_memory_id = []
+ update_relationship = ""
+
+ # 动态解析工具结果
+ for tool_name, tool_data in tool_result_info.items():
+ # tool_result_info += f"\n{tool_name} 相关信息:\n"
+ # for item in tool_data:
+ # tool_result_info += f"- {item['name']}: {item['content']}\n"
+
+ # 特殊判定:mid_chat_mem
+ if tool_name == "mid_chat_mem":
+ for mid_memory in tool_data:
+ get_mid_memory_id.append(mid_memory["content"])
+
+ # 特殊判定:change_mood
+ if tool_name == "change_mood":
+ for mood in tool_data:
+ self.mood_manager.update_mood_from_emotion(
+ mood["content"], global_config.mood_intensity_factor
+ )
+
+ # 特殊判定:change_relationship
+ if tool_name == "change_relationship":
+ update_relationship = tool_data[0]["content"]
+
+ if tool_name == "send_emoji":
+ send_emoji = tool_data[0]["content"]
+
+ except Exception as e:
+ logger.error(f"思考前工具调用失败: {e}")
+ logger.error(traceback.format_exc())
+
+ # 处理关系更新
+ if update_relationship:
+ stance, emotion = await self.gpt._get_emotion_tags_with_reason(
+ "你还没有回复", message.processed_plain_text, update_relationship
+ )
+ await relationship_manager.calculate_update_relationship_value(
+ chat_stream=message.chat_stream, label=emotion, stance=stance
+ )
+
# 思考前脑内状态
try:
- timer1 = time.time()
- current_mind, past_mind = await heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply(
- message_txt=message.processed_plain_text,
- sender_name=message.message_info.user_info.user_nickname,
- chat_stream=chat,
- )
- timer2 = time.time()
- timing_results["思考前脑内状态"] = timer2 - timer1
+ with Timer("思考前脑内状态", timing_results):
+ current_mind, past_mind = await heartflow.get_subheartflow(
+ chat.stream_id
+ ).do_thinking_before_reply(
+ message_txt=message.processed_plain_text,
+ sender_info=message.message_info.user_info,
+ chat_stream=chat,
+ obs_id=get_mid_memory_id,
+ extra_info=tool_result_info,
+ )
except Exception as e:
logger.error(f"心流思考前脑内状态失败: {e}")
+ logger.error(traceback.format_exc())
+ # 确保变量被定义,即使在错误情况下
+ current_mind = ""
+ past_mind = ""
info_catcher.catch_afer_shf_step(timing_results["思考前脑内状态"], past_mind, current_mind)
# 生成回复
- timer1 = time.time()
- response_set = await self.gpt.generate_response(message, thinking_id)
- timer2 = time.time()
- timing_results["生成回复"] = timer2 - timer1
+ with Timer("生成回复", timing_results):
+ response_set = await self.gpt.generate_response(message, thinking_id)
info_catcher.catch_after_generate_response(timing_results["生成回复"])
@@ -314,10 +356,8 @@ class ThinkFlowChat:
# 发送消息
try:
- timer1 = time.time()
- first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
- timer2 = time.time()
- timing_results["发送消息"] = timer2 - timer1
+ with Timer("发送消息", timing_results):
+ first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
except Exception as e:
logger.error(f"心流发送消息失败: {e}")
@@ -327,33 +367,69 @@ class ThinkFlowChat:
# 处理表情包
try:
- timer1 = time.time()
- await self._handle_emoji(message, chat, response_set)
- timer2 = time.time()
- timing_results["处理表情包"] = timer2 - timer1
+ with Timer("处理表情包", timing_results):
+ if global_config.emoji_chance == 1:
+ if send_emoji:
+ logger.info(f"麦麦决定发送表情包{send_emoji}")
+ await self._handle_emoji(message, chat, response_set, send_emoji)
+ else:
+ if random() < global_config.emoji_chance:
+ await self._handle_emoji(message, chat, response_set)
except Exception as e:
logger.error(f"心流处理表情包失败: {e}")
- # 更新心流
+ # 思考后脑内状态更新
try:
- timer1 = time.time()
- await self._update_using_response(message, response_set)
- timer2 = time.time()
- timing_results["更新心流"] = timer2 - timer1
- except Exception as e:
- logger.error(f"心流更新失败: {e}")
+ with Timer("思考后脑内状态更新", timing_results):
+ stream_id = message.chat_stream.stream_id
+ chat_talking_prompt = ""
+ if stream_id:
+ chat_talking_prompt = get_recent_group_detailed_plain_text(
+ stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+ )
- # 更新关系情绪
- try:
- timer1 = time.time()
- await self._update_relationship(message, response_set)
- timer2 = time.time()
- timing_results["更新关系情绪"] = timer2 - timer1
+ await heartflow.get_subheartflow(stream_id).do_thinking_after_reply(
+ response_set, chat_talking_prompt, tool_result_info
+ )
except Exception as e:
- logger.error(f"心流更新关系情绪失败: {e}")
+ logger.error(f"心流思考后脑内状态更新失败: {e}")
+ logger.error(traceback.format_exc())
# 回复后处理
await willing_manager.after_generate_reply_handle(message.message_info.message_id)
+
+ # 处理认识关系
+ try:
+ is_known = await relationship_manager.is_known_some_one(
+ message.message_info.platform,
+ message.message_info.user_info.user_id
+ )
+ if not is_known:
+ logger.info(f"首次认识用户: {message.message_info.user_info.user_nickname}")
+ await relationship_manager.first_knowing_some_one(
+ message.message_info.platform,
+ message.message_info.user_info.user_id,
+ message.message_info.user_info.user_nickname,
+ message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
+ ""
+ )
+ else:
+ logger.debug(f"已认识用户: {message.message_info.user_info.user_nickname}")
+ if not await relationship_manager.is_qved_name(
+ message.message_info.platform,
+ message.message_info.user_info.user_id
+ ):
+ logger.info(f"更新已认识但未取名的用户: {message.message_info.user_info.user_nickname}")
+ await relationship_manager.first_knowing_some_one(
+ message.message_info.platform,
+ message.message_info.user_info.user_id,
+ message.message_info.user_info.user_nickname,
+ message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
+ ""
+ )
+ except Exception as e:
+ logger.error(f"处理认识关系失败: {e}")
+ logger.error(traceback.format_exc())
except Exception as e:
logger.error(f"心流处理消息失败: {e}")
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
index df55ad80b..6f6c8bf26 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
@@ -1,4 +1,3 @@
-import time
from typing import List, Optional
import random
@@ -10,6 +9,7 @@ from .think_flow_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+from ...utils.timer_calculater import Timer
from src.plugins.moods.moods import MoodManager
@@ -26,7 +26,10 @@ logger = get_module_logger("llm_generator", config=llm_config)
class ResponseGenerator:
def __init__(self):
self.model_normal = LLM_request(
- model=global_config.llm_normal, temperature=0.15, max_tokens=256, request_type="response_heartflow"
+ model=global_config.llm_normal,
+ temperature=global_config.llm_normal["temp"],
+ max_tokens=256,
+ request_type="response_heartflow",
)
self.model_sum = LLM_request(
@@ -44,41 +47,44 @@ class ResponseGenerator:
arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
- time1 = time.time()
-
- checked = False
- if random.random() > 0:
+ with Timer() as t_generate_response:
checked = False
- current_model = self.model_normal
- current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高
- model_response = await self._generate_response_with_model(
- message, current_model, thinking_id, mode="normal"
- )
+ if random.random() > 0:
+ checked = False
+ current_model = self.model_normal
+ current_model.temperature = (
+ global_config.llm_normal["temp"] * arousal_multiplier
+ ) # 激活度越高,温度越高
+ model_response = await self._generate_response_with_model(
+ message, current_model, thinking_id, mode="normal"
+ )
- model_checked_response = model_response
- else:
- checked = True
- current_model = self.model_normal
- current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高
- print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
- model_response = await self._generate_response_with_model(
- message, current_model, thinking_id, mode="simple"
- )
+ model_checked_response = model_response
+ else:
+ checked = True
+ current_model = self.model_normal
+ current_model.temperature = (
+ global_config.llm_normal["temp"] * arousal_multiplier
+ ) # 激活度越高,温度越高
+ print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
+ model_response = await self._generate_response_with_model(
+ message, current_model, thinking_id, mode="simple"
+ )
- current_model.temperature = 0.3
- model_checked_response = await self._check_response_with_model(
- message, model_response, current_model, thinking_id
- )
-
- time2 = time.time()
+ current_model.temperature = global_config.llm_normal["temp"]
+ model_checked_response = await self._check_response_with_model(
+ message, model_response, current_model, thinking_id
+ )
if model_response:
if checked:
logger.info(
- f"{global_config.BOT_NICKNAME}的回复是:{model_response},思忖后,回复是:{model_checked_response},生成回复时间: {time2 - time1}秒"
+ f"{global_config.BOT_NICKNAME}的回复是:{model_response},思忖后,回复是:{model_checked_response},生成回复时间: {t_generate_response.human_readable}"
)
else:
- logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response},生成回复时间: {time2 - time1}秒")
+ logger.info(
+ f"{global_config.BOT_NICKNAME}的回复是:{model_response},生成回复时间: {t_generate_response.human_readable}"
+ )
model_processed_response = await self._process_response(model_checked_response)
@@ -94,34 +100,28 @@ class ResponseGenerator:
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
- if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
- sender_name = (
- f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
- f"{message.chat_stream.user_info.user_cardname}"
- )
- elif message.chat_stream.user_info.user_nickname:
- sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
- else:
- sender_name = f"用户({message.chat_stream.user_info.user_id})"
+ # if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
+ # sender_name = (
+ # f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
+ # f"{message.chat_stream.user_info.user_cardname}"
+ # )
+ # elif message.chat_stream.user_info.user_nickname:
+ # sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
+ # else:
+ # sender_name = f"用户({message.chat_stream.user_info.user_id})"
+
+ sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"
# 构建prompt
- timer1 = time.time()
- if mode == "normal":
- prompt = await prompt_builder._build_prompt(
- message.chat_stream,
- message_txt=message.processed_plain_text,
- sender_name=sender_name,
- stream_id=message.chat_stream.stream_id,
- )
- elif mode == "simple":
- prompt = await prompt_builder._build_prompt_simple(
- message.chat_stream,
- message_txt=message.processed_plain_text,
- sender_name=sender_name,
- stream_id=message.chat_stream.stream_id,
- )
- timer2 = time.time()
- logger.info(f"构建{mode}prompt时间: {timer2 - timer1}秒")
+ with Timer() as t_build_prompt:
+ if mode == "normal":
+ prompt = await prompt_builder._build_prompt(
+ message.chat_stream,
+ message_txt=message.processed_plain_text,
+ sender_name=sender_name,
+ stream_id=message.chat_stream.stream_id,
+ )
+ logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
@@ -136,50 +136,6 @@ class ResponseGenerator:
return content
- async def _check_response_with_model(
- self, message: MessageRecv, content: str, model: LLM_request, thinking_id: str
- ) -> str:
- _info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
-
- sender_name = ""
- if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
- sender_name = (
- f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
- f"{message.chat_stream.user_info.user_cardname}"
- )
- elif message.chat_stream.user_info.user_nickname:
- sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
- else:
- sender_name = f"用户({message.chat_stream.user_info.user_id})"
-
- # 构建prompt
- timer1 = time.time()
- prompt = await prompt_builder._build_prompt_check_response(
- message.chat_stream,
- message_txt=message.processed_plain_text,
- sender_name=sender_name,
- stream_id=message.chat_stream.stream_id,
- content=content,
- )
- timer2 = time.time()
- logger.info(f"构建check_prompt: {prompt}")
- logger.info(f"构建check_prompt时间: {timer2 - timer1}秒")
-
- try:
- checked_content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
-
- # info_catcher.catch_after_llm_generated(
- # prompt=prompt,
- # response=content,
- # reasoning_content=reasoning_content,
- # model_name=self.current_model_name)
-
- except Exception:
- logger.exception("检查回复时出错")
- return None
-
- return checked_content
-
async def _get_emotion_tags(self, content: str, processed_plain_text: str):
"""提取情感标签,结合立场和情绪"""
try:
@@ -229,6 +185,57 @@ class ResponseGenerator:
logger.debug(f"获取情感标签时出错: {e}")
return "中立", "平静" # 出错时返回默认值
+ async def _get_emotion_tags_with_reason(self, content: str, processed_plain_text: str, reason: str):
+ """提取情感标签,结合立场和情绪"""
+ try:
+ # 构建提示词,结合回复内容、被回复的内容以及立场分析
+ prompt = f"""
+ 请严格根据以下对话内容,完成以下任务:
+ 1. 判断回复者对被回复者观点的直接立场:
+ - "支持":明确同意或强化被回复者观点
+ - "反对":明确反驳或否定被回复者观点
+ - "中立":不表达明确立场或无关回应
+ 2. 从"开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
+ 3. 按照"立场-情绪"的格式直接输出结果,例如:"反对-愤怒"
+ 4. 考虑回复者的人格设定为{global_config.personality_core}
+
+ 对话示例:
+ 被回复:「A就是笨」
+ 回复:「A明明很聪明」 → 反对-愤怒
+
+ 当前对话:
+ 被回复:「{processed_plain_text}」
+ 回复:「{content}」
+
+ 原因:「{reason}」
+
+ 输出要求:
+ - 只需输出"立场-情绪"结果,不要解释
+ - 严格基于文字直接表达的对立关系判断
+ """
+
+ # 调用模型生成结果
+ result, _, _ = await self.model_sum.generate_response(prompt)
+ result = result.strip()
+
+ # 解析模型输出的结果
+ if "-" in result:
+ stance, emotion = result.split("-", 1)
+ valid_stances = ["支持", "反对", "中立"]
+ valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]
+ if stance in valid_stances and emotion in valid_emotions:
+ return stance, emotion # 返回有效的立场-情绪组合
+ else:
+ logger.debug(f"无效立场-情感组合:{result}")
+ return "中立", "平静" # 默认返回中立-平静
+ else:
+ logger.debug(f"立场-情感格式错误:{result}")
+ return "中立", "平静" # 格式错误时返回默认值
+
+ except Exception as e:
+ logger.debug(f"获取情感标签时出错: {e}")
+ return "中立", "平静" # 出错时返回默认值
+
async def _process_response(self, content: str) -> List[str]:
"""处理响应内容,返回处理后的内容和情感标签"""
if not content:
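`_get_emotion_tags_with_reason` asks the model for a single `立场-情绪` string and validates it against fixed whitelists, defaulting to 中立/平静 whenever the output is malformed. The parsing half, isolated as a sketch:

```python
from typing import Tuple

VALID_STANCES = ["支持", "反对", "中立"]
VALID_EMOTIONS = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]


def parse_stance_emotion(result: str) -> Tuple[str, str]:
    """Parse a '立场-情绪' reply; fall back to ('中立', '平静') on any malformed output."""
    result = result.strip()
    if "-" not in result:
        return "中立", "平静"
    stance, emotion = result.split("-", 1)
    if stance in VALID_STANCES and emotion in VALID_EMOTIONS:
        return stance, emotion
    return "中立", "平静"


# parse_stance_emotion("反对-愤怒") -> ("反对", "愤怒")
# parse_stance_emotion("随便说的")   -> ("中立", "平静")
```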
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
index cfc419738..29863ba72 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
@@ -8,7 +8,8 @@ from src.common.logger import get_module_logger
from ....individuality.individuality import Individuality
from src.heart_flow.heartflow import heartflow
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
-
+from src.plugins.person_info.relationship_manager import relationship_manager
+from src.plugins.chat.utils import parse_text_timestamps
logger = get_module_logger("prompt")
@@ -43,7 +44,7 @@ def init_prompt():
{chat_talking_prompt}
现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
你刚刚脑子里在想:{current_mind_info}
-现在请你读读之前的聊天记录,然后给出日常,口语化且简短的回复内容,只给出文字的回复内容,不要有内心独白:
+现在请你读读之前的聊天记录,然后给出日常,口语化且简短的回复内容,请只对一个话题进行回复,只给出文字的回复内容,不要有内心独白:
""",
"heart_flow_prompt_simple",
)
@@ -52,7 +53,7 @@ def init_prompt():
你的名字叫{bot_name},{prompt_identity}。
{chat_target},你希望在群里回复:{content}。现在请你根据以下信息修改回复内容。将这个回复修改的更加日常且口语化的回复,平淡一些,回复尽量简短一些。不要回复的太有条理。
{prompt_ger},不要刻意突出自身学科背景,注意只输出回复内容。
-{moderation_prompt}。注意:不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""",
+{moderation_prompt}。注意:不要输出多余内容(包括前后缀,冒号和引号,at或 @等 )。""",
"heart_flow_prompt_response",
)
@@ -128,7 +129,7 @@ class PromptBuilder:
# moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
# 涉及政治敏感以及违法违规的内容请规避。"""
- logger.info("开始构建prompt")
+ logger.debug("开始构建prompt")
# prompt = f"""
# {chat_target}
@@ -160,7 +161,10 @@ class PromptBuilder:
prompt_ger=prompt_ger,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
)
-
+
+ prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+ prompt = parse_text_timestamps(prompt, mode="lite")
+
return prompt
async def _build_prompt_simple(
@@ -206,7 +210,7 @@ class PromptBuilder:
)
keywords_reaction_prompt += rule.get("reaction", "") + ","
- logger.info("开始构建prompt")
+ logger.debug("开始构建prompt")
# prompt = f"""
# 你的名字叫{global_config.BOT_NICKNAME},{prompt_personality}。
@@ -257,7 +261,7 @@ class PromptBuilder:
# moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
# 涉及政治敏感以及违法违规的内容请规避。"""
- logger.info("开始构建check_prompt")
+ logger.debug("开始构建check_prompt")
# prompt = f"""
# 你的名字叫{global_config.BOT_NICKNAME},{prompt_identity}。
diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py
index 0fefb5c14..8238078c2 100644
--- a/src/plugins/config/config.py
+++ b/src/plugins/config/config.py
@@ -27,8 +27,8 @@ logger = get_module_logger("config", config=config_config)
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = True
-mai_version_main = "0.6.2"
-mai_version_fix = "snapshot-2"
+mai_version_main = "0.6.3"
+mai_version_fix = "snapshot-1"
if mai_version_fix:
if is_test:
@@ -196,6 +196,9 @@ class BotConfig:
sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒
+ observation_context_size: int = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+ compressed_length: int = 5 # 心流上下文压缩的最短压缩长度,不能大于observation_context_size;上下文超过观察长度时会被压缩
+ compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除
# willing
willing_mode: str = "classical" # 意愿模式
@@ -253,8 +256,8 @@ class BotConfig:
chinese_typo_tone_error_rate = 0.2 # 声调错误概率
chinese_typo_word_replace_rate = 0.02 # 整词替换概率
- # response_spliter
- enable_response_spliter = True # 是否启用回复分割器
+ # response_splitter
+ enable_response_splitter = True # 是否启用回复分割器
response_max_length = 100 # 回复允许的最大长度
response_max_sentence_num = 3 # 回复允许的最大句子数
@@ -440,6 +443,14 @@ class BotConfig:
config.heart_flow_update_interval = heartflow_config.get(
"heart_flow_update_interval", config.heart_flow_update_interval
)
+ if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
+ config.observation_context_size = heartflow_config.get(
+ "observation_context_size", config.observation_context_size
+ )
+ config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)
+ config.compress_length_limit = heartflow_config.get(
+ "compress_length_limit", config.compress_length_limit
+ )
def willing(parent: dict):
willing_config = parent["willing"]
@@ -477,7 +488,7 @@ class BotConfig:
"llm_emotion_judge",
"vlm",
"embedding",
- "moderation",
+ "llm_tool_use",
"llm_observation",
"llm_sub_heartflow",
"llm_heartflow",
@@ -489,7 +500,15 @@ class BotConfig:
# base_url 的例子: SILICONFLOW_BASE_URL
# key 的例子: SILICONFLOW_KEY
- cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0}
+ cfg_target = {
+ "name": "",
+ "base_url": "",
+ "key": "",
+ "stream": False,
+ "pri_in": 0,
+ "pri_out": 0,
+ "temp": 0.7,
+ }
if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
cfg_target = cfg_item
@@ -502,6 +521,7 @@ class BotConfig:
stable_item.append("stream")
pricing_item = ["pri_in", "pri_out"]
+
# 从配置中原始拷贝稳定字段
for i in stable_item:
# 如果 字段 属于计费项 且获取不到,那默认值是 0
@@ -519,6 +539,13 @@ class BotConfig:
logger.error(f"{item} 中的必要字段不存在,请检查")
raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查") from e
+ # 如果配置中有temp参数,就使用配置中的值
+ if "temp" in cfg_item:
+ cfg_target["temp"] = cfg_item["temp"]
+ else:
+ # 如果没有temp参数,就删除默认值
+ cfg_target.pop("temp", None)
+
provider = cfg_item.get("provider")
if provider is None:
logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查")
@@ -604,13 +631,13 @@ class BotConfig:
"word_replace_rate", config.chinese_typo_word_replace_rate
)
- def response_spliter(parent: dict):
- response_spliter_config = parent["response_spliter"]
- config.enable_response_spliter = response_spliter_config.get(
- "enable_response_spliter", config.enable_response_spliter
+ def response_splitter(parent: dict):
+ response_splitter_config = parent["response_splitter"]
+ config.enable_response_splitter = response_splitter_config.get(
+ "enable_response_splitter", config.enable_response_splitter
)
- config.response_max_length = response_spliter_config.get("response_max_length", config.response_max_length)
- config.response_max_sentence_num = response_spliter_config.get(
+ config.response_max_length = response_splitter_config.get("response_max_length", config.response_max_length)
+ config.response_max_sentence_num = response_splitter_config.get(
"response_max_sentence_num", config.response_max_sentence_num
)
@@ -664,7 +691,7 @@ class BotConfig:
"keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False},
"chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False},
"platforms": {"func": platforms, "support": ">=1.0.0"},
- "response_spliter": {"func": response_spliter, "support": ">=0.0.11", "necessary": False},
+ "response_splitter": {"func": response_splitter, "support": ">=0.0.11", "necessary": False},
"experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False},
"heartflow": {"func": heartflow, "support": ">=1.0.2", "necessary": False},
}
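config.py gates the new heartflow compression fields on `config.INNER_VERSION in SpecifierSet(">=1.3.0")`, so older config files silently keep the defaults. A minimal sketch of that version-gated load using the `packaging` library as the diff does; `config.INNER_VERSION` is assumed to be a `packaging` Version (or version string):

```python
from packaging.specifiers import SpecifierSet


def load_heartflow(config, heartflow_config: dict) -> None:
    # Fields present in every supported config version.
    config.heart_flow_update_interval = heartflow_config.get(
        "heart_flow_update_interval", config.heart_flow_update_interval
    )
    # Fields introduced with config schema 1.3.0; older files keep the class defaults.
    if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
        config.observation_context_size = heartflow_config.get(
            "observation_context_size", config.observation_context_size
        )
        config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)
        config.compress_length_limit = heartflow_config.get(
            "compress_length_limit", config.compress_length_limit
        )
```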
diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py
index 4e52afeca..c2c090d58 100644
--- a/src/plugins/memory_system/Hippocampus.py
+++ b/src/plugins/memory_system/Hippocampus.py
@@ -436,7 +436,7 @@ class Hippocampus:
activation_values[neighbor] = new_activation
visited_nodes.add(neighbor)
nodes_to_process.append((neighbor, new_activation, current_depth + 1))
- logger.debug(
+ logger.trace(
f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
) # noqa: E501
@@ -1144,7 +1144,7 @@ class Hippocampus:
activation_values[neighbor] = new_activation
visited_nodes.add(neighbor)
nodes_to_process.append((neighbor, new_activation, current_depth + 1))
- logger.debug(
+ logger.trace(
f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
) # noqa: E501
@@ -1301,7 +1301,7 @@ class Hippocampus:
# 对每个关键词进行扩散式检索
for keyword in valid_keywords:
- logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
+ logger.trace(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
# 初始化激活值
activation_values = {keyword: 1.0}
# 记录已访问的节点
@@ -1352,7 +1352,7 @@ class Hippocampus:
# 计算激活节点数与总节点数的比值
total_activation = sum(activate_map.values())
- logger.info(f"总激活值: {total_activation:.2f}")
+ logger.trace(f"总激活值: {total_activation:.2f}")
total_nodes = len(self.memory_graph.G.nodes())
# activated_nodes = len(activate_map)
activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index a472b5bf7..604e74155 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -98,7 +98,7 @@ class LLM_request:
"timestamp": datetime.now(),
}
db.llm_usage.insert_one(usage_data)
- logger.debug(
+ logger.trace(
f"Token使用情况 - 模型: {self.model_name}, "
f"用户: {user_id}, 类型: {request_type}, "
f"提示词: {prompt_tokens}, 完成: {completion_tokens}, "
diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py
index 4c1f9c688..068c37d07 100644
--- a/src/plugins/person_info/person_info.py
+++ b/src/plugins/person_info/person_info.py
@@ -6,6 +6,9 @@ from typing import Any, Callable, Dict
import datetime
import asyncio
import numpy as np
+from src.plugins.models.utils_model import LLM_request
+from src.plugins.config.config import global_config
+from src.individuality.individuality import Individuality
import matplotlib
@@ -13,6 +16,8 @@ matplotlib.use("Agg")
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
+import json
+import re
"""
@@ -32,6 +37,8 @@ logger = get_module_logger("person_info")
person_info_default = {
"person_id": None,
+ "person_name": None,
+ "name_reason": None,
"platform": None,
"user_id": None,
"nickname": None,
@@ -48,16 +55,46 @@ person_info_default = {
class PersonInfoManager:
def __init__(self):
+ self.person_name_list = {}
+ self.qv_name_llm = LLM_request(
+ model=global_config.llm_normal,
+ max_tokens=256,
+ request_type="qv_name",
+ )
if "person_info" not in db.list_collection_names():
db.create_collection("person_info")
db.person_info.create_index("person_id", unique=True)
+
+ # 初始化时读取所有person_name
+ cursor = db.person_info.find(
+ {"person_name": {"$exists": True}},
+ {"person_id": 1, "person_name": 1, "_id": 0}
+ )
+ for doc in cursor:
+ if doc.get("person_name"):
+ self.person_name_list[doc["person_id"]] = doc["person_name"]
+ logger.debug(f"已加载 {len(self.person_name_list)} 个用户名称")
def get_person_id(self, platform: str, user_id: int):
"""获取唯一id"""
+ # 如果platform中存在-,就截取-后面的部分
+ if "-" in platform:
+ platform = platform.split("-")[1]
+
components = [platform, str(user_id)]
key = "_".join(components)
return hashlib.md5(key.encode()).hexdigest()
+ def is_person_known(self, platform: str, user_id: int):
+ """判断是否认识某人"""
+ person_id = self.get_person_id(platform, user_id)
+ document = db.person_info.find_one({"person_id": person_id})
+ if document:
+ return True
+ else:
+ return False
+
+
async def create_person_info(self, person_id: str, data: dict = None):
"""创建一个项"""
if not person_id:
@@ -88,6 +125,109 @@ class PersonInfoManager:
Data[field_name] = value
logger.debug(f"更新时{person_id}不存在,已新建")
await self.create_person_info(person_id, Data)
+
+ async def has_one_field(self, person_id: str, field_name: str):
+ """判断是否存在某一个字段"""
+ document = db.person_info.find_one({"person_id": person_id}, {field_name: 1})
+ if document and field_name in document:
+ return True
+ else:
+ return False
+
+ def _extract_json_from_text(self, text: str) -> dict:
+ """从文本中提取JSON数据的高容错方法"""
+ try:
+
+ # 尝试直接解析
+ return json.loads(text)
+ except json.JSONDecodeError:
+ try:
+ # 尝试找到JSON格式的部分
+ json_pattern = r'\{[^{}]*\}'
+ matches = re.findall(json_pattern, text)
+ if matches:
+ return json.loads(matches[0])
+
+ # 如果上面都失败了,尝试提取键值对
+ nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
+ reason_pattern = r'"reason"[:\s]+"([^"]+)"'
+
+ nickname_match = re.search(nickname_pattern, text)
+ reason_match = re.search(reason_pattern, text)
+
+ if nickname_match:
+ return {
+ "nickname": nickname_match.group(1),
+ "reason": reason_match.group(1) if reason_match else "未提供理由"
+ }
+ except Exception as e:
+ logger.error(f"JSON提取失败: {str(e)}")
+
+ # 如果所有方法都失败了,返回空结果
+ return {"nickname": "", "reason": ""}
+
+ async def qv_person_name(self, person_id: str, user_nickname: str, user_cardname: str, user_avatar: str):
+ """给某个用户取名"""
+ if not person_id:
+ logger.debug("取名失败:person_id不能为空")
+ return
+
+ old_name = await self.get_value(person_id, "person_name")
+ old_reason = await self.get_value(person_id, "name_reason")
+
+ max_retries = 5 # 最大重试次数
+ current_try = 0
+ existing_names = ""
+ while current_try < max_retries:
+ individuality = Individuality.get_instance()
+ prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
+ bot_name = individuality.personality.bot_nickname
+
+ qv_name_prompt = f"你是{bot_name},你{prompt_personality}"
+ qv_name_prompt += f"现在你想给一个用户取一个昵称,用户是的qq昵称是{user_nickname},"
+ qv_name_prompt += f"用户的qq群昵称名是{user_cardname},"
+ if user_avatar:
+ qv_name_prompt += f"用户的qq头像是{user_avatar},"
+ if old_name:
+ qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason},"
+
+ qv_name_prompt += "\n请根据以上用户信息,想想你叫他什么比较好,请最好使用用户的qq昵称,可以稍作修改"
+ if existing_names:
+ qv_name_prompt += f"\n请注意,以下名称已被使用,不要使用以下昵称:{existing_names}。\n"
+ qv_name_prompt += "请用json给出你的想法,并给出理由,示例如下:"
+ qv_name_prompt += '''{
+ "nickname": "昵称",
+ "reason": "理由"
+ }'''
+ logger.debug(f"取名提示词:{qv_name_prompt}")
+ response = await self.qv_name_llm.generate_response(qv_name_prompt)
+ logger.debug(f"取名回复:{response}")
+ result = self._extract_json_from_text(response[0])
+
+ if not result["nickname"]:
+ logger.error("生成的昵称为空,重试中...")
+ current_try += 1
+ continue
+
+ # 检查生成的昵称是否已存在
+ if result["nickname"] not in self.person_name_list.values():
+ # 更新数据库和内存中的列表
+ await self.update_one_field(person_id, "person_name", result["nickname"])
+ # await self.update_one_field(person_id, "nickname", user_nickname)
+ # await self.update_one_field(person_id, "avatar", user_avatar)
+ await self.update_one_field(person_id, "name_reason", result["reason"])
+
+ self.person_name_list[person_id] = result["nickname"]
+ logger.debug(f"用户 {person_id} 的名称已更新为 {result['nickname']},原因:{result['reason']}")
+ return result
+ else:
+ existing_names += f"{result['nickname']}、"
+
+ logger.debug(f"生成的昵称 {result['nickname']} 已存在,重试中...")
+ current_try += 1
+
+ logger.error(f"在{max_retries}次尝试后仍未能生成唯一昵称")
+ return None
async def del_one_document(self, person_id: str):
"""删除指定 person_id 的文档"""
@@ -117,7 +257,7 @@ class PersonInfoManager:
return document[field_name]
else:
default_value = copy.deepcopy(person_info_default[field_name])
- logger.debug(f"获取{person_id}的{field_name}失败,已返回默认值{default_value}")
+ logger.trace(f"获取{person_id}的{field_name}失败,已返回默认值{default_value}")
return default_value
async def get_values(self, person_id: str, field_names: list) -> dict:
@@ -264,17 +404,17 @@ class PersonInfoManager:
msg_interval = int(round(np.percentile(filtered, 80)))
await self.update_one_field(person_id, "msg_interval", msg_interval)
- logger.debug(f"用户{person_id}的msg_interval已经被更新为{msg_interval}")
+ logger.trace(f"用户{person_id}的msg_interval已经被更新为{msg_interval}")
except Exception as e:
- logger.debug(f"用户{person_id}消息间隔计算失败: {type(e).__name__}: {str(e)}")
+ logger.trace(f"用户{person_id}消息间隔计算失败: {type(e).__name__}: {str(e)}")
continue
# 其他...
if msg_interval_map:
- logger.info("已保存分布图到: logs/person_info")
+ logger.trace("已保存分布图到: logs/person_info")
current_time = datetime.datetime.now()
- logger.info(f"个人信息推断结束: {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
+ logger.trace(f"个人信息推断结束: {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
await asyncio.sleep(86400)
except Exception as e:
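Because `qv_person_name` asks the LLM for JSON but model replies are often wrapped in extra prose, `_extract_json_from_text` degrades through three tiers: strict parse, first `{...}` block, then bare key-value regexes. A standalone sketch of the same idea (simplified, without the logger wiring), plus a usage example:

```python
import json
import re


def extract_json_from_text(text: str) -> dict:
    """Fault-tolerant extraction: strict JSON, then the first {...} block, then key-value regexes."""
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        matches = re.findall(r"\{[^{}]*\}", text)
        if matches:
            try:
                return json.loads(matches[0])
            except json.JSONDecodeError:
                pass
        nickname = re.search(r'"nickname"[:\s]+"([^"]+)"', text)
        reason = re.search(r'"reason"[:\s]+"([^"]+)"', text)
        if nickname:
            return {"nickname": nickname.group(1), "reason": reason.group(1) if reason else "未提供理由"}
    return {"nickname": "", "reason": ""}


# LLM replies rarely come back as clean JSON, e.g.:
print(extract_json_from_text('好的!{"nickname": "小明", "reason": "沿用QQ昵称"} 希望你喜欢'))
# -> {'nickname': '小明', 'reason': '沿用QQ昵称'}
```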
diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py
index 726bb1dbb..673e0b07f 100644
--- a/src/plugins/person_info/relationship_manager.py
+++ b/src/plugins/person_info/relationship_manager.py
@@ -4,6 +4,8 @@ import math
from bson.decimal128 import Decimal128
from .person_info import person_info_manager
import time
+import re
+import traceback
relationship_config = LogConfig(
# 使用关系专用样式
@@ -74,8 +76,63 @@ class RelationshipManager:
return mood_value * coefficient
else:
return mood_value / coefficient
+
+ async def is_known_some_one(self, platform , user_id):
+ """判断是否认识某人"""
+ is_known = person_info_manager.is_person_known(platform, user_id)
+ return is_known
- async def calculate_update_relationship_value(self, chat_stream: ChatStream, label: str, stance: str) -> None:
+ async def is_qved_name(self, platform, user_id):
+ """判断是否已经为某人取过名"""
+ person_id = person_info_manager.get_person_id(platform, user_id)
+ is_qved = await person_info_manager.has_one_field(person_id, "person_name")
+ old_name = await person_info_manager.get_value(person_id, "person_name")
+ logger.debug(f"old_name: {old_name}, is_qved: {is_qved}")
+ if is_qved and old_name is not None:
+ return True
+ else:
+ return False
+
+ async def first_knowing_some_one(self, platform, user_id, user_nickname, user_cardname, user_avatar):
+ """第一次认识某人时,记录其昵称并为其取名"""
+ person_id = person_info_manager.get_person_id(platform, user_id)
+ await person_info_manager.update_one_field(person_id, "nickname", user_nickname)
+ # await person_info_manager.update_one_field(person_id, "user_cardname", user_cardname)
+ # await person_info_manager.update_one_field(person_id, "user_avatar", user_avatar)
+ await person_info_manager.qv_person_name(person_id, user_nickname, user_cardname, user_avatar)
+
+ async def convert_all_person_sign_to_person_name(self,input_text:str):
+ """将所有人的格式转换为person_name"""
+ try:
+ # 使用正则表达式匹配格式
+ all_person = person_info_manager.person_name_list
+
+ pattern = r'<([^:]+):(\d+):([^:]+):([^>]+)>'
+ matches = re.findall(pattern, input_text)
+
+ # 遍历匹配结果,将替换为person_name
+ result_text = input_text
+ for platform, user_id, nickname, cardname in matches:
+ person_id = person_info_manager.get_person_id(platform, user_id)
+ # 默认使用昵称作为人名
+ person_name = nickname.strip() if nickname.strip() else cardname.strip()
+
+ if person_id in all_person:
+ if all_person[person_id] is not None:
+ person_name = all_person[person_id]
+
+ logger.debug(f"将<{platform}:{user_id}:{nickname}:{cardname}>替换为{person_name}")
+
+
+ result_text = result_text.replace(f"<{platform}:{user_id}:{nickname}:{cardname}>", person_name)
+
+ return result_text
+ except Exception as e:
+ logger.error(traceback.format_exc())
+ return input_text
+
+ async def calculate_update_relationship_value(self, chat_stream: ChatStream, label: str, stance: str) -> tuple:
"""计算并变更关系值
新的关系值变更计算方式:
将关系值限定在-1000到1000
@@ -84,6 +141,10 @@ class RelationshipManager:
2.关系越差,改善越难,关系越好,恶化越容易
3.人维护关系的精力往往有限,所以当高关系值用户越多,对于中高关系值用户增长越慢
4.连续正面或负面情感会正反馈
+
+ 返回:
+ 用户昵称,变更值,变更后关系等级
+
"""
stancedict = {
"支持": 0,
@@ -147,6 +208,7 @@ class RelationshipManager:
level_num = self.calculate_level_num(old_value + value)
relationship_level = ["厌恶", "冷漠", "一般", "友好", "喜欢", "暧昧"]
logger.info(
+ f"用户: {chat_stream.user_info.user_nickname}"
f"当前关系: {relationship_level[level_num]}, "
f"关系值: {old_value:.2f}, "
f"当前立场情感: {stance}-{label}, "
@@ -155,6 +217,97 @@ class RelationshipManager:
await person_info_manager.update_one_field(person_id, "relationship_value", old_value + value, data)
+ return chat_stream.user_info.user_nickname, value, relationship_level[level_num]
+
+ async def calculate_update_relationship_value_with_reason(
+ self, chat_stream: ChatStream, label: str, stance: str, reason: str
+ ) -> tuple:
+ """计算并变更关系值
+ 新的关系值变更计算方式:
+ 将关系值限定在-1000到1000
+ 对于关系值的变更,期望:
+ 1.向两端逼近时会逐渐减缓
+ 2.关系越差,改善越难,关系越好,恶化越容易
+ 3.人维护关系的精力往往有限,所以当高关系值用户越多,对于中高关系值用户增长越慢
+ 4.连续正面或负面情感会正反馈
+
+ 返回:
+ 用户昵称,变更值,变更后关系等级
+
+ """
+ stancedict = {
+ "支持": 0,
+ "中立": 1,
+ "反对": 2,
+ }
+
+ valuedict = {
+ "开心": 1.5,
+ "愤怒": -2.0,
+ "悲伤": -0.5,
+ "惊讶": 0.6,
+ "害羞": 2.0,
+ "平静": 0.3,
+ "恐惧": -1.5,
+ "厌恶": -1.0,
+ "困惑": 0.5,
+ }
+
+ person_id = person_info_manager.get_person_id(chat_stream.user_info.platform, chat_stream.user_info.user_id)
+ data = {
+ "platform": chat_stream.user_info.platform,
+ "user_id": chat_stream.user_info.user_id,
+ "nickname": chat_stream.user_info.user_nickname,
+ "konw_time": int(time.time()),
+ }
+ old_value = await person_info_manager.get_value(person_id, "relationship_value")
+ old_value = self.ensure_float(old_value, person_id)
+
+ if old_value > 1000:
+ old_value = 1000
+ elif old_value < -1000:
+ old_value = -1000
+
+ value = valuedict[label]
+ if old_value >= 0:
+ if valuedict[label] >= 0 and stancedict[stance] != 2:
+ value = value * math.cos(math.pi * old_value / 2000)
+ if old_value > 500:
+ rdict = await person_info_manager.get_specific_value_list("relationship_value", lambda x: x > 700)
+ high_value_count = len(rdict)
+ if old_value > 700:
+ value *= 3 / (high_value_count + 2) # 排除自己
+ else:
+ value *= 3 / (high_value_count + 3)
+ elif valuedict[label] < 0 and stancedict[stance] != 0:
+ value = value * math.exp(old_value / 2000)
+ else:
+ value = 0
+ elif old_value < 0:
+ if valuedict[label] >= 0 and stancedict[stance] != 2:
+ value = value * math.exp(old_value / 2000)
+ elif valuedict[label] < 0 and stancedict[stance] != 0:
+ value = value * math.cos(math.pi * old_value / 2000)
+ else:
+ value = 0
+
+ self.positive_feedback_sys(label, stance)
+ value = self.mood_feedback(value)
+
+ level_num = self.calculate_level_num(old_value + value)
+ relationship_level = ["厌恶", "冷漠", "一般", "友好", "喜欢", "暧昧"]
+ logger.info(
+ f"用户: {chat_stream.user_info.user_nickname}"
+ f"当前关系: {relationship_level[level_num]}, "
+ f"关系值: {old_value:.2f}, "
+ f"当前立场情感: {stance}-{label}, "
+ f"变更: {value:+.5f}"
+ )
+
+ await person_info_manager.update_one_field(person_id, "relationship_value", old_value + value, data)
+
+ return chat_stream.user_info.user_nickname, value, relationship_level[level_num]
+
async def build_relationship_info(self, person) -> str:
person_id = person_info_manager.get_person_id(person[0], person[1])
relationship_value = await person_info_manager.get_value(person_id, "relationship_value")
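The new `calculate_update_relationship_value_with_reason` repeats the damping curve documented in its docstring: values saturate toward ±1000, recovering from a bad relationship is slow, and good relationships erode quickly. A stripped-down sketch of just that curve (omitting the stance gating, mood feedback, and the high-value-count penalty), with illustrative names:

```python
import math


def damped_delta(old_value: float, raw_delta: float) -> float:
    """Sketch of the damping above: positive progress tapers near the caps (cosine),
    changes against the current sign are scaled exponentially by how far out you are."""
    old_value = max(-1000.0, min(1000.0, old_value))
    if old_value >= 0:
        if raw_delta >= 0:
            return raw_delta * math.cos(math.pi * old_value / 2000)  # harder to climb when already high
        return raw_delta * math.exp(old_value / 2000)                # easier to fall when high
    if raw_delta >= 0:
        return raw_delta * math.exp(old_value / 2000)                # harder to recover from a low value
    return raw_delta * math.cos(math.pi * old_value / 2000)          # negative side saturates toward -1000


for old in (-900, 0, 900):
    print(old, round(damped_delta(old, 1.5), 3), round(damped_delta(old, -2.0), 3))
```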
diff --git a/src/plugins/remote/remote.py b/src/plugins/remote/remote.py
index a2084435f..bc7437057 100644
--- a/src/plugins/remote/remote.py
+++ b/src/plugins/remote/remote.py
@@ -126,6 +126,7 @@ def main():
"""主函数,启动心跳线程"""
# 配置
SERVER_URL = "http://hyybuth.xyz:10058"
+ # SERVER_URL = "http://localhost:10058"
HEARTBEAT_INTERVAL = 300 # 5分钟(秒)
# 创建并启动心跳线程
diff --git a/src/plugins/utils/prompt_builder.py b/src/plugins/utils/prompt_builder.py
index e4d2e35c6..f3de24e4f 100644
--- a/src/plugins/utils/prompt_builder.py
+++ b/src/plugins/utils/prompt_builder.py
@@ -3,7 +3,7 @@ import re
from contextlib import asynccontextmanager
import asyncio
from src.common.logger import get_module_logger
-import traceback
+# import traceback
logger = get_module_logger("prompt_build")
@@ -94,14 +94,32 @@ global_prompt_manager = PromptManager()
class Prompt(str):
+ # 临时标记,作为类常量
+ _TEMP_LEFT_BRACE = "__ESCAPED_LEFT_BRACE__"
+ _TEMP_RIGHT_BRACE = "__ESCAPED_RIGHT_BRACE__"
+
+ @staticmethod
+ def _process_escaped_braces(template: str) -> str:
+ """处理模板中的转义花括号,将 \{ 和 \} 替换为临时标记"""
+ return template.replace("\\{", Prompt._TEMP_LEFT_BRACE).replace("\\}", Prompt._TEMP_RIGHT_BRACE)
+
+ @staticmethod
+ def _restore_escaped_braces(template: str) -> str:
+ """将临时标记还原为实际的花括号字符"""
+ return template.replace(Prompt._TEMP_LEFT_BRACE, "{").replace(Prompt._TEMP_RIGHT_BRACE, "}")
+
def __new__(cls, fstr: str, name: Optional[str] = None, args: Union[List[Any], tuple[Any, ...]] = None, **kwargs):
# 如果传入的是元组,转换为列表
if isinstance(args, tuple):
args = list(args)
should_register = kwargs.pop("_should_register", True)
+
+ # 预处理模板中的转义花括号
+ processed_fstr = cls._process_escaped_braces(fstr)
+
# 解析模板
template_args = []
- result = re.findall(r"\{(.*?)\}", fstr)
+ result = re.findall(r"\{(.*?)\}", processed_fstr)
for expr in result:
if expr and expr not in template_args:
template_args.append(expr)
@@ -142,8 +160,11 @@ class Prompt(str):
@classmethod
def _format_template(cls, template: str, args: List[Any] = None, kwargs: Dict[str, Any] = None) -> str:
+ # 预处理模板中的转义花括号
+ processed_template = cls._process_escaped_braces(template)
+
template_args = []
- result = re.findall(r"\{(.*?)\}", template)
+ result = re.findall(r"\{(.*?)\}", processed_template)
for expr in result:
if expr and expr not in template_args:
template_args.append(expr)
@@ -177,13 +198,15 @@ class Prompt(str):
try:
# 先用位置参数格式化
-
if args:
- template = template.format(**formatted_args)
+ processed_template = processed_template.format(**formatted_args)
# 再用关键字参数格式化
if kwargs:
- template = template.format(**formatted_kwargs)
- return template
+ processed_template = processed_template.format(**formatted_kwargs)
+
+ # 将临时标记还原为实际的花括号
+ result = cls._restore_escaped_braces(processed_template)
+ return result
except (IndexError, KeyError) as e:
raise ValueError(
f"格式化模板失败: {template}, args={formatted_args}, kwargs={formatted_kwargs} {str(e)}"
@@ -198,7 +221,7 @@ class Prompt(str):
_should_register=False,
**kwargs if kwargs else self._kwargs,
)
- # print(f"prompt build result: {ret} name: {ret.name} ")
+ # print(f"prompt build result: {ret} name: {ret.name} ")
return str(ret)
def __str__(self) -> str:
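The sentinel-replacement trick added above lets a template contain literal braces (for example a JSON sample inside a prompt) without colliding with `str.format` placeholders. A self-contained demo of the same round trip, assuming only the standard library; the names here are illustrative rather than the Prompt class API:

```python
import re

_TEMP_LEFT_BRACE = "__ESCAPED_LEFT_BRACE__"
_TEMP_RIGHT_BRACE = "__ESCAPED_RIGHT_BRACE__"


def format_with_escaped_braces(template: str, **kwargs) -> str:
    """Demo of the escape handling: \\{ and \\} survive formatting as literal braces."""
    processed = template.replace("\\{", _TEMP_LEFT_BRACE).replace("\\}", _TEMP_RIGHT_BRACE)
    placeholders = [expr for expr in re.findall(r"\{(.*?)\}", processed) if expr]
    filled = processed.format(**{k: kwargs[k] for k in placeholders})
    return filled.replace(_TEMP_LEFT_BRACE, "{").replace(_TEMP_RIGHT_BRACE, "}")


print(format_with_escaped_braces('请用json回复,格式为 \\{"nickname": "{name}"\\}', name="小明"))
# -> 请用json回复,格式为 {"nickname": "小明"}
```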
diff --git a/src/plugins/utils/statistic.py b/src/plugins/utils/statistic.py
index 4b9afff39..10133f2b7 100644
--- a/src/plugins/utils/statistic.py
+++ b/src/plugins/utils/statistic.py
@@ -138,6 +138,7 @@ class LLMStatistics:
# user_id = str(doc.get("user_info", {}).get("user_id", "unknown"))
chat_info = doc.get("chat_info", {})
user_info = doc.get("user_info", {})
+ user_id = str(user_info.get("user_id", "unknown"))
message_time = doc.get("time", 0)
group_info = chat_info.get("group_info") if chat_info else {}
# print(f"group_info: {group_info}")
diff --git a/src/plugins/utils/timer_calculater.py b/src/plugins/utils/timer_calculater.py
new file mode 100644
index 000000000..aa12f35ce
--- /dev/null
+++ b/src/plugins/utils/timer_calculater.py
@@ -0,0 +1,151 @@
+from time import perf_counter
+from functools import wraps
+from typing import Optional, Dict, Callable
+import asyncio
+
+"""
+# 更好的计时器
+
+使用形式:
+- 上下文
+- 装饰器
+- 直接实例化
+
+使用场景:
+- 使用Timer:在需要测量代码执行时间时(如性能测试、计时器工具),Timer类是更可靠、高精度的选择。
+- 使用time.time()的场景:当需要记录实际时间点(如日志、时间戳)时使用,但避免用它测量时间间隔。
+
+使用方式:
+
+【装饰器】
+time_dict = {}
+@Timer("计数", time_dict)
+def func():
+ pass
+print(time_dict)
+
+【上下文_1】
+def func():
+ with Timer() as t:
+ pass
+ print(t)
+ print(t.human_readable)
+
+【上下文_2】
+def func():
+ time_dict = {}
+ with Timer("计数", time_dict):
+ pass
+ print(time_dict)
+
+【直接实例化】
+a = Timer()
+print(a) # 直接输出当前 perf_counter 值
+
+参数:
+- name:计时器的名字,默认为 None
+- storage:计时器结果存储字典,默认为 None
+- auto_unit:自动选择单位(毫秒或秒),默认为 True(自动根据时间切换毫秒或秒)
+- do_type_check:是否进行类型检查,默认为 False(不进行类型检查)
+
+属性:human_readable
+
+自定义错误:TimerTypeError
+"""
+
+
+class TimerTypeError(TypeError):
+ """自定义类型错误"""
+
+ __slots__ = ()
+
+ def __init__(self, param, expected_type, actual_type):
+ super().__init__(f"参数 '{param}' 类型错误,期望 {expected_type},实际得到 {actual_type.__name__}")
+
+
+class Timer:
+ """
+ Timer 支持三种模式:
+ 1. 装饰器模式:用于测量函数/协程运行时间
+ 2. 上下文管理器模式:用于 with 语句块内部计时
+ 3. 直接实例化:如果不调用 __enter__,打印对象时将显示当前 perf_counter 的值
+ """
+
+ __slots__ = ("name", "storage", "elapsed", "auto_unit", "start")
+
+ def __init__(
+ self,
+ name: Optional[str] = None,
+ storage: Optional[Dict[str, float]] = None,
+ auto_unit: bool = True,
+ do_type_check: bool = False,
+ ):
+ if do_type_check:
+ self._validate_types(name, storage)
+
+ self.name = name
+ self.storage = storage
+ self.elapsed = None
+
+ self.auto_unit = auto_unit
+ self.start = None
+
+ def _validate_types(self, name, storage):
+ """类型检查"""
+ if name is not None and not isinstance(name, str):
+ raise TimerTypeError("name", "Optional[str]", type(name))
+
+ if storage is not None and not isinstance(storage, dict):
+ raise TimerTypeError("storage", "Optional[dict]", type(storage))
+
+ def __call__(self, func: Optional[Callable] = None) -> Callable:
+ """装饰器模式"""
+ if func is None:
+ return lambda f: Timer(name=self.name or f.__name__, storage=self.storage, auto_unit=self.auto_unit)(f)
+
+ @wraps(func)
+ async def async_wrapper(*args, **kwargs):
+ with self:
+ return await func(*args, **kwargs)
+
+ @wraps(func)
+ def sync_wrapper(*args, **kwargs):
+ with self:
+ return func(*args, **kwargs)
+
+ wrapper = async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+ wrapper.__timer__ = self # 保留计时器引用
+ return wrapper
+
+ def __enter__(self):
+ """上下文管理器入口"""
+ self.start = perf_counter()
+ return self
+
+ def __exit__(self, *args):
+ self.elapsed = perf_counter() - self.start
+ self._record_time()
+ return False
+
+ def _record_time(self):
+ """记录时间"""
+ if self.storage is not None and self.name:
+ self.storage[self.name] = self.elapsed
+
+ @property
+ def human_readable(self) -> str:
+ """人类可读时间格式"""
+ if self.elapsed is None:
+ return "未计时"
+
+ if self.auto_unit:
+ return f"{self.elapsed * 1000:.2f}毫秒" if self.elapsed < 1 else f"{self.elapsed:.2f}秒"
+ return f"{self.elapsed:.4f}秒"
+
+ def __str__(self):
+ if self.start is not None:
+ if self.elapsed is None:
+ current_elapsed = perf_counter() - self.start
+ return f"{current_elapsed}"
+ return f"{self.elapsed}"
+ return f"{perf_counter()}"
diff --git a/src/tool_use/tool_use.py b/src/tool_use/tool_use.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/temp_utils_ui/temp_ui.py b/temp_utils_ui/temp_ui.py
deleted file mode 100644
index 3e0e1b5a5..000000000
--- a/temp_utils_ui/temp_ui.py
+++ /dev/null
@@ -1,1249 +0,0 @@
-import os
-import sys
-import toml
-import customtkinter as ctk
-from tkinter import messagebox, StringVar, filedialog
-import json
-import datetime
-import shutil
-
-# 设置主题
-ctk.set_appearance_mode("System") # 系统主题
-ctk.set_default_color_theme("blue") # 蓝色主题
-
-# 配置项的中文翻译映射
-SECTION_TRANSLATIONS = {
- "inner": "内部配置",
- "bot": "机器人设置",
- "groups": "群组设置",
- "personality": "人格设置",
- "identity": "身份设置",
- "schedule": "日程设置",
- "platforms": "平台设置",
- "response": "回复设置",
- "heartflow": "心流设置",
- "message": "消息设置",
- "willing": "意愿设置",
- "emoji": "表情设置",
- "memory": "记忆设置",
- "mood": "情绪设置",
- "keywords_reaction": "关键词反应",
- "chinese_typo": "中文错别字",
- "response_spliter": "回复分割器",
- "remote": "远程设置",
- "experimental": "实验功能",
- "model": "模型设置",
-}
-
-# 配置项的中文描述
-CONFIG_DESCRIPTIONS = {
- # bot设置
- "bot.qq": "机器人的QQ号码",
- "bot.nickname": "机器人的昵称",
- "bot.alias_names": "机器人的别名列表",
- # 群组设置
- "groups.talk_allowed": "允许机器人回复消息的群号列表",
- "groups.talk_frequency_down": "降低回复频率的群号列表",
- "groups.ban_user_id": "禁止回复和读取消息的QQ号列表",
- # 人格设置
- "personality.personality_core": "人格核心描述,建议20字以内",
- "personality.personality_sides": "人格特点列表",
- # 身份设置
- "identity.identity_detail": "身份细节描述列表",
- "identity.height": "身高(厘米)",
- "identity.weight": "体重(千克)",
- "identity.age": "年龄",
- "identity.gender": "性别",
- "identity.appearance": "外貌特征",
- # 日程设置
- "schedule.enable_schedule_gen": "是否启用日程表生成",
- "schedule.prompt_schedule_gen": "日程表生成提示词",
- "schedule.schedule_doing_update_interval": "日程表更新间隔(秒)",
- "schedule.schedule_temperature": "日程表温度,建议0.3-0.6",
- "schedule.time_zone": "时区设置",
- # 平台设置
- "platforms.nonebot-qq": "QQ平台适配器链接",
- # 回复设置
- "response.response_mode": "回复策略(heart_flow:心流,reasoning:推理)",
- "response.model_r1_probability": "主要回复模型使用概率",
- "response.model_v3_probability": "次要回复模型使用概率",
- # 心流设置
- "heartflow.sub_heart_flow_update_interval": "子心流更新频率(秒)",
- "heartflow.sub_heart_flow_freeze_time": "子心流冻结时间(秒)",
- "heartflow.sub_heart_flow_stop_time": "子心流停止时间(秒)",
- "heartflow.heart_flow_update_interval": "心流更新频率(秒)",
- # 消息设置
- "message.max_context_size": "获取的上下文数量",
- "message.emoji_chance": "使用表情包的概率",
- "message.thinking_timeout": "思考时间(秒)",
- "message.max_response_length": "回答的最大token数",
- "message.message_buffer": "是否启用消息缓冲器",
- "message.ban_words": "禁用词列表",
- "message.ban_msgs_regex": "禁用消息正则表达式列表",
- # 意愿设置
- "willing.willing_mode": "回复意愿模式",
- "willing.response_willing_amplifier": "回复意愿放大系数",
- "willing.response_interested_rate_amplifier": "回复兴趣度放大系数",
- "willing.down_frequency_rate": "降低回复频率的群组回复意愿降低系数",
- "willing.emoji_response_penalty": "表情包回复惩罚系数",
- # 表情设置
- "emoji.max_emoji_num": "表情包最大数量",
- "emoji.max_reach_deletion": "达到最大数量时是否删除表情包",
- "emoji.check_interval": "检查表情包的时间间隔",
- "emoji.auto_save": "是否保存表情包和图片",
- "emoji.enable_check": "是否启用表情包过滤",
- "emoji.check_prompt": "表情包过滤要求",
- # 记忆设置
- "memory.build_memory_interval": "记忆构建间隔(秒)",
- "memory.build_memory_distribution": "记忆构建分布参数",
- "memory.build_memory_sample_num": "采样数量",
- "memory.build_memory_sample_length": "采样长度",
- "memory.memory_compress_rate": "记忆压缩率",
- "memory.forget_memory_interval": "记忆遗忘间隔(秒)",
- "memory.memory_forget_time": "记忆遗忘时间(小时)",
- "memory.memory_forget_percentage": "记忆遗忘比例",
- "memory.memory_ban_words": "记忆禁用词列表",
- # 情绪设置
- "mood.mood_update_interval": "情绪更新间隔(秒)",
- "mood.mood_decay_rate": "情绪衰减率",
- "mood.mood_intensity_factor": "情绪强度因子",
- # 关键词反应
- "keywords_reaction.enable": "是否启用关键词反应功能",
- # 中文错别字
- "chinese_typo.enable": "是否启用中文错别字生成器",
- "chinese_typo.error_rate": "单字替换概率",
- "chinese_typo.min_freq": "最小字频阈值",
- "chinese_typo.tone_error_rate": "声调错误概率",
- "chinese_typo.word_replace_rate": "整词替换概率",
- # 回复分割器
- "response_spliter.enable_response_spliter": "是否启用回复分割器",
- "response_spliter.response_max_length": "回复允许的最大长度",
- "response_spliter.response_max_sentence_num": "回复允许的最大句子数",
- # 远程设置
- "remote.enable": "是否启用远程统计",
- # 实验功能
- "experimental.enable_friend_chat": "是否启用好友聊天",
- "experimental.pfc_chatting": "是否启用PFC聊天",
- # 模型设置
- "model.llm_reasoning.name": "推理模型名称",
- "model.llm_reasoning.provider": "推理模型提供商",
- "model.llm_reasoning.pri_in": "推理模型输入价格",
- "model.llm_reasoning.pri_out": "推理模型输出价格",
- "model.llm_normal.name": "回复模型名称",
- "model.llm_normal.provider": "回复模型提供商",
- "model.llm_normal.pri_in": "回复模型输入价格",
- "model.llm_normal.pri_out": "回复模型输出价格",
- "model.llm_emotion_judge.name": "表情判断模型名称",
- "model.llm_emotion_judge.provider": "表情判断模型提供商",
- "model.llm_emotion_judge.pri_in": "表情判断模型输入价格",
- "model.llm_emotion_judge.pri_out": "表情判断模型输出价格",
- "model.llm_topic_judge.name": "主题判断模型名称",
- "model.llm_topic_judge.provider": "主题判断模型提供商",
- "model.llm_topic_judge.pri_in": "主题判断模型输入价格",
- "model.llm_topic_judge.pri_out": "主题判断模型输出价格",
- "model.llm_summary_by_topic.name": "概括模型名称",
- "model.llm_summary_by_topic.provider": "概括模型提供商",
- "model.llm_summary_by_topic.pri_in": "概括模型输入价格",
- "model.llm_summary_by_topic.pri_out": "概括模型输出价格",
- "model.moderation.name": "内容审核模型名称",
- "model.moderation.provider": "内容审核模型提供商",
- "model.moderation.pri_in": "内容审核模型输入价格",
- "model.moderation.pri_out": "内容审核模型输出价格",
- "model.vlm.name": "图像识别模型名称",
- "model.vlm.provider": "图像识别模型提供商",
- "model.vlm.pri_in": "图像识别模型输入价格",
- "model.vlm.pri_out": "图像识别模型输出价格",
- "model.embedding.name": "嵌入模型名称",
- "model.embedding.provider": "嵌入模型提供商",
- "model.embedding.pri_in": "嵌入模型输入价格",
- "model.embedding.pri_out": "嵌入模型输出价格",
- "model.llm_observation.name": "观察模型名称",
- "model.llm_observation.provider": "观察模型提供商",
- "model.llm_observation.pri_in": "观察模型输入价格",
- "model.llm_observation.pri_out": "观察模型输出价格",
- "model.llm_sub_heartflow.name": "子心流模型名称",
- "model.llm_sub_heartflow.provider": "子心流模型提供商",
- "model.llm_sub_heartflow.pri_in": "子心流模型输入价格",
- "model.llm_sub_heartflow.pri_out": "子心流模型输出价格",
- "model.llm_heartflow.name": "心流模型名称",
- "model.llm_heartflow.provider": "心流模型提供商",
- "model.llm_heartflow.pri_in": "心流模型输入价格",
- "model.llm_heartflow.pri_out": "心流模型输出价格",
-}
-
-
-# 获取翻译
-def get_translation(key):
- return SECTION_TRANSLATIONS.get(key, key)
-
-
-# 获取配置项描述
-def get_description(key):
- return CONFIG_DESCRIPTIONS.get(key, "")
-
-
-# 获取根目录路径
-def get_root_dir():
- try:
- # 获取当前脚本所在目录
- if getattr(sys, "frozen", False):
- # 如果是打包后的应用
- current_dir = os.path.dirname(sys.executable)
- else:
- # 如果是脚本运行
- current_dir = os.path.dirname(os.path.abspath(__file__))
-
- # 获取根目录(假设当前脚本在temp_utils_ui目录下或者是可执行文件在根目录)
- if os.path.basename(current_dir) == "temp_utils_ui":
- root_dir = os.path.dirname(current_dir)
- else:
- root_dir = current_dir
-
- # 检查是否存在config目录
- config_dir = os.path.join(root_dir, "config")
- if not os.path.exists(config_dir):
- os.makedirs(config_dir, exist_ok=True)
-
- return root_dir
- except Exception as e:
- print(f"获取根目录路径失败: {e}")
- # 返回当前目录作为备选
- return os.getcwd()
-
-
-# 配置文件路径
-CONFIG_PATH = os.path.join(get_root_dir(), "config", "bot_config.toml")
-
-
-# 保存配置
-def save_config(config_data):
- try:
- # 首先备份原始配置文件
- if os.path.exists(CONFIG_PATH):
- # 创建备份目录
- backup_dir = os.path.join(os.path.dirname(CONFIG_PATH), "old")
- if not os.path.exists(backup_dir):
- os.makedirs(backup_dir)
-
- # 生成备份文件名(使用时间戳)
- timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
- backup_filename = f"bot_config_{timestamp}.toml.bak"
- backup_path = os.path.join(backup_dir, backup_filename)
-
- # 复制文件
- with open(CONFIG_PATH, "r", encoding="utf-8") as src:
- with open(backup_path, "w", encoding="utf-8") as dst:
- dst.write(src.read())
-
- # 保存新配置
- with open(CONFIG_PATH, "w", encoding="utf-8") as f:
- toml.dump(config_data, f)
- return True
- except Exception as e:
- print(f"保存配置失败: {e}")
- return False
-
-
-# 加载配置
-def load_config():
- try:
- if os.path.exists(CONFIG_PATH):
- with open(CONFIG_PATH, "r", encoding="utf-8") as f:
- return toml.load(f)
- else:
- print(f"配置文件不存在: {CONFIG_PATH}")
- return {}
- except Exception as e:
- print(f"加载配置失败: {e}")
- return {}
-
-
-# 多行文本输入框
-class ScrollableTextFrame(ctk.CTkFrame):
- def __init__(self, master, initial_text="", height=100, width=400, **kwargs):
- super().__init__(master, **kwargs)
-
- self.text_var = StringVar(value=initial_text)
-
- # 文本框
- self.text_box = ctk.CTkTextbox(self, height=height, width=width, wrap="word")
- self.text_box.pack(fill="both", expand=True, padx=5, pady=5)
- self.text_box.insert("1.0", initial_text)
-
- # 绑定更改事件
- self.text_box.bind("", self.update_var)
-
- def update_var(self, event=None):
- self.text_var.set(self.text_box.get("1.0", "end-1c"))
-
- def get(self):
- return self.text_box.get("1.0", "end-1c")
-
- def set(self, text):
- self.text_box.delete("1.0", "end")
- self.text_box.insert("1.0", text)
- self.update_var()
-
-
-# 配置UI
-class ConfigUI(ctk.CTk):
- def __init__(self):
- super().__init__()
-
- # 窗口设置
- self.title("麦麦配置修改器")
- self.geometry("1100x750")
-
- # 加载配置
- self.config_data = load_config()
- if not self.config_data:
- messagebox.showerror("错误", "无法加载配置文件!将创建空白配置文件。")
- # 如果配置加载失败,创建一个最小化的空配置
- self.config_data = {"inner": {"version": "1.0.0"}}
-
- # 保存原始配置,用于检测变更
- self.original_config = json.dumps(self.config_data, sort_keys=True)
-
- # 自动保存状态
- self.auto_save = ctk.BooleanVar(value=False)
-
- # 创建主框架
- self.main_frame = ctk.CTkFrame(self)
- self.main_frame.pack(padx=10, pady=10, fill="both", expand=True)
-
- # 创建顶部工具栏
- self.create_toolbar()
-
- # 创建标签和输入框的字典,用于后续保存配置
- self.config_vars = {}
-
- # 创建左侧导航和右侧内容区域
- self.create_split_view()
-
- # 创建底部状态栏
- self.status_label = ctk.CTkLabel(self, text="就绪", anchor="w")
- self.status_label.pack(fill="x", padx=10, pady=(0, 5))
-
- # 绑定关闭事件
- self.protocol("WM_DELETE_WINDOW", self.on_closing)
-
- # 设置最小窗口大小
- self.minsize(800, 600)
-
- # 居中显示窗口
- self.center_window()
-
- def center_window(self):
- """将窗口居中显示"""
- try:
- self.update_idletasks()
- width = self.winfo_width()
- height = self.winfo_height()
- x = (self.winfo_screenwidth() // 2) - (width // 2)
- y = (self.winfo_screenheight() // 2) - (height // 2)
- self.geometry(f"{width}x{height}+{x}+{y}")
- except Exception as e:
- print(f"居中窗口时出错: {e}")
- # 使用默认位置
- pass
-
- def create_toolbar(self):
- toolbar = ctk.CTkFrame(self.main_frame, height=40)
- toolbar.pack(fill="x", padx=5, pady=5)
-
- # 保存按钮
- save_btn = ctk.CTkButton(toolbar, text="保存配置", command=self.save_config, width=100)
- save_btn.pack(side="left", padx=5)
-
- # 自动保存选项
- auto_save_cb = ctk.CTkCheckBox(toolbar, text="自动保存", variable=self.auto_save)
- auto_save_cb.pack(side="left", padx=15)
-
- # 重新加载按钮
- reload_btn = ctk.CTkButton(toolbar, text="重新加载", command=self.reload_config, width=100)
- reload_btn.pack(side="left", padx=5)
-
- # 手动备份按钮
- backup_btn = ctk.CTkButton(toolbar, text="手动备份", command=self.backup_config, width=100)
- backup_btn.pack(side="left", padx=5)
-
- # 查看备份按钮
- view_backup_btn = ctk.CTkButton(toolbar, text="查看备份", command=self.view_backups, width=100)
- view_backup_btn.pack(side="left", padx=5)
-
- # 导入导出菜单按钮
- import_export_btn = ctk.CTkButton(toolbar, text="导入/导出", command=self.show_import_export_menu, width=100)
- import_export_btn.pack(side="left", padx=5)
-
- # 关于按钮
- about_btn = ctk.CTkButton(toolbar, text="关于", command=self.show_about, width=80)
- about_btn.pack(side="right", padx=5)
-
- def create_split_view(self):
- # 创建分隔视图框架
- split_frame = ctk.CTkFrame(self.main_frame)
- split_frame.pack(fill="both", expand=True, padx=5, pady=5)
-
- # 左侧分类列表
- self.category_frame = ctk.CTkFrame(split_frame, width=220)
- self.category_frame.pack(side="left", fill="y", padx=(0, 5), pady=0)
- self.category_frame.pack_propagate(False) # 固定宽度
-
- # 右侧内容区域
- self.content_frame = ctk.CTkScrollableFrame(split_frame)
- self.content_frame.pack(side="right", fill="both", expand=True)
-
- # 创建类别列表
- self.create_category_list()
-
- def create_category_list(self):
- # 标题和搜索框
- header_frame = ctk.CTkFrame(self.category_frame)
- header_frame.pack(fill="x", padx=5, pady=(10, 5))
-
- ctk.CTkLabel(header_frame, text="配置分类", font=("Arial", 14, "bold")).pack(side="left", padx=5, pady=5)
-
- # 搜索按钮
- search_btn = ctk.CTkButton(
- header_frame,
- text="🔍",
- width=30,
- command=self.show_search_dialog,
- fg_color="transparent",
- hover_color=("gray80", "gray30"),
- )
- search_btn.pack(side="right", padx=5, pady=5)
-
- # 分类按钮
- self.category_buttons = {}
- self.active_category = None
-
- # 分类按钮容器
- buttons_frame = ctk.CTkScrollableFrame(self.category_frame, height=600)
- buttons_frame.pack(fill="both", expand=True, padx=5, pady=5)
-
- for section in self.config_data:
- # 跳过inner部分,这个不应该被用户修改
- if section == "inner":
- continue
-
- # 获取翻译
- section_name = f"{section} ({get_translation(section)})"
-
- btn = ctk.CTkButton(
- buttons_frame,
- text=section_name,
- fg_color="transparent",
- text_color=("gray10", "gray90"),
- anchor="w",
- height=35,
- command=lambda s=section: self.show_category(s),
- )
- btn.pack(fill="x", padx=5, pady=2)
- self.category_buttons[section] = btn
-
- # 默认显示第一个分类
- first_section = next((s for s in self.config_data.keys() if s != "inner"), None)
- if first_section:
- self.show_category(first_section)
-
- def show_category(self, category):
- # 清除当前内容
- for widget in self.content_frame.winfo_children():
- widget.destroy()
-
- # 更新按钮状态
- for section, btn in self.category_buttons.items():
- if section == category:
- btn.configure(fg_color=("gray75", "gray25"))
- self.active_category = section
- else:
- btn.configure(fg_color="transparent")
-
- # 获取翻译
- category_name = f"{category} ({get_translation(category)})"
-
- # 添加标题
- ctk.CTkLabel(self.content_frame, text=f"{category_name} 配置", font=("Arial", 16, "bold")).pack(
- anchor="w", padx=10, pady=(5, 15)
- )
-
- # 添加配置项
- self.add_config_section(self.content_frame, category, self.config_data[category])
-
- def add_config_section(self, parent, section_path, section_data, indent=0):
- # 递归添加配置项
- for key, value in section_data.items():
- full_path = f"{section_path}.{key}" if indent > 0 else f"{section_path}.{key}"
-
- # 获取描述
- description = get_description(full_path)
-
- if isinstance(value, dict):
- # 如果是字典,创建一个分组框架并递归添加子项
- group_frame = ctk.CTkFrame(parent)
- group_frame.pack(fill="x", expand=True, padx=10, pady=10)
-
- # 添加标题
- header_frame = ctk.CTkFrame(group_frame, fg_color=("gray85", "gray25"))
- header_frame.pack(fill="x", padx=0, pady=0)
-
- label = ctk.CTkLabel(header_frame, text=f"{key}", font=("Arial", 13, "bold"), anchor="w")
- label.pack(anchor="w", padx=10, pady=5)
-
- # 如果有描述,添加提示图标
- if description:
- # 创建工具提示窗口显示函数
- def show_tooltip(event, text, widget):
- x, y, _, _ = widget.bbox("all")
- x += widget.winfo_rootx() + 25
- y += widget.winfo_rooty() + 25
-
- # 创建工具提示窗口
- tipwindow = ctk.CTkToplevel(widget)
- tipwindow.wm_overrideredirect(True)
- tipwindow.wm_geometry(f"+{x}+{y}")
- tipwindow.lift()
-
- label = ctk.CTkLabel(tipwindow, text=text, justify="left", wraplength=300)
- label.pack(padx=5, pady=5)
-
- # 自动关闭
- def close_tooltip():
- tipwindow.destroy()
-
- widget.after(3000, close_tooltip)
- return tipwindow
-
- # 在标题后添加提示图标
- tip_label = ctk.CTkLabel(
- header_frame, text="ℹ️", font=("Arial", 12), text_color="light blue", width=20
- )
- tip_label.pack(side="right", padx=5)
-
- # 绑定鼠标悬停事件
- tip_label.bind("", lambda e, t=description, w=tip_label: show_tooltip(e, t, w))
-
- # 添加内容
- content_frame = ctk.CTkFrame(group_frame)
- content_frame.pack(fill="x", expand=True, padx=5, pady=5)
-
- self.add_config_section(content_frame, full_path, value, indent + 1)
-
- elif isinstance(value, list):
- # 如果是列表,创建一个文本框用于编辑JSON格式的列表
- frame = ctk.CTkFrame(parent)
- frame.pack(fill="x", expand=True, padx=5, pady=5)
-
- # 标签和输入框在一行
- label_frame = ctk.CTkFrame(frame)
- label_frame.pack(fill="x", padx=5, pady=(5, 0))
-
- # 标签包含描述提示
- label_text = f"{key}:"
- if description:
- label_text = f"{key}: ({description})"
-
- label = ctk.CTkLabel(label_frame, text=label_text, font=("Arial", 12), anchor="w")
- label.pack(anchor="w", padx=5 + indent * 10, pady=0)
-
- # 添加提示信息
- info_label = ctk.CTkLabel(label_frame, text="(列表格式: JSON)", font=("Arial", 9), text_color="gray50")
- info_label.pack(anchor="w", padx=5 + indent * 10, pady=(0, 5))
-
- # 确定文本框高度,根据列表项数量决定
- list_height = max(100, min(len(value) * 20 + 40, 200))
-
- # 将列表转换为JSON字符串,美化格式
- json_str = json.dumps(value, ensure_ascii=False, indent=2)
-
- # 使用多行文本框
- text_frame = ScrollableTextFrame(frame, initial_text=json_str, height=list_height, width=550)
- text_frame.pack(fill="x", padx=10 + indent * 10, pady=5)
-
- self.config_vars[full_path] = (text_frame.text_var, "list")
-
- # 绑定变更事件,用于自动保存
- text_frame.text_box.bind("", lambda e, path=full_path: self.on_field_change(path))
-
- elif isinstance(value, bool):
- # 如果是布尔值,创建一个复选框
- frame = ctk.CTkFrame(parent)
- frame.pack(fill="x", expand=True, padx=5, pady=5)
-
- var = ctk.BooleanVar(value=value)
- self.config_vars[full_path] = (var, "bool")
-
- # 复选框文本包含描述
- checkbox_text = key
- if description:
- checkbox_text = f"{key} ({description})"
-
- checkbox = ctk.CTkCheckBox(
- frame, text=checkbox_text, variable=var, command=lambda path=full_path: self.on_field_change(path)
- )
- checkbox.pack(anchor="w", padx=10 + indent * 10, pady=5)
-
- elif isinstance(value, (int, float)):
- # 如果是数字,创建一个数字输入框
- frame = ctk.CTkFrame(parent)
- frame.pack(fill="x", expand=True, padx=5, pady=5)
-
- # 标签包含描述
- label_text = f"{key}:"
- if description:
- label_text = f"{key}: ({description})"
-
- label = ctk.CTkLabel(frame, text=label_text, font=("Arial", 12), anchor="w")
- label.pack(anchor="w", padx=10 + indent * 10, pady=(5, 0))
-
- var = StringVar(value=str(value))
- self.config_vars[full_path] = (var, "number", type(value))
-
- # 判断数值的长度,决定输入框宽度
- entry_width = max(200, min(len(str(value)) * 15, 300))
-
- entry = ctk.CTkEntry(frame, width=entry_width, textvariable=var)
- entry.pack(anchor="w", padx=10 + indent * 10, pady=5)
-
- # 绑定变更事件,用于自动保存
- entry.bind("", lambda e, path=full_path: self.on_field_change(path))
-
- else:
- # 对于字符串,创建一个文本输入框
- frame = ctk.CTkFrame(parent)
- frame.pack(fill="x", expand=True, padx=5, pady=5)
-
- # 标签包含描述
- label_text = f"{key}:"
- if description:
- label_text = f"{key}: ({description})"
-
- label = ctk.CTkLabel(frame, text=label_text, font=("Arial", 12), anchor="w")
- label.pack(anchor="w", padx=10 + indent * 10, pady=(5, 0))
-
- var = StringVar(value=str(value))
- self.config_vars[full_path] = (var, "string")
-
- # 判断文本长度,决定输入框的类型和大小
- text_len = len(str(value))
-
- if text_len > 80 or "\n" in str(value):
- # 对于长文本或多行文本,使用多行文本框
- text_height = max(80, min(str(value).count("\n") * 20 + 40, 150))
-
- text_frame = ScrollableTextFrame(frame, initial_text=str(value), height=text_height, width=550)
- text_frame.pack(fill="x", padx=10 + indent * 10, pady=5)
- self.config_vars[full_path] = (text_frame.text_var, "string")
-
- # 绑定变更事件,用于自动保存
- text_frame.text_box.bind("", lambda e, path=full_path: self.on_field_change(path))
- else:
- # 对于短文本,使用单行输入框
- # 根据内容长度动态调整输入框宽度
- entry_width = max(400, min(text_len * 10, 550))
-
- entry = ctk.CTkEntry(frame, width=entry_width, textvariable=var)
- entry.pack(anchor="w", padx=10 + indent * 10, pady=5, fill="x")
-
- # 绑定变更事件,用于自动保存
- entry.bind("", lambda e, path=full_path: self.on_field_change(path))
-
- def on_field_change(self, path):
- """当字段值改变时调用,用于自动保存"""
- if self.auto_save.get():
- self.save_config(show_message=False)
- self.status_label.configure(text=f"已自动保存更改 ({path})")
-
- def save_config(self, show_message=True):
- """保存配置文件"""
- # 更新配置数据
- updated = False
- _error_path = None
-
- for path, (var, var_type, *args) in self.config_vars.items():
- parts = path.split(".")
-
- # 如果路径有多层级
- target = self.config_data
- for p in parts[:-1]:
- if p not in target:
- target[p] = {}
- target = target[p]
-
- # 根据变量类型更新值
- try:
- if var_type == "bool":
- if target[parts[-1]] != var.get():
- target[parts[-1]] = var.get()
- updated = True
- elif var_type == "number":
- # 获取原始类型(int或float)
- num_type = args[0] if args else int
- new_value = num_type(var.get())
- if target[parts[-1]] != new_value:
- target[parts[-1]] = new_value
- updated = True
-
- elif var_type == "list":
- # 解析JSON字符串为列表
- new_value = json.loads(var.get())
- if json.dumps(target[parts[-1]], sort_keys=True) != json.dumps(new_value, sort_keys=True):
- target[parts[-1]] = new_value
- updated = True
-
- else:
- if target[parts[-1]] != var.get():
- target[parts[-1]] = var.get()
- updated = True
- except ValueError as e:
- if show_message:
- messagebox.showerror("格式错误", str(e))
- else:
- self.status_label.configure(text=f"保存失败: {e}")
- return False
-
- if not updated and show_message:
- self.status_label.configure(text="无更改,无需保存")
- return True
-
- # 保存配置
- if save_config(self.config_data):
- if show_message:
- messagebox.showinfo("成功", "配置已保存!")
- self.original_config = json.dumps(self.config_data, sort_keys=True)
- return True
- else:
- if show_message:
- messagebox.showerror("错误", "保存配置失败!")
- else:
- self.status_label.configure(text="保存失败!")
- return False
-
- def reload_config(self):
- """重新加载配置"""
- if self.check_unsaved_changes():
- self.config_data = load_config()
- if not self.config_data:
- messagebox.showerror("错误", "无法加载配置文件!")
- return
-
- # 保存原始配置,用于检测变更
- self.original_config = json.dumps(self.config_data, sort_keys=True)
-
- # 重新显示当前分类
- self.show_category(self.active_category)
-
- self.status_label.configure(text="配置已重新加载")
-
- def check_unsaved_changes(self):
- """检查是否有未保存的更改"""
- # 临时更新配置数据以进行比较
- temp_config = self.config_data.copy()
-
- try:
- for path, (var, var_type, *args) in self.config_vars.items():
- parts = path.split(".")
-
- target = temp_config
- for p in parts[:-1]:
- target = target[p]
-
- if var_type == "bool":
- target[parts[-1]] = var.get()
- elif var_type == "number":
- num_type = args[0] if args else int
- target[parts[-1]] = num_type(var.get())
- elif var_type == "list":
- target[parts[-1]] = json.loads(var.get())
- else:
- target[parts[-1]] = var.get()
- except (ValueError, json.JSONDecodeError):
- # 如果有无效输入,认为有未保存更改
- return False
-
- # 比较原始配置和当前配置
- current_config = json.dumps(temp_config, sort_keys=True)
-
- if current_config != self.original_config:
- result = messagebox.askyesnocancel("未保存的更改", "有未保存的更改,是否保存?", icon="warning")
-
- if result is None: # 取消
- return False
- elif result: # 是
- return self.save_config()
-
- return True
-
- def show_about(self):
- """显示关于对话框"""
- about_window = ctk.CTkToplevel(self)
- about_window.title("关于")
- about_window.geometry("400x200")
- about_window.resizable(False, False)
- about_window.grab_set() # 模态对话框
-
- # 居中
- x = self.winfo_x() + (self.winfo_width() - 400) // 2
- y = self.winfo_y() + (self.winfo_height() - 200) // 2
- about_window.geometry(f"+{x}+{y}")
-
- # 内容
- ctk.CTkLabel(about_window, text="麦麦配置修改器", font=("Arial", 16, "bold")).pack(pady=(20, 10))
-
- ctk.CTkLabel(about_window, text="用于修改MaiBot-Core的配置文件\n配置文件路径: config/bot_config.toml").pack(
- pady=5
- )
-
- ctk.CTkLabel(about_window, text="注意: 修改配置前请备份原始配置文件", text_color=("red", "light coral")).pack(
- pady=5
- )
-
- ctk.CTkButton(about_window, text="确定", command=about_window.destroy, width=100).pack(pady=15)
-
- def on_closing(self):
- """关闭窗口前检查未保存更改"""
- if self.check_unsaved_changes():
- self.destroy()
-
- def backup_config(self):
- """手动备份当前配置文件"""
- try:
- # 检查配置文件是否存在
- if not os.path.exists(CONFIG_PATH):
- messagebox.showerror("错误", "配置文件不存在!")
- return False
-
- # 创建备份目录
- backup_dir = os.path.join(os.path.dirname(CONFIG_PATH), "old")
- if not os.path.exists(backup_dir):
- os.makedirs(backup_dir)
-
- # 生成备份文件名(使用时间戳)
- timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
- backup_filename = f"bot_config_{timestamp}.toml.bak"
- backup_path = os.path.join(backup_dir, backup_filename)
-
- # 复制文件
- with open(CONFIG_PATH, "r", encoding="utf-8") as src:
- with open(backup_path, "w", encoding="utf-8") as dst:
- dst.write(src.read())
-
- messagebox.showinfo("成功", f"配置已备份到:\n{backup_path}")
- self.status_label.configure(text=f"手动备份已创建: {backup_filename}")
- return True
- except Exception as e:
- messagebox.showerror("备份失败", f"备份配置文件失败: {e}")
- return False
-
- def view_backups(self):
- """查看备份文件列表"""
- # 创建备份目录
- backup_dir = os.path.join(os.path.dirname(CONFIG_PATH), "old")
- if not os.path.exists(backup_dir):
- os.makedirs(backup_dir)
-
- # 查找备份文件
- backup_files = []
- for filename in os.listdir(backup_dir):
- if filename.startswith("bot_config_") and filename.endswith(".toml.bak"):
- backup_path = os.path.join(backup_dir, filename)
- mod_time = os.path.getmtime(backup_path)
- backup_files.append((filename, backup_path, mod_time))
-
- if not backup_files:
- messagebox.showinfo("提示", "未找到备份文件")
- return
-
- # 按修改时间排序,最新的在前
- backup_files.sort(key=lambda x: x[2], reverse=True)
-
- # 创建备份查看窗口
- backup_window = ctk.CTkToplevel(self)
- backup_window.title("备份文件")
- backup_window.geometry("600x400")
- backup_window.grab_set() # 模态对话框
-
- # 居中
- x = self.winfo_x() + (self.winfo_width() - 600) // 2
- y = self.winfo_y() + (self.winfo_height() - 400) // 2
- backup_window.geometry(f"+{x}+{y}")
-
- # 创建说明标签
- ctk.CTkLabel(backup_window, text="备份文件列表 (双击可恢复)", font=("Arial", 14, "bold")).pack(
- pady=(10, 5), padx=10, anchor="w"
- )
-
- # 创建列表框
- backup_frame = ctk.CTkScrollableFrame(backup_window, width=580, height=300)
- backup_frame.pack(padx=10, pady=10, fill="both", expand=True)
-
- # 添加备份文件项
- for _i, (filename, filepath, mod_time) in enumerate(backup_files):
- # 格式化时间为可读格式
- time_str = datetime.datetime.fromtimestamp(mod_time).strftime("%Y-%m-%d %H:%M:%S")
-
- # 创建一个框架用于每个备份项
- item_frame = ctk.CTkFrame(backup_frame)
- item_frame.pack(fill="x", padx=5, pady=5)
-
- # 显示备份文件信息
- ctk.CTkLabel(item_frame, text=f"{time_str}", font=("Arial", 12, "bold"), width=200).pack(
- side="left", padx=10, pady=10
- )
-
- # 文件名
- name_label = ctk.CTkLabel(item_frame, text=filename, font=("Arial", 11))
- name_label.pack(side="left", fill="x", expand=True, padx=5, pady=10)
-
- # 恢复按钮
- restore_btn = ctk.CTkButton(
- item_frame, text="恢复", width=80, command=lambda path=filepath: self.restore_backup(path)
- )
- restore_btn.pack(side="right", padx=10, pady=10)
-
- # 绑定双击事件
- for widget in (item_frame, name_label):
- widget.bind("", lambda e, path=filepath: self.restore_backup(path))
-
- # 关闭按钮
- ctk.CTkButton(backup_window, text="关闭", command=backup_window.destroy, width=100).pack(pady=10)
-
- def restore_backup(self, backup_path):
- """从备份文件恢复配置"""
- if not os.path.exists(backup_path):
- messagebox.showerror("错误", "备份文件不存在!")
- return False
-
- # 确认还原
- confirm = messagebox.askyesno(
- "确认",
- f"确定要从以下备份文件恢复配置吗?\n{os.path.basename(backup_path)}\n\n这将覆盖当前的配置!",
- icon="warning",
- )
-
- if not confirm:
- return False
-
- try:
- # 先备份当前配置
- self.backup_config()
-
- # 恢复配置
- with open(backup_path, "r", encoding="utf-8") as src:
- with open(CONFIG_PATH, "w", encoding="utf-8") as dst:
- dst.write(src.read())
-
- messagebox.showinfo("成功", "配置已从备份恢复!")
-
- # 重新加载配置
- self.reload_config()
- return True
- except Exception as e:
- messagebox.showerror("恢复失败", f"恢复配置失败: {e}")
- return False
-
- def show_search_dialog(self):
- """显示搜索对话框"""
- try:
- search_window = ctk.CTkToplevel(self)
- search_window.title("搜索配置项")
- search_window.geometry("500x400")
- search_window.grab_set() # 模态对话框
-
- # 居中
- x = self.winfo_x() + (self.winfo_width() - 500) // 2
- y = self.winfo_y() + (self.winfo_height() - 400) // 2
- search_window.geometry(f"+{x}+{y}")
-
- # 搜索框
- search_frame = ctk.CTkFrame(search_window)
- search_frame.pack(fill="x", padx=10, pady=10)
-
- search_var = StringVar()
- search_entry = ctk.CTkEntry(
- search_frame, placeholder_text="输入关键词搜索...", width=380, textvariable=search_var
- )
- search_entry.pack(side="left", padx=5, pady=5, fill="x", expand=True)
-
- # 结果列表框
- results_frame = ctk.CTkScrollableFrame(search_window, width=480, height=300)
- results_frame.pack(padx=10, pady=5, fill="both", expand=True)
-
- # 搜索结果标签
- results_label = ctk.CTkLabel(results_frame, text="请输入关键词进行搜索", anchor="w")
- results_label.pack(fill="x", padx=10, pady=10)
-
- # 结果项列表
- results_items = []
-
- # 搜索函数
- def perform_search():
- # 清除之前的结果
- for item in results_items:
- item.destroy()
- results_items.clear()
-
- keyword = search_var.get().lower()
- if not keyword:
- results_label.configure(text="请输入关键词进行搜索")
- return
-
- # 收集所有匹配的配置项
- matches = []
-
- def search_config(section_path, config_data):
- for key, value in config_data.items():
- full_path = f"{section_path}.{key}" if section_path else key
-
- # 检查键名是否匹配
- if keyword in key.lower():
- matches.append((full_path, value))
-
- # 检查描述是否匹配
- description = get_description(full_path)
- if description and keyword in description.lower():
- matches.append((full_path, value))
-
- # 检查值是否匹配(仅字符串类型)
- if isinstance(value, str) and keyword in value.lower():
- matches.append((full_path, value))
-
- # 递归搜索子项
- if isinstance(value, dict):
- search_config(full_path, value)
-
- # 开始搜索
- search_config("", self.config_data)
-
- if not matches:
- results_label.configure(text=f"未找到包含 '{keyword}' 的配置项")
- return
-
- results_label.configure(text=f"找到 {len(matches)} 个匹配项")
-
- # 显示搜索结果
- for full_path, value in matches:
- # 创建一个框架用于每个结果项
- item_frame = ctk.CTkFrame(results_frame)
- item_frame.pack(fill="x", padx=5, pady=3)
- results_items.append(item_frame)
-
- # 配置项路径
- path_parts = full_path.split(".")
- section = path_parts[0] if len(path_parts) > 0 else ""
- _key = path_parts[-1] if len(path_parts) > 0 else ""
-
- # 获取描述
- description = get_description(full_path)
- desc_text = f" ({description})" if description else ""
-
- # 显示完整路径
- path_label = ctk.CTkLabel(
- item_frame,
- text=f"{full_path}{desc_text}",
- font=("Arial", 11, "bold"),
- anchor="w",
- wraplength=450,
- )
- path_label.pack(anchor="w", padx=10, pady=(5, 0), fill="x")
-
- # 显示值的预览(截断过长的值)
- value_str = str(value)
- if len(value_str) > 50:
- value_str = value_str[:50] + "..."
-
- value_label = ctk.CTkLabel(
- item_frame, text=f"值: {value_str}", font=("Arial", 10), anchor="w", wraplength=450
- )
- value_label.pack(anchor="w", padx=10, pady=(0, 5), fill="x")
-
- # 添加"转到"按钮
- goto_btn = ctk.CTkButton(
- item_frame,
- text="转到",
- width=60,
- height=25,
- command=lambda s=section: self.goto_config_item(s, search_window),
- )
- goto_btn.pack(side="right", padx=10, pady=5)
-
- # 绑定双击事件
- for widget in (item_frame, path_label, value_label):
- widget.bind("", lambda e, s=section: self.goto_config_item(s, search_window))
-
- # 搜索按钮
- search_button = ctk.CTkButton(search_frame, text="搜索", width=80, command=perform_search)
- search_button.pack(side="right", padx=5, pady=5)
-
- # 绑定回车键
- search_entry.bind("", lambda e: perform_search())
-
- # 初始聚焦到搜索框
- search_window.after(100, lambda: self.safe_focus(search_entry))
- except Exception as e:
- print(f"显示搜索对话框出错: {e}")
- messagebox.showerror("错误", f"显示搜索对话框失败: {e}")
-
- def safe_focus(self, widget):
- """安全地设置焦点,避免应用崩溃"""
- try:
- if widget.winfo_exists():
- widget.focus_set()
- except Exception as e:
- print(f"设置焦点出错: {e}")
- # 忽略错误
-
- def goto_config_item(self, section, dialog=None):
- """跳转到指定的配置项"""
- if dialog:
- dialog.destroy()
-
- # 切换到相应的分类
- if section in self.category_buttons:
- self.show_category(section)
-
- def show_import_export_menu(self):
- """显示导入导出菜单"""
- menu_window = ctk.CTkToplevel(self)
- menu_window.title("导入/导出配置")
- menu_window.geometry("300x200")
- menu_window.resizable(False, False)
- menu_window.grab_set() # 模态对话框
-
- # 居中
- x = self.winfo_x() + (self.winfo_width() - 300) // 2
- y = self.winfo_y() + (self.winfo_height() - 200) // 2
- menu_window.geometry(f"+{x}+{y}")
-
- # 创建按钮
- ctk.CTkLabel(menu_window, text="配置导入导出", font=("Arial", 16, "bold")).pack(pady=(20, 10))
-
- # 导出按钮
- export_btn = ctk.CTkButton(
- menu_window, text="导出配置到文件", command=lambda: self.export_config(menu_window), width=200
- )
- export_btn.pack(pady=10)
-
- # 导入按钮
- import_btn = ctk.CTkButton(
- menu_window, text="从文件导入配置", command=lambda: self.import_config(menu_window), width=200
- )
- import_btn.pack(pady=10)
-
- # 取消按钮
- cancel_btn = ctk.CTkButton(menu_window, text="取消", command=menu_window.destroy, width=100)
- cancel_btn.pack(pady=10)
-
- def export_config(self, parent_window=None):
- """导出配置到文件"""
- # 先保存当前配置
- if not self.save_config(show_message=False):
- if messagebox.askyesno("警告", "当前配置存在错误,是否仍要导出?"):
- pass
- else:
- return
-
- # 选择保存位置
- timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
- default_filename = f"bot_config_export_{timestamp}.toml"
-
- file_path = filedialog.asksaveasfilename(
- title="导出配置",
- filetypes=[("TOML 文件", "*.toml"), ("所有文件", "*.*")],
- defaultextension=".toml",
- initialfile=default_filename,
- )
-
- if not file_path:
- return
-
- try:
- # 复制当前配置文件到选择的位置
- shutil.copy2(CONFIG_PATH, file_path)
-
- messagebox.showinfo("成功", f"配置已导出到:\n{file_path}")
- self.status_label.configure(text=f"配置已导出到: {file_path}")
-
- if parent_window:
- parent_window.destroy()
-
- return True
- except Exception as e:
- messagebox.showerror("导出失败", f"导出配置失败: {e}")
- return False
-
- def import_config(self, parent_window=None):
- """从文件导入配置"""
- # 先检查是否有未保存的更改
- if not self.check_unsaved_changes():
- return
-
- # 选择要导入的文件
- file_path = filedialog.askopenfilename(
- title="导入配置", filetypes=[("TOML 文件", "*.toml"), ("所有文件", "*.*")]
- )
-
- if not file_path:
- return
-
- try:
- # 尝试加载TOML文件以验证格式
- with open(file_path, "r", encoding="utf-8") as f:
- import_data = toml.load(f)
-
- # 验证导入文件的基本结构
- if "inner" not in import_data:
- raise ValueError("导入的配置文件没有inner部分,格式不正确")
-
- if "version" not in import_data["inner"]:
- raise ValueError("导入的配置文件没有版本信息,格式不正确")
-
- # 确认导入
- confirm = messagebox.askyesno(
- "确认导入", f"确定要导入此配置文件吗?\n{file_path}\n\n这将替换当前的配置!", icon="warning"
- )
-
- if not confirm:
- return
-
- # 先备份当前配置
- self.backup_config()
-
- # 复制导入的文件到配置位置
- shutil.copy2(file_path, CONFIG_PATH)
-
- messagebox.showinfo("成功", "配置已导入,请重新加载以应用更改")
-
- # 重新加载配置
- self.reload_config()
-
- if parent_window:
- parent_window.destroy()
-
- return True
- except Exception as e:
- messagebox.showerror("导入失败", f"导入配置失败: {e}")
- return False
-
-
-# 主函数
-def main():
- try:
- app = ConfigUI()
- app.mainloop()
- except Exception as e:
- print(f"程序发生错误: {e}")
- # 显示错误对话框
-
- import tkinter as tk
- from tkinter import messagebox
-
- root = tk.Tk()
- root.withdraw()
- messagebox.showerror("程序错误", f"程序运行时发生错误:\n{e}")
- root.destroy()
-
-
-if __name__ == "__main__":
- main()
diff --git a/temp_utils_ui/thingking_ui.py b/temp_utils_ui/thingking_ui.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 1cf324a97..92fb886a8 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
[inner]
-version = "1.2.6"
+version = "1.3.0"
#以下是给开发人员阅读的,一般用户不需要阅读
@@ -60,7 +60,7 @@ appearance = "用几句话描述外貌特征" # 外貌特征
enable_schedule_gen = true # 是否启用日程表(尚未完成)
prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表"
schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒
-schedule_temperature = 0.2 # 日程表温度,建议0.2-0.5
+schedule_temperature = 0.1 # 日程表温度,建议0.1-0.5
time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运行电脑时区和国内时区不同的情况,或者模拟国外留学生日程
[platforms] # 必填项目,填写每个平台适配器提供的链接
@@ -77,12 +77,16 @@ model_v3_probability = 0.3 # 麦麦回答时选择次要回复模型2 模型的
sub_heart_flow_update_interval = 60 # 子心流更新频率,间隔 单位秒
sub_heart_flow_freeze_time = 100 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
-heart_flow_update_interval = 300 # 心流更新频率,间隔 单位秒
+heart_flow_update_interval = 600 # 心流更新频率,间隔 单位秒
+
+observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+compressed_length = 5 # 不能大于observation_context_size,当心流观察到的上下文超过上限时会被压缩,该值为压缩后的最短长度
+compress_length_limit = 5 # 最多保留的压缩份数,超过该数值的压缩上下文会被删除
[message]
max_context_size = 12 # 麦麦获得的上文数量,建议12,太短太长都会导致脑袋尖尖
-emoji_chance = 0.2 # 麦麦使用表情包的概率
+emoji_chance = 0.2 # 麦麦使用表情包的概率,设置为1让麦麦自己决定发不发
thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃
max_response_length = 256 # 麦麦回答的最大token数
message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟
@@ -159,8 +163,8 @@ min_freq=9 # 最小字频阈值
tone_error_rate=0.1 # 声调错误概率
word_replace_rate=0.006 # 整词替换概率
-[response_spliter]
-enable_response_spliter = true # 是否启用回复分割器
+[response_splitter]
+enable_response_splitter = true # 是否启用回复分割器
response_max_length = 100 # 回复允许的最大长度
response_max_sentence_num = 4 # 回复允许的最大句子数
@@ -189,11 +193,12 @@ pri_out = 16 #模型的输出价格(非必填,可以记录消耗)
#非推理模型
-[model.llm_normal] #V3 回复模型1 主要回复模型
+[model.llm_normal] #V3 回复模型1 主要回复模型,默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
+temp = 0.2 #模型的温度,新V3建议0.1-0.3
[model.llm_emotion_judge] #表情包判断
name = "Qwen/Qwen2.5-14B-Instruct"
@@ -213,11 +218,11 @@ provider = "SILICONFLOW"
pri_in = 1.26
pri_out = 1.26
-[model.moderation] #内容审核,开发中
-name = ""
+[model.llm_tool_use] #工具调用模型,需要使用支持工具调用的模型,建议使用qwen2.5 32b
+name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW"
-pri_in = 1.0
-pri_out = 2.0
+pri_in = 1.26
+pri_out = 1.26
# 识图模型
@@ -247,6 +252,7 @@ name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
+temp = 0.2 #模型的温度,新V3建议0.1-0.3
[model.llm_heartflow] #心流:建议使用qwen2.5 32b
# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
diff --git a/从0.6.0升级0.6.1请先看我.txt b/从0.6.0升级0.6.2请先看我.txt
similarity index 100%
rename from 从0.6.0升级0.6.1请先看我.txt
rename to 从0.6.0升级0.6.2请先看我.txt
diff --git a/配置文件修改器(临时测试用,以config为准).exe b/配置文件修改器(临时测试用,以config为准).exe
deleted file mode 100644
index dcb699074..000000000
Binary files a/配置文件修改器(临时测试用,以config为准).exe and /dev/null differ