This commit is contained in:
tcmofashi
2025-04-16 14:12:36 +08:00
59 changed files with 1789 additions and 2221 deletions

View File

@@ -9,6 +9,9 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.head_ref || github.ref_name }}
- uses: astral-sh/ruff-action@v3 - uses: astral-sh/ruff-action@v3
- run: ruff check --fix - run: ruff check --fix
- run: ruff format - run: ruff format

6
.gitignore vendored
View File

@@ -20,6 +20,8 @@ message_queue_window.bat
message_queue_window.txt message_queue_window.txt
queue_update.txt queue_update.txt
memory_graph.gml memory_graph.gml
/src/do_tool/tool_can_use/auto_create_tool.py
/src/do_tool/tool_can_use/execute_python_code_tool.py
.env .env
.env.* .env.*
.cursor .cursor
@@ -28,6 +30,9 @@ config/bot_config.toml
config/bot_config.toml.bak config/bot_config.toml.bak
src/plugins/remote/client_uuid.json src/plugins/remote/client_uuid.json
run_none.bat run_none.bat
(测试版)麦麦生成人格.bat
(临时版)麦麦开始学习.bat
src/plugins/utils/statistic.py
# Byte-compiled / optimized / DLL files # Byte-compiled / optimized / DLL files
__pycache__/ __pycache__/
*.py[cod] *.py[cod]
@@ -237,3 +242,4 @@ logs
/config/* /config/*
run_none.bat run_none.bat
config/old/bot_config_20250405_212257.toml config/old/bot_config_20250405_212257.toml

View File

@@ -37,7 +37,7 @@
</p> </p>
## 新版0.6.0部署前先阅读https://docs.mai-mai.org/manual/usage/mmc_q_a ## 新版0.6.x部署前先阅读https://docs.mai-mai.org/manual/usage/mmc_q_a
## 📝 项目简介 ## 📝 项目简介
@@ -62,7 +62,7 @@
### 📢 版本信息 ### 📢 版本信息
**最新版本: v0.6.0** ([查看更新日志](changelogs/changelog.md)) **最新版本: v0.6.2** ([查看更新日志](changelogs/changelog.md))
> [!WARNING] > [!WARNING]
> 请阅读教程后更新!!!!!!! > 请阅读教程后更新!!!!!!!
> 请阅读教程后更新!!!!!!! > 请阅读教程后更新!!!!!!!
@@ -86,7 +86,7 @@
### ⚠️ 重要提示 ### ⚠️ 重要提示
- 升级到v0.6.0版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a) - 升级到v0.6.x版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
- 本版本基于MaiCore重构通过nonebot插件与QQ平台交互 - 本版本基于MaiCore重构通过nonebot插件与QQ平台交互
- 项目处于活跃开发阶段功能和API可能随时调整 - 项目处于活跃开发阶段功能和API可能随时调整
@@ -108,28 +108,29 @@
- [📚 核心Wiki文档](https://docs.mai-mai.org) - 项目最全面的文档中心,你可以了解麦麦有关的一切 - [📚 核心Wiki文档](https://docs.mai-mai.org) - 项目最全面的文档中心,你可以了解麦麦有关的一切
### 最新版本部署教程(MaiCore版本) ### 最新版本部署教程(MaiCore版本)
- [🚀 最新版本部署教程](https://docs.mai-mai.org/manual/deployment/mmc_deploy.html) - 基于MaiCore的新版本部署方式与旧版本不兼容 - [🚀 最新版本部署教程](https://docs.mai-mai.org/manual/deployment/mmc_deploy_windows.html) - 基于MaiCore的新版本部署方式与旧版本不兼容
## 🎯 功能介绍 ## 🎯 功能介绍
| 模块 | 主要功能 | 特点 | | 模块 | 主要功能 | 特点 |
|------|---------|------| |------|---------|------|
| 💬 聊天系统 | • 思维流/推理聊天<br>• 关键词主动发言<br>• 多模型支持<br>• 动态prompt构建<br>• 私聊功能(PFC) | 拟人化交互 | | 💬 聊天系统 | • 流/推理聊天<br>• 关键词主动发言<br>• 多模型支持<br>• 动态prompt构建<br>• 私聊功能(PFC) | 拟人化交互 |
| 🧠 思维流系统 | • 实时思考生成<br>• 自动启停机制<br>• 日程系统联动 | 智能化决策 | | 🧠 流系统 | • 实时思考生成<br>• 自动启停机制<br>• 日程系统联动<br>• 工具调用能力 | 智能化决策 |
| 🧠 记忆系统 2.0 | • 优化记忆抽取<br>• 海马体记忆机制<br>• 聊天记录概括 | 持久化记忆 | | 🧠 记忆系统 | • 优化记忆抽取<br>• 海马体记忆机制<br>• 聊天记录概括 | 持久化记忆 |
| 😊 表情系统 | • 情绪匹配发送<br>• GIF支持<br>• 自动收集与审查 | 丰富表达 | | 😊 表情系统 | • 情绪匹配发送<br>• GIF支持<br>• 自动收集与审查 | 丰富表达 |
| 📅 日程系统 | • 动态日程生成<br>• 自定义想象力<br>• 思维流联动 | 智能规划 | | 📅 日程系统 | • 动态日程生成<br>• 自定义想象力<br>• 思维流联动 | 智能规划 |
| 👥 关系系统 2.0 | • 关系管理优化<br>• 丰富接口支持<br>• 个性化交互 | 深度社交 | | 👥 关系系统 | • 关系管理优化<br>• 丰富接口支持<br>• 个性化交互 | 深度社交 |
| 📊 统计系统 | • 使用数据统计<br>• LLM调用记录<br>• 实时控制台显示 | 数据可视 | | 📊 统计系统 | • 使用数据统计<br>• LLM调用记录<br>• 实时控制台显示 | 数据可视 |
| 🔧 系统功能 | • 优雅关闭机制<br>• 自动数据保存<br>• 异常处理完善 | 稳定可靠 | | 🔧 系统功能 | • 优雅关闭机制<br>• 自动数据保存<br>• 异常处理完善 | 稳定可靠 |
| 🛠️ 工具系统 | • 知识获取工具<br>• 自动注册机制<br>• 多工具支持 | 扩展功能 |
## 📐 项目架构 ## 📐 项目架构
```mermaid ```mermaid
graph TD graph TD
A[MaiCore] --> B[对话系统] A[MaiCore] --> B[对话系统]
A --> C[思维流系统] A --> C[流系统]
A --> D[记忆系统] A --> D[记忆系统]
A --> E[情感系统] A --> E[情感系统]
B --> F[多模型支持] B --> F[多模型支持]

View File

@@ -1,5 +1,58 @@
# Changelog # Changelog
## [0.6.2] - 2025-4-14
### 摘要
- MaiBot 0.6.2 版本发布!
- 优化了心流的观察系统,优化提示词和表现,现在心流表现更好!
- 新增工具调用能力,可以更好地获取信息
- 本次更新主要围绕工具系统、心流系统、消息处理和代码优化展开,新增多个工具类,优化了心流系统的逻辑,改进了消息处理流程,并修复了多个问题。
### 🌟 核心功能增强
#### 工具系统
- 新增了知识获取工具系统,支持通过心流调用获取多种知识
- 新增了工具系统使用指南,详细说明工具结构、自动注册机制和添加步骤
- 新增了多个实用工具类,包括心情调整工具`ChangeMoodTool`、关系查询工具`RelationshipTool`、数值比较工具`CompareNumbersTool`、日程获取工具`GetCurrentTaskTool`、上下文压缩工具`CompressContextTool`和知识获取工具`GetKnowledgeTool`
- 更新了`ToolUser`类,支持自动获取已注册工具定义并调用`execute`方法
- 需要配置支持工具调用的模型才能使用完整功能
#### 心流系统
- 新增了上下文压缩缓存功能,可以有更持久的记忆
- 新增了心流系统的README.md文件详细介绍了系统架构、主要功能和工作流程。
- 优化了心流系统的逻辑,包括子心流自动清理和合理配置更新间隔。
- 改进了心流观察系统,优化了提示词设计和系统表现,使心流运行更加稳定高效。
- 更新了`Heartflow`类的方法和属性,支持异步生成提示词并提升生成质量。
#### 消息处理
- 改进了消息处理流程,包括回复检查、消息生成和发送逻辑。
- 新增了`ReplyGenerator`类,用于根据观察信息和对话信息生成回复。
- 优化了消息队列管理系统,支持按时间顺序处理消息。
#### 现在可以启用更好的表情包发送系统
### 💻 系统架构优化
#### 部署支持
- 更新了Docker部署文档优化了服务配置和挂载路径。
- 完善了Linux和Windows脚本支持。
### 🐛 问题修复
- 修复了消息处理器中的正则表达式匹配问题。
- 修复了图像处理中的帧大小和拼接问题。
- 修复了私聊时产生`reply`消息的bug。
- 修复了配置文件加载时的版本兼容性问题。
### 📚 文档更新
- 更新了`README.md`文件包括Python版本要求和协议信息。
- 新增了工具系统和心流系统的详细文档。
- 优化了部署相关文档的完整性。
### 🔧 其他改进
- 新增了崩溃日志记录器,记录崩溃信息到日志文件。
- 优化了统计信息输出,在控制台显示详细统计信息。
- 改进了异常处理机制,提升系统稳定性。
- 现可配置部分模型的temp参数
## [0.6.0] - 2025-4-4 ## [0.6.0] - 2025-4-4
### 摘要 ### 摘要

View File

@@ -22,7 +22,7 @@
## [0.0.11] - 2025-3-12 ## [0.0.11] - 2025-3-12
### Added ### Added
- 新增了 `schedule` 配置项,用于配置日程表生成功能 - 新增了 `schedule` 配置项,用于配置日程表生成功能
- 新增了 `response_spliter` 配置项,用于控制回复分割 - 新增了 `response_splitter` 配置项,用于控制回复分割
- 新增了 `experimental` 配置项,用于实验性功能开关 - 新增了 `experimental` 配置项,用于实验性功能开关
- 新增了 `llm_observation``llm_sub_heartflow` 模型配置 - 新增了 `llm_observation``llm_sub_heartflow` 模型配置
- 新增了 `llm_heartflow` 模型配置 - 新增了 `llm_heartflow` 模型配置

Binary file not shown.

View File

@@ -1,10 +1,10 @@
#!/bin/bash #!/bin/bash
# MaiCore & Nonebot adapter一键安装脚本 by Cookie_987 # MaiCore & NapCat Adapter一键安装脚本 by Cookie_987
# 适用于Arch/Ubuntu 24.10/Debian 12/CentOS 9 # 适用于Arch/Ubuntu 24.10/Debian 12/CentOS 9
# 请小心使用任何一键脚本! # 请小心使用任何一键脚本!
INSTALLER_VERSION="0.0.2-refactor" INSTALLER_VERSION="0.0.3-refactor"
LANG=C.UTF-8 LANG=C.UTF-8
# 如无法访问GitHub请修改此处镜像地址 # 如无法访问GitHub请修改此处镜像地址
@@ -31,7 +31,7 @@ DEFAULT_INSTALL_DIR="/opt/maicore"
# 服务名称 # 服务名称
SERVICE_NAME="maicore" SERVICE_NAME="maicore"
SERVICE_NAME_WEB="maicore-web" SERVICE_NAME_WEB="maicore-web"
SERVICE_NAME_NBADAPTER="maicore-nonebot-adapter" SERVICE_NAME_NBADAPTER="maibot-napcat-adapter"
IS_INSTALL_MONGODB=false IS_INSTALL_MONGODB=false
IS_INSTALL_NAPCAT=false IS_INSTALL_NAPCAT=false
@@ -59,9 +59,9 @@ show_menu() {
"1" "启动MaiCore" \ "1" "启动MaiCore" \
"2" "停止MaiCore" \ "2" "停止MaiCore" \
"3" "重启MaiCore" \ "3" "重启MaiCore" \
"4" "启动Nonebot adapter" \ "4" "启动NapCat Adapter" \
"5" "停止Nonebot adapter" \ "5" "停止NapCat Adapter" \
"6" "重启Nonebot adapter" \ "6" "重启NapCat Adapter" \
"7" "拉取最新MaiCore仓库" \ "7" "拉取最新MaiCore仓库" \
"8" "切换分支" \ "8" "切换分支" \
"9" "退出" 3>&1 1>&2 2>&3) "9" "退出" 3>&1 1>&2 2>&3)
@@ -83,15 +83,15 @@ show_menu() {
;; ;;
4) 4)
systemctl start ${SERVICE_NAME_NBADAPTER} systemctl start ${SERVICE_NAME_NBADAPTER}
whiptail --msgbox "✅Nonebot adapter已启动" 10 60 whiptail --msgbox "✅NapCat Adapter已启动" 10 60
;; ;;
5) 5)
systemctl stop ${SERVICE_NAME_NBADAPTER} systemctl stop ${SERVICE_NAME_NBADAPTER}
whiptail --msgbox "🛑Nonebot adapter已停止" 10 60 whiptail --msgbox "🛑NapCat Adapter已停止" 10 60
;; ;;
6) 6)
systemctl restart ${SERVICE_NAME_NBADAPTER} systemctl restart ${SERVICE_NAME_NBADAPTER}
whiptail --msgbox "🔄Nonebot adapter已重启" 10 60 whiptail --msgbox "🔄NapCat Adapter已重启" 10 60
;; ;;
7) 7)
update_dependencies update_dependencies
@@ -357,8 +357,8 @@ run_installation() {
# Python版本检查 # Python版本检查
check_python() { check_python() {
PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")') PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
if ! python3 -c "import sys; exit(0) if sys.version_info >= (3,9) else exit(1)"; then if ! python3 -c "import sys; exit(0) if sys.version_info >= (3,10) else exit(1)"; then
whiptail --title "⚠️ [4/6] Python 版本过低" --msgbox "检测到 Python 版本为 $PYTHON_VERSION,需要 3.9 或以上!\n请升级 Python 后重新运行本脚本。" 10 60 whiptail --title "⚠️ [4/6] Python 版本过低" --msgbox "检测到 Python 版本为 $PYTHON_VERSION,需要 3.10 或以上!\n请升级 Python 后重新运行本脚本。" 10 60
exit 1 exit 1
fi fi
} }
@@ -410,7 +410,7 @@ run_installation() {
# 确认安装 # 确认安装
confirm_install() { confirm_install() {
local confirm_msg="请确认以下更改:\n\n" local confirm_msg="请确认以下更改:\n\n"
confirm_msg+="📂 安装MaiCore、Nonebot Adapter到: $INSTALL_DIR\n" confirm_msg+="📂 安装MaiCore、NapCat Adapter到: $INSTALL_DIR\n"
confirm_msg+="🔀 分支: $BRANCH\n" confirm_msg+="🔀 分支: $BRANCH\n"
[[ $IS_INSTALL_DEPENDENCIES == true ]] && confirm_msg+="📦 安装依赖:${missing_packages[@]}\n" [[ $IS_INSTALL_DEPENDENCIES == true ]] && confirm_msg+="📦 安装依赖:${missing_packages[@]}\n"
[[ $IS_INSTALL_MONGODB == true || $IS_INSTALL_NAPCAT == true ]] && confirm_msg+="📦 安装额外组件:\n" [[ $IS_INSTALL_MONGODB == true || $IS_INSTALL_NAPCAT == true ]] && confirm_msg+="📦 安装额外组件:\n"
@@ -499,50 +499,28 @@ EOF
} }
echo -e "${GREEN}克隆 nonebot-plugin-maibot-adapters 仓库...${RESET}" echo -e "${GREEN}克隆 nonebot-plugin-maibot-adapters 仓库...${RESET}"
git clone $GITHUB_REPO/MaiM-with-u/nonebot-plugin-maibot-adapters.git || { git clone $GITHUB_REPO/MaiM-with-u/MaiBot-Napcat-Adapter.git || {
echo -e "${RED}克隆 nonebot-plugin-maibot-adapters 仓库失败!${RESET}" echo -e "${RED}克隆 MaiBot-Napcat-Adapter.git 仓库失败!${RESET}"
exit 1 exit 1
} }
echo -e "${GREEN}安装Python依赖...${RESET}" echo -e "${GREEN}安装Python依赖...${RESET}"
pip install -r MaiBot/requirements.txt pip install -r MaiBot/requirements.txt
pip install nb-cli cd MaiBot
pip install nonebot-adapter-onebot pip install uv
pip install 'nonebot2[fastapi]' uv pip install -i https://mirrors.aliyun.com/pypi/simple -r requirements.txt
cd ..
echo -e "${GREEN}安装maim_message依赖...${RESET}" echo -e "${GREEN}安装maim_message依赖...${RESET}"
cd maim_message cd maim_message
pip install -e . uv pip install -i https://mirrors.aliyun.com/pypi/simple -e .
cd .. cd ..
echo -e "${GREEN}部署Nonebot adapter...${RESET}" echo -e "${GREEN}部署MaiBot Napcat Adapter...${RESET}"
cd MaiBot cd MaiBot-Napcat-Adapter
mkdir nonebot-maibot-adapter uv pip install -i https://mirrors.aliyun.com/pypi/simple -r requirements.txt
cd nonebot-maibot-adapter
cat > pyproject.toml <<EOF
[project]
name = "nonebot-maibot-adapter"
version = "0.1.0"
description = "nonebot-maibot-adapter"
readme = "README.md"
requires-python = ">=3.9, <4.0"
[tool.nonebot]
adapters = [
{ name = "OneBot V11", module_name = "nonebot.adapters.onebot.v11" }
]
plugins = []
plugin_dirs = ["src/plugins"]
builtin_plugins = []
EOF
echo "Manually created by run.sh" > README.md
mkdir src
cp -r ../../nonebot-plugin-maibot-adapters/nonebot_plugin_maibot_adapters src/plugins/nonebot_plugin_maibot_adapters
cd .. cd ..
cd ..
echo -e "${GREEN}同意协议...${RESET}" echo -e "${GREEN}同意协议...${RESET}"
@@ -590,13 +568,13 @@ EOF
cat > /etc/systemd/system/${SERVICE_NAME_NBADAPTER}.service <<EOF cat > /etc/systemd/system/${SERVICE_NAME_NBADAPTER}.service <<EOF
[Unit] [Unit]
Description=Maicore Nonebot adapter Description=MaiBot Napcat Adapter
After=network.target mongod.service After=network.target mongod.service ${SERVICE_NAME}.service
[Service] [Service]
Type=simple Type=simple
WorkingDirectory=${INSTALL_DIR}/MaiBot/nonebot-maibot-adapter WorkingDirectory=${INSTALL_DIR}/MaiBot-Napcat-Adapter
ExecStart=/bin/bash -c "source $INSTALL_DIR/venv/bin/activate && nb run --reload" ExecStart=$INSTALL_DIR/venv/bin/python3 main.py
Restart=always Restart=always
RestartSec=10s RestartSec=10s
@@ -605,7 +583,6 @@ WantedBy=multi-user.target
EOF EOF
systemctl daemon-reload systemctl daemon-reload
systemctl enable ${SERVICE_NAME}
# 保存安装信息 # 保存安装信息
echo "INSTALLER_VERSION=${INSTALLER_VERSION}" > /etc/maicore_install.conf echo "INSTALLER_VERSION=${INSTALLER_VERSION}" > /etc/maicore_install.conf

View File

@@ -102,10 +102,28 @@ MOOD_STYLE_CONFIG = {
"file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"), "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"),
}, },
"simple": { "simple": {
"console_format": ("<green>{time:MM-DD HH:mm}</green> | <light-green>心情</light-green> | {message}"), "console_format": ("<green>{time:MM-DD HH:mm}</green> | <magenta>心情</magenta> | {message}"),
"file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"), "file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 心情 | {message}"),
}, },
} }
# tool use
TOOL_USE_STYLE_CONFIG = {
"advanced": {
"console_format": (
"<green>{time:YYYY-MM-DD HH:mm:ss}</green> | "
"<level>{level: <8}</level> | "
"<cyan>{extra[module]: <12}</cyan> | "
"<magenta>工具使用</magenta> | "
"<level>{message}</level>"
),
"file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}"),
},
"simple": {
"console_format": ("<green>{time:MM-DD HH:mm}</green> | <magenta>工具使用</magenta> | {message}"),
"file_format": ("{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 工具使用 | {message}"),
},
}
# relationship # relationship
RELATION_STYLE_CONFIG = { RELATION_STYLE_CONFIG = {
@@ -308,6 +326,7 @@ SUB_HEARTFLOW_STYLE_CONFIG = (
) # noqa: E501 ) # noqa: E501
WILLING_STYLE_CONFIG = WILLING_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else WILLING_STYLE_CONFIG["advanced"] WILLING_STYLE_CONFIG = WILLING_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else WILLING_STYLE_CONFIG["advanced"]
CONFIG_STYLE_CONFIG = CONFIG_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CONFIG_STYLE_CONFIG["advanced"] CONFIG_STYLE_CONFIG = CONFIG_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CONFIG_STYLE_CONFIG["advanced"]
TOOL_USE_STYLE_CONFIG = TOOL_USE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else TOOL_USE_STYLE_CONFIG["advanced"]
def is_registered_module(record: dict) -> bool: def is_registered_module(record: dict) -> bool:

View File

@@ -0,0 +1,57 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.config.config import global_config
from src.common.logger import get_module_logger
from src.plugins.moods.moods import MoodManager
from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
from typing import Dict, Any
logger = get_module_logger("change_mood_tool")
class ChangeMoodTool(BaseTool):
"""改变心情的工具"""
name = "change_mood"
description = "根据收到的内容和自身回复的内容,改变心情,当你回复了别人的消息,你可以使用这个工具"
parameters = {
"type": "object",
"properties": {
"text": {"type": "string", "description": "引起你改变心情的文本"},
"response_set": {"type": "list", "description": "你对文本的回复"},
},
"required": ["text", "response_set"],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]:
"""执行心情改变
Args:
function_args: 工具参数
message_processed_plain_text: 原始消息文本
response_set: 原始消息文本
Returns:
Dict: 工具执行结果
"""
try:
response_set = function_args.get("response_set")
message_processed_plain_text = function_args.get("text")
mood_manager = MoodManager.get_instance()
gpt = ResponseGenerator()
if response_set is None:
response_set = ["你还没有回复"]
ori_response = ",".join(response_set)
_stance, emotion = await gpt._get_emotion_tags(ori_response, message_processed_plain_text)
mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
return {"name": "change_mood", "content": f"你的心情刚刚变化了,现在的心情是: {emotion}"}
except Exception as e:
logger.error(f"心情改变工具执行失败: {str(e)}")
return {"name": "change_mood", "content": f"心情改变失败: {str(e)}"}
# 注册工具
# register_tool(ChangeMoodTool)

View File

@@ -0,0 +1,44 @@
# from src.plugins.person_info.relationship_manager import relationship_manager
from src.common.logger import get_module_logger
from src.do_tool.tool_can_use.base_tool import BaseTool
# from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
logger = get_module_logger("relationship_tool")
class RelationshipTool(BaseTool):
name = "change_relationship"
description = "根据收到的文本和回复内容,修改与特定用户的关系值,当你回复了别人的消息,你可以使用这个工具"
parameters = {
"type": "object",
"properties": {
"text": {"type": "string", "description": "收到的文本"},
"changed_value": {"type": "number", "description": "变更值"},
"reason": {"type": "string", "description": "变更原因"},
},
"required": ["text", "changed_value", "reason"],
}
async def execute(self, args: dict, message_txt: str) -> dict:
"""执行工具功能
Args:
args: 包含工具参数的字典
text: 原始消息文本
changed_value: 变更值
reason: 变更原因
Returns:
dict: 包含执行结果的字典
"""
try:
text = args.get("text")
changed_value = args.get("changed_value")
reason = args.get("reason")
return {"content": f"因为你刚刚因为{reason},所以你和发[{text}]这条消息的人的关系值变化为{changed_value}"}
except Exception as e:
logger.error(f"修改关系值时发生错误: {str(e)}")
return {"content": f"修改关系值失败: {str(e)}"}

View File

@@ -0,0 +1,50 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
from src.common.logger import get_module_logger
from typing import Dict, Any
logger = get_module_logger("compare_numbers_tool")
class CompareNumbersTool(BaseTool):
"""比较两个数大小的工具"""
name = "compare_numbers"
description = "比较两个数的大小,返回较大的数"
parameters = {
"type": "object",
"properties": {
"num1": {"type": "number", "description": "第一个数字"},
"num2": {"type": "number", "description": "第二个数字"},
},
"required": ["num1", "num2"],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行比较两个数的大小
Args:
function_args: 工具参数
message_txt: 原始消息文本
Returns:
Dict: 工具执行结果
"""
try:
num1 = function_args.get("num1")
num2 = function_args.get("num2")
if num1 > num2:
result = f"{num1} 大于 {num2}"
elif num1 < num2:
result = f"{num1} 小于 {num2}"
else:
result = f"{num1} 等于 {num2}"
return {"name": self.name, "content": result}
except Exception as e:
logger.error(f"比较数字失败: {str(e)}")
return {"name": self.name, "content": f"比较数字失败: {str(e)}"}
# 注册工具
# register_tool(CompareNumbersTool)

View File

@@ -1,47 +0,0 @@
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
from src.common.logger import get_module_logger
from typing import Dict, Any
logger = get_module_logger("fibonacci_sequence_tool")
class FibonacciSequenceTool(BaseTool):
"""生成斐波那契数列的工具"""
name = "fibonacci_sequence"
description = "生成指定长度的斐波那契数列"
parameters = {
"type": "object",
"properties": {"n": {"type": "integer", "description": "斐波那契数列的长度", "minimum": 1}},
"required": ["n"],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行工具功能
Args:
function_args: 工具参数
message_txt: 原始消息文本
Returns:
Dict: 工具执行结果
"""
try:
n = function_args.get("n")
if n <= 0:
raise ValueError("参数n必须大于0")
sequence = []
a, b = 0, 1
for _ in range(n):
sequence.append(a)
a, b = b, a + b
return {"name": self.name, "content": sequence}
except Exception as e:
logger.error(f"fibonacci_sequence工具执行失败: {str(e)}")
return {"name": self.name, "content": f"执行失败: {str(e)}"}
# 注册工具
register_tool(FibonacciSequenceTool)

View File

@@ -1,41 +0,0 @@
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
from src.common.logger import get_module_logger
from typing import Dict, Any
logger = get_module_logger("generate_buddha_emoji_tool")
class GenerateBuddhaEmojiTool(BaseTool):
"""生成佛祖颜文字的工具类"""
name = "generate_buddha_emoji"
description = "生成一个佛祖的颜文字表情"
parameters = {
"type": "object",
"properties": {
# 无参数
},
"required": [],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行工具功能,生成佛祖颜文字
Args:
function_args: 工具参数
message_txt: 原始消息文本
Returns:
Dict: 工具执行结果
"""
try:
buddha_emoji = "这是一个佛祖emoji༼ つ ◕_◕ ༽つ"
return {"name": self.name, "content": buddha_emoji}
except Exception as e:
logger.error(f"generate_buddha_emoji工具执行失败: {str(e)}")
return {"name": self.name, "content": f"执行失败: {str(e)}"}
# 注册工具
register_tool(GenerateBuddhaEmojiTool)

View File

@@ -1,66 +0,0 @@
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
from src.common.logger import get_module_logger
from typing import Dict, Any
logger = get_module_logger("generate_cmd_tutorial_tool")
class GenerateCmdTutorialTool(BaseTool):
"""生成Windows CMD基本操作教程的工具"""
name = "generate_cmd_tutorial"
description = "生成关于Windows命令提示符(CMD)的基本操作教程,包括常用命令和使用方法"
parameters = {"type": "object", "properties": {}, "required": []}
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行工具功能
Args:
function_args: 工具参数
message_txt: 原始消息文本
Returns:
Dict: 工具执行结果
"""
try:
tutorial_content = """
# Windows CMD 基本操作教程
## 1. 基本导航命令
- `dir`: 列出当前目录下的文件和文件夹
- `cd <目录名>`: 进入指定目录
- `cd..`: 返回上一级目录
- `cd\\`: 返回根目录
## 2. 文件操作命令
- `copy <源文件> <目标位置>`: 复制文件
- `move <源文件> <目标位置>`: 移动文件
- `del <文件名>`: 删除文件
- `ren <旧文件名> <新文件名>`: 重命名文件
## 3. 系统信息命令
- `systeminfo`: 显示系统配置信息
- `hostname`: 显示计算机名称
- `ver`: 显示Windows版本
## 4. 网络相关命令
- `ipconfig`: 显示网络配置信息
- `ping <主机名或IP>`: 测试网络连接
- `tracert <主机名或IP>`: 跟踪网络路径
## 5. 实用技巧
- 按Tab键可以自动补全文件名或目录名
- 使用`> <文件名>`可以将命令输出重定向到文件
- 使用`| more`可以分页显示长输出
注意:使用命令时要小心,特别是删除操作。
"""
return {"name": self.name, "content": tutorial_content}
except Exception as e:
logger.error(f"generate_cmd_tutorial工具执行失败: {str(e)}")
return {"name": self.name, "content": f"执行失败: {str(e)}"}
# 注册工具
register_tool(GenerateCmdTutorialTool)

View File

@@ -1,7 +1,8 @@
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.schedule.schedule_generator import bot_schedule from src.plugins.schedule.schedule_generator import bot_schedule
from src.common.logger import get_module_logger from src.common.logger import get_module_logger
from typing import Dict, Any from typing import Dict, Any
from datetime import datetime
logger = get_module_logger("get_current_task_tool") logger = get_module_logger("get_current_task_tool")
@@ -9,19 +10,19 @@ logger = get_module_logger("get_current_task_tool")
class GetCurrentTaskTool(BaseTool): class GetCurrentTaskTool(BaseTool):
"""获取当前正在做的事情/最近的任务工具""" """获取当前正在做的事情/最近的任务工具"""
name = "get_current_task" name = "get_schedule"
description = "获取当前正在做的事情/最近的任务" description = "获取当前正在做的事情,或者某个时间点/时间段的日程信息"
parameters = { parameters = {
"type": "object", "type": "object",
"properties": { "properties": {
"num": {"type": "integer", "description": "要获取的任务数量"}, "start_time": {"type": "string", "description": "开始时间,格式为'HH:MM'填写current则获取当前任务"},
"time_info": {"type": "boolean", "description": "是否包含时间信息"}, "end_time": {"type": "string", "description": "结束时间,格式为'HH:MM'填写current则获取当前任务"},
}, },
"required": [], "required": ["start_time", "end_time"],
} }
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]: async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行获取当前任务 """执行获取当前任务或指定时间段的日程信息
Args: Args:
function_args: 工具参数 function_args: 工具参数
@@ -30,25 +31,29 @@ class GetCurrentTaskTool(BaseTool):
Returns: Returns:
Dict: 工具执行结果 Dict: 工具执行结果
""" """
try: start_time = function_args.get("start_time")
# 获取参数,如果没有提供则使用默认值 end_time = function_args.get("end_time")
num = function_args.get("num", 1)
time_info = function_args.get("time_info", False)
# 调用日程系统获取当前任务 # 如果 start_time 或 end_time 为 "current",则获取当前任务
current_task = bot_schedule.get_current_num_task(num=num, time_info=time_info) if start_time == "current" or end_time == "current":
current_task = bot_schedule.get_current_num_task(num=1, time_info=True)
# 格式化返回结果 current_time = datetime.now().strftime("%H:%M:%S")
current_date = datetime.now().strftime("%Y-%m-%d")
if current_task: if current_task:
task_info = current_task task_info = f"{current_date} {current_time},你在{current_task}"
else: else:
task_info = "当前没有正在进行的任务" task_info = f"{current_time} {current_date},没在做任何事情"
# 如果提供了时间范围,则获取该时间段的日程信息
elif start_time and end_time:
tasks = await bot_schedule.get_task_from_time_to_time(start_time, end_time)
if tasks:
task_list = []
for task in tasks:
task_time = task[0].strftime("%H:%M")
task_content = task[1]
task_list.append(f"{task_time}时,{task_content}")
task_info = "\n".join(task_list)
else:
task_info = f"{start_time}{end_time} 之间没有找到日程信息"
return {"name": "get_current_task", "content": f"当前任务信息: {task_info}"} return {"name": "get_current_task", "content": f"日程信息: {task_info}"}
except Exception as e:
logger.error(f"获取当前任务工具执行失败: {str(e)}")
return {"name": "get_current_task", "content": f"获取当前任务失败: {str(e)}"}
# 注册工具
register_tool(GetCurrentTaskTool)

View File

@@ -1,4 +1,4 @@
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.chat.utils import get_embedding from src.plugins.chat.utils import get_embedding
from src.common.database import db from src.common.database import db
from src.common.logger import get_module_logger from src.common.logger import get_module_logger
@@ -132,4 +132,4 @@ class SearchKnowledgeTool(BaseTool):
# 注册工具 # 注册工具
register_tool(SearchKnowledgeTool) # register_tool(SearchKnowledgeTool)

View File

@@ -1,15 +1,15 @@
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.memory_system.Hippocampus import HippocampusManager from src.plugins.memory_system.Hippocampus import HippocampusManager
from src.common.logger import get_module_logger from src.common.logger import get_module_logger
from typing import Dict, Any from typing import Dict, Any
logger = get_module_logger("get_memory_tool") logger = get_module_logger("mid_chat_mem_tool")
class GetMemoryTool(BaseTool): class GetMemoryTool(BaseTool):
"""从记忆系统中获取相关记忆的工具""" """从记忆系统中获取相关记忆的工具"""
name = "get_memory" name = "mid_chat_mem"
description = "从记忆系统中获取相关记忆" description = "从记忆系统中获取相关记忆"
parameters = { parameters = {
"type": "object", "type": "object",
@@ -49,11 +49,11 @@ class GetMemoryTool(BaseTool):
else: else:
content = f"你不太记得有关{text}的记忆,你对此不太了解" content = f"你不太记得有关{text}的记忆,你对此不太了解"
return {"name": "get_memory", "content": content} return {"name": "mid_chat_mem", "content": content}
except Exception as e: except Exception as e:
logger.error(f"记忆获取工具执行失败: {str(e)}") logger.error(f"记忆获取工具执行失败: {str(e)}")
return {"name": "get_memory", "content": f"记忆获取失败: {str(e)}"} return {"name": "mid_chat_mem", "content": f"记忆获取失败: {str(e)}"}
# 注册工具 # 注册工具
register_tool(GetMemoryTool) # register_tool(GetMemoryTool)

View File

@@ -0,0 +1,38 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
from src.common.logger import get_module_logger
from typing import Dict, Any
from datetime import datetime
logger = get_module_logger("get_time_date")
class GetCurrentDateTimeTool(BaseTool):
"""获取当前时间、日期、年份和星期的工具"""
name = "get_current_date_time"
description = "当有人询问或者涉及到具体时间或者日期的时候,必须使用这个工具"
parameters = {
"type": "object",
"properties": {},
"required": [],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行获取当前时间、日期、年份和星期
Args:
function_args: 工具参数(此工具不使用)
message_txt: 原始消息文本(此工具不使用)
Returns:
Dict: 工具执行结果
"""
current_time = datetime.now().strftime("%H:%M:%S")
current_date = datetime.now().strftime("%Y-%m-%d")
current_year = datetime.now().strftime("%Y")
current_weekday = datetime.now().strftime("%A")
return {
"name": "get_current_date_time",
"content": f"当前时间: {current_time}, 日期: {current_date}, 年份: {current_year}, 星期: {current_weekday}",
}

View File

@@ -0,0 +1,40 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
from src.common.logger import get_module_logger
from typing import Dict, Any
logger = get_module_logger("get_mid_memory_tool")
class GetMidMemoryTool(BaseTool):
"""从记忆系统中获取相关记忆的工具"""
name = "mid_chat_mem"
description = "之前的聊天内容中获取具体信息,当最新消息提到,或者你需要回复的消息中提到,你可以使用这个工具"
parameters = {
"type": "object",
"properties": {
"id": {"type": "integer", "description": "要查询的聊天记录id"},
},
"required": ["id"],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str = "") -> Dict[str, Any]:
"""执行记忆获取
Args:
function_args: 工具参数
message_txt: 原始消息文本
Returns:
Dict: 工具执行结果
"""
try:
id = function_args.get("id")
return {"name": "mid_chat_mem", "content": str(id)}
except Exception as e:
logger.error(f"聊天记录获取工具执行失败: {str(e)}")
return {"name": "mid_chat_mem", "content": f"聊天记录获取失败: {str(e)}"}
# 注册工具
# register_tool(GetMemoryTool)

View File

@@ -0,0 +1,25 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
from src.common.logger import get_module_logger
from typing import Dict, Any
logger = get_module_logger("send_emoji_tool")
class SendEmojiTool(BaseTool):
"""发送表情包的工具"""
name = "send_emoji"
description = "当你觉得需要表达情感,或者帮助表达,可以使用这个工具发送表情包"
parameters = {
"type": "object",
"properties": {"text": {"type": "string", "description": "要发送的表情包描述"}},
"required": ["text"],
}
async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]:
text = function_args.get("text", message_txt)
return {
"name": "send_emoji",
"content": text,
}

View File

@@ -4,19 +4,27 @@ from src.plugins.chat.chat_stream import ChatStream
from src.common.database import db from src.common.database import db
import time import time
import json import json
from src.common.logger import get_module_logger from src.common.logger import get_module_logger, TOOL_USE_STYLE_CONFIG, LogConfig
from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance
from src.heart_flow.sub_heartflow import SubHeartflow
logger = get_module_logger("tool_use") tool_use_config = LogConfig(
# 使用消息发送专用样式
console_format=TOOL_USE_STYLE_CONFIG["console_format"],
file_format=TOOL_USE_STYLE_CONFIG["file_format"],
)
logger = get_module_logger("tool_use", config=tool_use_config)
class ToolUser: class ToolUser:
def __init__(self): def __init__(self):
self.llm_model_tool = LLM_request( self.llm_model_tool = LLM_request(
model=global_config.llm_heartflow, temperature=0.2, max_tokens=1000, request_type="tool_use" model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
) )
async def _build_tool_prompt(self, message_txt: str, sender_name: str, chat_stream: ChatStream): async def _build_tool_prompt(
self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
):
"""构建工具使用的提示词 """构建工具使用的提示词
Args: Args:
@@ -27,6 +35,12 @@ class ToolUser:
Returns: Returns:
str: 构建好的提示词 str: 构建好的提示词
""" """
if subheartflow:
mid_memory_info = subheartflow.observations[0].mid_memory_info
# print(f"intol111111111111111111111111111111111222222222222mid_memory_info{mid_memory_info}")
else:
mid_memory_info = ""
new_messages = list( new_messages = list(
db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15) db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15)
) )
@@ -38,11 +52,12 @@ class ToolUser:
# 这些信息应该从调用者传入而不是从self获取 # 这些信息应该从调用者传入而不是从self获取
bot_name = global_config.BOT_NICKNAME bot_name = global_config.BOT_NICKNAME
prompt = "" prompt = ""
prompt += mid_memory_info
prompt += "你正在思考如何回复群里的消息。\n" prompt += "你正在思考如何回复群里的消息。\n"
prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n" prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
prompt += f"注意你就是{bot_name}{bot_name}指的就是你。" prompt += f"注意你就是{bot_name}{bot_name}指的就是你。"
prompt += "你现在需要对群里的聊天内容进行回复,现在请你思考,你是否需要额外的信息,或者一些工具来帮你回复,比如回忆或者搜寻已有的知识,或者了解你现在正在做什么,请输出你需要的工具,或者你需要的额外信息。"
prompt += "你现在需要对群里的聊天内容进行回复,现在选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"
return prompt return prompt
def _define_tools(self): def _define_tools(self):
@@ -76,10 +91,14 @@ class ToolUser:
# 执行工具 # 执行工具
result = await tool_instance.execute(function_args, message_txt) result = await tool_instance.execute(function_args, message_txt)
if result: if result:
# 直接使用 function_name 作为 tool_type
tool_type = function_name
return { return {
"tool_call_id": tool_call["id"], "tool_call_id": tool_call["id"],
"role": "tool", "role": "tool",
"name": function_name, "name": function_name,
"type": tool_type,
"content": result["content"], "content": result["content"],
} }
return None return None
@@ -87,7 +106,9 @@ class ToolUser:
logger.error(f"执行工具调用时发生错误: {str(e)}") logger.error(f"执行工具调用时发生错误: {str(e)}")
return None return None
async def use_tool(self, message_txt: str, sender_name: str, chat_stream: ChatStream): async def use_tool(
self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
):
"""使用工具辅助思考,判断是否需要额外信息 """使用工具辅助思考,判断是否需要额外信息
Args: Args:
@@ -96,14 +117,15 @@ class ToolUser:
chat_stream: 聊天流对象 chat_stream: 聊天流对象
Returns: Returns:
dict: 工具使用结果 dict: 工具使用结果,包含结构化的信息
""" """
try: try:
# 构建提示词 # 构建提示词
prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream) prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream, subheartflow)
# 定义可用工具 # 定义可用工具
tools = self._define_tools() tools = self._define_tools()
logger.trace(f"工具定义: {tools}")
# 使用llm_model_tool发送带工具定义的请求 # 使用llm_model_tool发送带工具定义的请求
payload = { payload = {
@@ -114,7 +136,7 @@ class ToolUser:
"temperature": 0.2, "temperature": 0.2,
} }
logger.debug(f"发送工具调用请求,模型: {self.llm_model_tool.model_name}") logger.trace(f"发送工具调用请求,模型: {self.llm_model_tool.model_name}")
# 发送请求获取模型是否需要调用工具 # 发送请求获取模型是否需要调用工具
response = await self.llm_model_tool._execute_request( response = await self.llm_model_tool._execute_request(
endpoint="/chat/completions", payload=payload, prompt=prompt endpoint="/chat/completions", payload=payload, prompt=prompt
@@ -123,36 +145,40 @@ class ToolUser:
# 根据返回值数量判断是否有工具调用 # 根据返回值数量判断是否有工具调用
if len(response) == 3: if len(response) == 3:
content, reasoning_content, tool_calls = response content, reasoning_content, tool_calls = response
logger.info(f"工具思考: {tool_calls}") # logger.info(f"工具思考: {tool_calls}")
# logger.debug(f"工具思考: {content}")
# 检查响应中工具调用是否有效 # 检查响应中工具调用是否有效
if not tool_calls: if not tool_calls:
logger.info("模型返回了空的tool_calls列表") logger.debug("模型返回了空的tool_calls列表")
return {"used_tools": False} return {"used_tools": False}
logger.info(f"模型请求调用{len(tool_calls)}个工具") tool_calls_str = ""
for tool_call in tool_calls:
tool_calls_str += f"{tool_call['function']['name']}\n"
logger.info(f"根据:\n{prompt}\n模型请求调用{len(tool_calls)}个工具: {tool_calls_str}")
tool_results = [] tool_results = []
collected_info = "" structured_info = {} # 动态生成键
# 执行所有工具调用 # 执行所有工具调用
for tool_call in tool_calls: for tool_call in tool_calls:
result = await self._execute_tool_call(tool_call, message_txt) result = await self._execute_tool_call(tool_call, message_txt)
if result: if result:
tool_results.append(result) tool_results.append(result)
# 将工具结果添加到收集的信息中 # 使用工具名称作为键
collected_info += f"\n{result['name']}返回结果: {result['content']}\n" tool_name = result["name"]
if tool_name not in structured_info:
structured_info[tool_name] = []
structured_info[tool_name].append({"name": result["name"], "content": result["content"]})
# 如果有工具结果,直接返回收集的信息 # 如果有工具结果,返回结构化的信息
if collected_info: if structured_info:
logger.info(f"工具调用收集到信息: {collected_info}") logger.info(f"工具调用收集到结构化信息: {json.dumps(structured_info, ensure_ascii=False)}")
return { return {"used_tools": True, "structured_info": structured_info}
"used_tools": True,
"collected_info": collected_info,
}
else: else:
# 没有工具调用 # 没有工具调用
content, reasoning_content = response content, reasoning_content = response
logger.info("模型没有请求调用任何工具") logger.debug("模型没有请求调用任何工具")
# 如果没有工具调用或处理失败,直接返回原始思考 # 如果没有工具调用或处理失败,直接返回原始思考
return { return {

82
src/heart_flow/README.md Normal file
View File

@@ -0,0 +1,82 @@
# 心流系统 (Heart Flow System)
心流系统是一个模拟AI机器人内心思考和情感流动的核心系统。它通过多层次的心流结构使AI能够对外界信息进行观察、思考和情感反应从而产生更自然的对话和行为。
## 系统架构
### 1. 主心流 (Heartflow)
- 位于 `heartflow.py`
- 作为整个系统的主控制器
- 负责管理和协调多个子心流
- 维护AI的整体思维状态
- 定期进行全局思考更新
### 2. 子心流 (SubHeartflow)
- 位于 `sub_heartflow.py`
- 处理具体的对话场景(如群聊)
- 维护特定场景下的思维状态
- 通过观察者模式接收和处理信息
- 能够进行独立的思考和回复判断
### 3. 观察系统 (Observation)
- 位于 `observation.py`
- 负责收集和处理外部信息
- 支持多种观察类型(如聊天观察)
- 对信息进行实时总结和更新
## 主要功能
### 思维系统
- 定期进行思维更新
- 维护短期记忆和思维连续性
- 支持多层次的思维处理
### 情感系统
- 情绪状态管理
- 回复意愿判断
- 情感因素影响决策
### 交互系统
- 群聊消息处理
- 多场景并行处理
- 智能回复生成
## 工作流程
1. 主心流启动并创建必要的子心流
2. 子心流通过观察者接收外部信息
3. 系统进行信息处理和思维更新
4. 根据情感状态和思维结果决定是否回复
5. 生成合适的回复并更新思维状态
## 使用说明
### 创建新的子心流
```python
heartflow = Heartflow()
subheartflow = heartflow.create_subheartflow(chat_id)
```
### 添加观察者
```python
observation = ChattingObservation(chat_id)
subheartflow.add_observation(observation)
```
### 启动心流系统
```python
await heartflow.heartflow_start_working()
```
## 配置说明
系统的主要配置参数:
- `sub_heart_flow_stop_time`: 子心流停止时间
- `sub_heart_flow_freeze_time`: 子心流冻结时间
- `heart_flow_update_interval`: 心流更新间隔
## 注意事项
1. 子心流会在长时间不活跃后自动清理
2. 需要合理配置更新间隔以平衡性能和响应速度
3. 观察系统会限制消息处理数量以避免过载

View File

@@ -4,6 +4,10 @@ from datetime import datetime
from src.plugins.models.utils_model import LLM_request from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config from src.plugins.config.config import global_config
from src.common.database import db from src.common.database import db
from src.common.logger import get_module_logger
import traceback
logger = get_module_logger("observation")
# 所有观察的基类 # 所有观察的基类
@@ -27,30 +31,59 @@ class ChattingObservation(Observation):
self.name = global_config.BOT_NICKNAME self.name = global_config.BOT_NICKNAME
self.nick_name = global_config.BOT_ALIAS_NAMES self.nick_name = global_config.BOT_ALIAS_NAMES
self.observe_times = 0 self.max_now_obs_len = global_config.observation_context_size
self.overlap_len = global_config.compressed_length
self.mid_memorys = []
self.max_mid_memory_len = global_config.compress_length_limit
self.mid_memory_info = ""
self.now_message_info = ""
self.summary_count = 0 # 30秒内的更新次数 self.updating_old = False
self.max_update_in_30s = 2 # 30秒内最多更新2次
self.last_summary_time = 0 # 上次更新summary的时间
self.sub_observe = None
self.llm_summary = LLM_request( self.llm_summary = LLM_request(
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation" model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
) )
# 进行一次观察 返回观察结果observe_info # 进行一次观察 返回观察结果observe_info
def get_observe_info(self, ids=None):
if ids:
mid_memory_str = ""
for id in ids:
print(f"id{id}")
try:
for mid_memory in self.mid_memorys:
if mid_memory["id"] == id:
mid_memory_by_id = mid_memory
msg_str = ""
for msg in mid_memory_by_id["messages"]:
msg_str += f"{msg['detailed_plain_text']}"
time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
# mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
mid_memory_str += f"{msg_str}\n"
except Exception as e:
logger.error(f"获取mid_memory_id失败: {e}")
traceback.print_exc()
# print(f"获取mid_memory_id失败: {e}")
return self.now_message_info
return mid_memory_str + "现在群里正在聊:\n" + self.now_message_info
else:
return self.now_message_info
async def observe(self): async def observe(self):
# 查找新消息限制最多30条 # 查找新消息
new_messages = list( new_messages = list(
db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}}) db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}}).sort("time", 1)
.sort("time", 1) ) # 按时间正序排列
.limit(15)
) # 按时间正序排列最多15条
if not new_messages: if not new_messages:
return self.observe_info # 没有新消息,返回上次观察结果 return self.observe_info # 没有新消息,返回上次观察结果
self.last_observe_time = new_messages[-1]["time"]
self.talking_message.extend(new_messages)
# 将新消息转换为字符串格式 # 将新消息转换为字符串格式
new_messages_str = "" new_messages_str = ""
for msg in new_messages: for msg in new_messages:
@@ -60,83 +93,56 @@ class ChattingObservation(Observation):
# print(f"new_messages_str{new_messages_str}") # print(f"new_messages_str{new_messages_str}")
# 将新消息添加到talking_message同时保持列表长度不超过20条 # 将新消息添加到talking_message同时保持列表长度不超过20条
self.talking_message.extend(new_messages)
if len(self.talking_message) > 15:
self.talking_message = self.talking_message[-15:] # 只保留最新的15条
self.translate_message_list_to_str()
# 更新观察次数 if len(self.talking_message) > self.max_now_obs_len and not self.updating_old:
# self.observe_times += 1 self.updating_old = True
self.last_observe_time = new_messages[-1]["time"] # 计算需要保留的消息数量
keep_messages_count = self.max_now_obs_len - self.overlap_len
# 提取所有超出保留数量的最老消息
oldest_messages = self.talking_message[:-keep_messages_count]
self.talking_message = self.talking_message[-keep_messages_count:]
oldest_messages_str = "\n".join([msg["detailed_plain_text"] for msg in oldest_messages])
oldest_timestamps = [msg["time"] for msg in oldest_messages]
# 检查是否需要更新summary # 调用 LLM 总结主题
# current_time = int(datetime.now().timestamp()) prompt = f"请总结以下聊天记录的主题:\n{oldest_messages_str}\n主题,用一句话概括包括人物事件和主要信息,不要分点:"
# if current_time - self.last_summary_time >= 30: # 如果超过30秒重置计数 try:
# self.summary_count = 0 summary, _ = await self.llm_summary.generate_response_async(prompt)
# self.last_summary_time = current_time except Exception as e:
print(f"总结主题失败: {e}")
summary = "无法总结主题"
# if self.summary_count < self.max_update_in_30s: # 如果30秒内更新次数小于2次 mid_memory = {
# await self.update_talking_summary(new_messages_str) "id": str(int(datetime.now().timestamp())),
# print(f"更新聊天总结:{self.observe_info}11111111111111") "theme": summary,
# self.summary_count += 1 "messages": oldest_messages,
updated_observe_info = await self.update_talking_summary(new_messages_str) "timestamps": oldest_timestamps,
print(f"更新聊天总结:{updated_observe_info}11111111111111") "chat_id": self.chat_id,
self.observe_info = updated_observe_info "created_at": datetime.now().timestamp(),
}
# print(f"mid_memory{mid_memory}")
# 存入内存中的 mid_memorys
self.mid_memorys.append(mid_memory)
if len(self.mid_memorys) > self.max_mid_memory_len:
self.mid_memorys.pop(0)
return updated_observe_info mid_memory_str = "之前聊天的内容概括是:\n"
for mid_memory in self.mid_memorys:
time_diff = int((datetime.now().timestamp() - mid_memory["created_at"]) / 60)
mid_memory_str += f"距离现在{time_diff}分钟前(聊天记录id:{mid_memory['id']}){mid_memory['theme']}\n"
self.mid_memory_info = mid_memory_str
async def carefully_observe(self): self.updating_old = False
# 查找新消息限制最多40条
new_messages = list(
db.messages.find({"chat_id": self.chat_id, "time": {"$gt": self.last_observe_time}})
.sort("time", 1)
.limit(30)
) # 按时间正序排列最多30条
if not new_messages: # print(f"处理后self.talking_message{self.talking_message}")
return self.observe_info # 没有新消息,返回上次观察结果
# 将新消息转换为字符串格式 now_message_str = ""
new_messages_str = "" now_message_str += self.translate_message_list_to_str(talking_message=self.talking_message)
for msg in new_messages: self.now_message_info = now_message_str
if "detailed_plain_text" in msg:
new_messages_str += f"{msg['detailed_plain_text']}\n"
# 将新消息添加到talking_message同时保持列表长度不超过30条 logger.debug(f"压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.now_message_info}")
self.talking_message.extend(new_messages)
if len(self.talking_message) > 30:
self.talking_message = self.talking_message[-30:] # 只保留最新的30条
self.translate_message_list_to_str()
# 更新观察次数
self.observe_times += 1
self.last_observe_time = new_messages[-1]["time"]
updated_observe_info = await self.update_talking_summary(new_messages_str)
self.observe_info = updated_observe_info
return updated_observe_info
async def update_talking_summary(self, new_messages_str): async def update_talking_summary(self, new_messages_str):
# 基于已经有的talking_summary和新的talking_message生成一个summary
# print(f"更新聊天总结:{self.talking_summary}")
# 开始构建prompt
# prompt_personality = "你"
# # person
# individuality = Individuality.get_instance()
# personality_core = individuality.personality.personality_core
# prompt_personality += personality_core
# personality_sides = individuality.personality.personality_sides
# random.shuffle(personality_sides)
# prompt_personality += f",{personality_sides[0]}"
# identity_detail = individuality.identity.identity_detail
# random.shuffle(identity_detail)
# prompt_personality += f",{identity_detail[0]}"
# personality_info = prompt_personality
prompt = "" prompt = ""
# prompt += f"{personality_info}" # prompt += f"{personality_info}"
prompt += f"你的名字叫:{self.name}\n,标识'{self.name}'的都是你自己说的话" prompt += f"你的名字叫:{self.name}\n,标识'{self.name}'的都是你自己说的话"
@@ -155,7 +161,9 @@ class ChattingObservation(Observation):
# print(f"prompt{prompt}") # print(f"prompt{prompt}")
# print(f"self.observe_info{self.observe_info}") # print(f"self.observe_info{self.observe_info}")
def translate_message_list_to_str(self): def translate_message_list_to_str(self, talking_message):
self.talking_message_str = "" talking_message_str = ""
for message in self.talking_message: for message in talking_message:
self.talking_message_str += message["detailed_plain_text"] talking_message_str += message["detailed_plain_text"]
return talking_message_str

View File

@@ -1,10 +1,11 @@
from .observation import Observation from .observation import Observation, ChattingObservation
import asyncio import asyncio
from src.plugins.moods.moods import MoodManager from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLM_request from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config from src.plugins.config.config import global_config
import re
import time import time
from src.plugins.chat.message import UserInfo
from src.plugins.chat.utils import parse_text_timestamps
# from src.plugins.schedule.schedule_generator import bot_schedule # from src.plugins.schedule.schedule_generator import bot_schedule
# from src.plugins.memory_system.Hippocampus import HippocampusManager # from src.plugins.memory_system.Hippocampus import HippocampusManager
@@ -18,7 +19,6 @@ import random
from src.plugins.chat.chat_stream import ChatStream from src.plugins.chat.chat_stream import ChatStream
from src.plugins.person_info.relationship_manager import relationship_manager from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.chat.utils import get_recent_group_speaker from src.plugins.chat.utils import get_recent_group_speaker
from src.do_tool.tool_use import ToolUser
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
subheartflow_config = LogConfig( subheartflow_config = LogConfig(
@@ -32,23 +32,25 @@ logger = get_module_logger("subheartflow", config=subheartflow_config)
def init_prompt(): def init_prompt():
prompt = "" prompt = ""
# prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n" # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
prompt += "{collected_info}\n" prompt += "{extra_info}\n"
# prompt += "{prompt_schedule}\n"
prompt += "{relation_prompt_all}\n" prompt += "{relation_prompt_all}\n"
prompt += "{prompt_personality}\n" prompt += "{prompt_personality}\n"
prompt += "刚刚你的想法是{current_thinking_info}如果有新的内容,记得转换话题\n" prompt += "刚刚你的想法是{current_thinking_info}可以适当转换话题\n"
prompt += "-----------------------------------\n" prompt += "-----------------------------------\n"
prompt += "现在你正在上网和qq群里的网友们聊天群里正在聊的话题是{chat_observe_info}\n" prompt += "现在{time_now}你正在上网和qq群里的网友们聊天群里正在聊的话题是\n{chat_observe_info}\n"
prompt += "你现在{mood_info}\n" prompt += "你现在{mood_info}\n"
prompt += "你注意到{sender_name}刚刚说:{message_txt}\n" prompt += "你注意到{sender_name}刚刚说:{message_txt}\n"
prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白" prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白"
prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话\n" prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话。如果你要回复,最好只回复一个人的一个话题\n"
prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写" prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{bot_name}{bot_name}指的就是你。" prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{bot_name}{bot_name}指的就是你。"
Prompt(prompt, "sub_heartflow_prompt_before") Prompt(prompt, "sub_heartflow_prompt_before")
prompt = "" prompt = ""
# prompt += f"你现在正在做的事情是:{schedule_info}\n" # prompt += f"你现在正在做的事情是:{schedule_info}\n"
prompt += "{extra_info}\n"
prompt += "{prompt_personality}\n" prompt += "{prompt_personality}\n"
prompt += "现在你正在上网和qq群里的网友们聊天群里正在聊的话题是{chat_observe_info}\n" prompt += "现在{time_now}你正在上网和qq群里的网友们聊天群里正在聊的话题是\n{chat_observe_info}\n"
prompt += "刚刚你的想法是{current_thinking_info}" prompt += "刚刚你的想法是{current_thinking_info}"
prompt += "你现在看到了网友们发的新消息:{message_new_info}\n" prompt += "你现在看到了网友们发的新消息:{message_new_info}\n"
prompt += "你刚刚回复了群友们:{reply_info}" prompt += "你刚刚回复了群友们:{reply_info}"
@@ -78,7 +80,10 @@ class SubHeartflow:
self.past_mind = [] self.past_mind = []
self.current_state: CurrentState = CurrentState() self.current_state: CurrentState = CurrentState()
self.llm_model = LLM_request( self.llm_model = LLM_request(
model=global_config.llm_sub_heartflow, temperature=0.2, max_tokens=600, request_type="sub_heart_flow" model=global_config.llm_sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"],
max_tokens=600,
request_type="sub_heart_flow",
) )
self.main_heartflow_info = "" self.main_heartflow_info = ""
@@ -91,14 +96,12 @@ class SubHeartflow:
self.is_active = False self.is_active = False
self.observations: list[Observation] = [] self.observations: list[ChattingObservation] = []
self.running_knowledges = [] self.running_knowledges = []
self.bot_name = global_config.BOT_NICKNAME self.bot_name = global_config.BOT_NICKNAME
self.tool_user = ToolUser()
def add_observation(self, observation: Observation): def add_observation(self, observation: Observation):
"""添加一个新的observation对象到列表中如果已存在相同id的observation则不添加""" """添加一个新的observation对象到列表中如果已存在相同id的observation则不添加"""
# 查找是否存在相同id的observation # 查找是否存在相同id的observation
@@ -151,25 +154,24 @@ class SubHeartflow:
observation = self.observations[0] observation = self.observations[0]
await observation.observe() await observation.observe()
async def do_thinking_before_reply(self, message_txt: str, sender_name: str, chat_stream: ChatStream): async def do_thinking_before_reply(
self, message_txt: str, sender_info: UserInfo, chat_stream: ChatStream, extra_info: str, obs_id: int = None
):
current_thinking_info = self.current_mind current_thinking_info = self.current_mind
mood_info = self.current_state.mood mood_info = self.current_state.mood
# mood_info = "你很生气,很愤怒" # mood_info = "你很生气,很愤怒"
observation = self.observations[0] observation = self.observations[0]
chat_observe_info = observation.observe_info if obs_id:
# print(f"chat_observe_info{chat_observe_info}") print(f"11111111111有id,开始获取观察信息{obs_id}")
chat_observe_info = observation.get_observe_info(obs_id)
else:
chat_observe_info = observation.get_observe_info()
# 首先尝试使用工具获取更多信息 extra_info_prompt = ""
tool_result = await self.tool_user.use_tool(message_txt, sender_name, chat_stream) for tool_name, tool_data in extra_info.items():
extra_info_prompt += f"{tool_name} 相关信息:\n"
# 如果工具被使用且获得了结果,将收集到的信息合并到思考中 for item in tool_data:
collected_info = "" extra_info_prompt += f"- {item['name']}: {item['content']}\n"
if tool_result.get("used_tools", False):
logger.info("使用工具收集了信息")
# 如果有收集到的信息,将其添加到当前思考中
if "collected_info" in tool_result:
collected_info = tool_result["collected_info"]
# 开始构建prompt # 开始构建prompt
prompt_personality = f"你的名字是{self.bot_name},你" prompt_personality = f"你的名字是{self.bot_name},你"
@@ -206,9 +208,11 @@ class SubHeartflow:
# f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。" # f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
# ) # )
relation_prompt_all = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format( relation_prompt_all = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format(
relation_prompt, sender_name relation_prompt, sender_info.user_nickname
) )
sender_name_sign = f"<{chat_stream.platform}:{sender_info.user_id}:{sender_info.user_nickname}:{sender_info.user_cardname}>"
# prompt = "" # prompt = ""
# # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n" # # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
# if tool_result.get("used_tools", False): # if tool_result.get("used_tools", False):
@@ -225,18 +229,25 @@ class SubHeartflow:
# prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写" # prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
# prompt += f"记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{self.bot_name}{self.bot_name}指的就是你。" # prompt += f"记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{self.bot_name}{self.bot_name}指的就是你。"
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format( prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
collected_info, extra_info_prompt,
# prompt_schedule,
relation_prompt_all, relation_prompt_all,
prompt_personality, prompt_personality,
current_thinking_info, current_thinking_info,
time_now,
chat_observe_info, chat_observe_info,
mood_info, mood_info,
sender_name, sender_name_sign,
message_txt, message_txt,
self.bot_name, self.bot_name,
) )
prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
prompt = parse_text_timestamps(prompt, mode="lite")
try: try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt) response, reasoning_content = await self.llm_model.generate_response_async(prompt)
except Exception as e: except Exception as e:
@@ -250,7 +261,7 @@ class SubHeartflow:
logger.info(f"麦麦的思考前脑内状态:{self.current_mind}") logger.info(f"麦麦的思考前脑内状态:{self.current_mind}")
return self.current_mind, self.past_mind return self.current_mind, self.past_mind
async def do_thinking_after_reply(self, reply_content, chat_talking_prompt): async def do_thinking_after_reply(self, reply_content, chat_talking_prompt, extra_info):
# print("麦麦回复之后脑袋转起来了") # print("麦麦回复之后脑袋转起来了")
# 开始构建prompt # 开始构建prompt
@@ -261,6 +272,12 @@ class SubHeartflow:
personality_core = individuality.personality.personality_core personality_core = individuality.personality.personality_core
prompt_personality += personality_core prompt_personality += personality_core
extra_info_prompt = ""
for tool_name, tool_data in extra_info.items():
extra_info_prompt += f"{tool_name} 相关信息:\n"
for item in tool_data:
extra_info_prompt += f"- {item['name']}: {item['content']}\n"
personality_sides = individuality.personality.personality_sides personality_sides = individuality.personality.personality_sides
random.shuffle(personality_sides) random.shuffle(personality_sides)
prompt_personality += f",{personality_sides[0]}" prompt_personality += f",{personality_sides[0]}"
@@ -277,22 +294,23 @@ class SubHeartflow:
message_new_info = chat_talking_prompt message_new_info = chat_talking_prompt
reply_info = reply_content reply_info = reply_content
# schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False)
# prompt = "" time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# # prompt += f"你现在正在做的事情是:{schedule_info}\n"
# prompt += f"{prompt_personality}\n"
# prompt += f"现在你正在上网和qq群里的网友们聊天群里正在聊的话题是{chat_observe_info}\n"
# prompt += f"刚刚你的想法是{current_thinking_info}。"
# prompt += f"你现在看到了网友们发的新消息:{message_new_info}\n"
# prompt += f"你刚刚回复了群友们:{reply_info}"
# prompt += f"你现在{mood_info}"
# prompt += "现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白"
# prompt += "不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,关注你回复的内容,不要思考太多:"
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after")).format( prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after")).format(
prompt_personality, chat_observe_info, current_thinking_info, message_new_info, reply_info, mood_info extra_info_prompt,
prompt_personality,
time_now,
chat_observe_info,
current_thinking_info,
message_new_info,
reply_info,
mood_info,
) )
prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
prompt = parse_text_timestamps(prompt, mode="lite")
try: try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt) response, reasoning_content = await self.llm_model.generate_response_async(prompt)
except Exception as e: except Exception as e:
@@ -305,48 +323,6 @@ class SubHeartflow:
self.last_reply_time = time.time() self.last_reply_time = time.time()
async def judge_willing(self):
# 开始构建prompt
prompt_personality = ""
# person
individuality = Individuality.get_instance()
personality_core = individuality.personality.personality_core
prompt_personality += personality_core
personality_sides = individuality.personality.personality_sides
random.shuffle(personality_sides)
prompt_personality += f",{personality_sides[0]}"
identity_detail = individuality.identity.identity_detail
random.shuffle(identity_detail)
prompt_personality += f",{identity_detail[0]}"
# print("麦麦闹情绪了1")
current_thinking_info = self.current_mind
mood_info = self.current_state.mood
# print("麦麦闹情绪了2")
prompt = ""
prompt += f"{prompt_personality}\n"
prompt += "现在你正在上网和qq群里的网友们聊天"
prompt += f"你现在的想法是{current_thinking_info}"
prompt += f"你现在{mood_info}"
prompt += "现在请你思考你想不想发言或者回复请你输出一个数字1-101表示非常不想10表示非常想。"
prompt += "请你用<>包裹你的回复意愿,输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"
try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
# 解析willing值
willing_match = re.search(r"<(\d+)>", response)
except Exception as e:
logger.error(f"意愿判断获取失败: {e}")
willing_match = None
if willing_match:
self.current_state.willing = int(willing_match.group(1))
else:
self.current_state.willing = 0
return self.current_state.willing
def update_current_mind(self, response): def update_current_mind(self, response):
self.past_mind.append(self.current_mind) self.past_mind.append(self.current_mind)
self.current_mind = response self.current_mind = response

View File

@@ -24,7 +24,10 @@ class ActionPlanner:
def __init__(self, stream_id: str): def __init__(self, stream_id: str):
self.llm = LLM_request( self.llm = LLM_request(
model=global_config.llm_normal, temperature=0.2, max_tokens=1000, request_type="action_planning" model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=1000,
request_type="action_planning",
) )
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2) self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME self.name = global_config.BOT_NICKNAME

View File

@@ -100,15 +100,15 @@ class NotificationManager:
""" """
print(1145145511114445551111444) print(1145145511114445551111444)
if target not in self._handlers: if target not in self._handlers:
print("没11有target") # print("没11有target")
self._handlers[target] = {} self._handlers[target] = {}
if notification_type not in self._handlers[target]: if notification_type not in self._handlers[target]:
print("没11有notification_type") # print("没11有notification_type")
self._handlers[target][notification_type] = [] self._handlers[target][notification_type] = []
print(self._handlers[target][notification_type]) # print(self._handlers[target][notification_type])
print(f"注册1111111111111111111111处理器: {target} {notification_type} {handler}") # print(f"注册1111111111111111111111处理器: {target} {notification_type} {handler}")
self._handlers[target][notification_type].append(handler) self._handlers[target][notification_type].append(handler)
print(self._handlers[target][notification_type]) # print(self._handlers[target][notification_type])
def unregister_handler(self, target: str, notification_type: NotificationType, handler: NotificationHandler): def unregister_handler(self, target: str, notification_type: NotificationType, handler: NotificationHandler):
"""注销通知处理器 """注销通知处理器

View File

@@ -3,7 +3,8 @@ from src.common.logger import get_module_logger
from ..chat.chat_stream import ChatStream from ..chat.chat_stream import ChatStream
from ..chat.message import Message from ..chat.message import Message
from ..message.message_base import Seg from ..message.message_base import Seg
from src.plugins.chat.message import MessageSending from src.plugins.chat.message import MessageSending, MessageSet
from src.plugins.chat.message_sender import message_manager
logger = get_module_logger("message_sender") logger = get_module_logger("message_sender")
@@ -39,9 +40,11 @@ class DirectMessageSender:
message_sending = MessageSending(segments=segments) message_sending = MessageSending(segments=segments)
# 发送消息 # 发送消息
await chat_stream.send_message(message_sending) message_set = MessageSet(chat_stream, message_sending.message_id)
logger.info(f"消息已发送: {content}") message_set.add_message(message_sending)
message_manager.add_message(message_set)
logger.info(f"PFC消息已发送: {content}")
except Exception as e: except Exception as e:
logger.error(f"发送消息失败: {str(e)}") logger.error(f"PFC消息发送失败: {str(e)}")
raise raise

View File

@@ -50,21 +50,18 @@ class MessageStorage(ABC):
class MongoDBMessageStorage(MessageStorage): class MongoDBMessageStorage(MessageStorage):
"""MongoDB消息存储实现""" """MongoDB消息存储实现"""
def __init__(self):
self.db = db
async def get_messages_after(self, chat_id: str, message_time: float) -> List[Dict[str, Any]]: async def get_messages_after(self, chat_id: str, message_time: float) -> List[Dict[str, Any]]:
query = {"chat_id": chat_id} query = {"chat_id": chat_id}
# print(f"storage_check_message: {message_time}") # print(f"storage_check_message: {message_time}")
query["time"] = {"$gt": message_time} query["time"] = {"$gt": message_time}
return list(self.db.messages.find(query).sort("time", 1)) return list(db.messages.find(query).sort("time", 1))
async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]: async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
query = {"chat_id": chat_id, "time": {"$lt": time_point}} query = {"chat_id": chat_id, "time": {"$lt": time_point}}
messages = list(self.db.messages.find(query).sort("time", -1).limit(limit)) messages = list(db.messages.find(query).sort("time", -1).limit(limit))
# 将消息按时间正序排列 # 将消息按时间正序排列
messages.reverse() messages.reverse()
@@ -73,7 +70,7 @@ class MongoDBMessageStorage(MessageStorage):
async def has_new_messages(self, chat_id: str, after_time: float) -> bool: async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
query = {"chat_id": chat_id, "time": {"$gt": after_time}} query = {"chat_id": chat_id, "time": {"$gt": after_time}}
return self.db.messages.find_one(query) is not None return db.messages.find_one(query) is not None
# # 创建一个内存消息存储实现,用于测试 # # 创建一个内存消息存储实现,用于测试

View File

@@ -299,6 +299,12 @@ class DirectMessageSender:
self.logger = get_module_logger("direct_sender") self.logger = get_module_logger("direct_sender")
self.storage = MessageStorage() self.storage = MessageStorage()
async def send_via_ws(self, message: MessageSending) -> None:
try:
await global_api.send_message(message)
except Exception as e:
raise ValueError(f"未找到平台:{message.message_info.platform} 的url配置请检查配置文件") from e
async def send_message( async def send_message(
self, self,
chat_stream: ChatStream, chat_stream: ChatStream,
@@ -335,21 +341,22 @@ class DirectMessageSender:
# 处理消息 # 处理消息
await message.process() await message.process()
message_json = message.to_dict()
# 发送消息 # 发送消息
try: try:
message_json = message.to_dict() end_point = global_config.api_urls.get(message.message_info.platform, None)
end_point = global_config.api_urls.get(chat_stream.platform, None) if end_point:
# logger.info(f"发送消息到{end_point}")
if not end_point: # logger.info(message_json)
raise ValueError(f"未找到平台:{chat_stream.platform} 的url配置") try:
await global_api.send_message_REST(end_point, message_json) await global_api.send_message_REST(end_point, message_json)
# 存储消息
await self.storage.store_message(message, message.chat_stream)
self.logger.info(f"直接发送消息成功: {content[:30]}...")
except Exception as e: except Exception as e:
self.logger.error(f"直接发送消息失败: {str(e)}") logger.error(f"REST方式发送失败出现错误: {str(e)}")
raise logger.info("尝试使用ws发送")
await self.send_via_ws(message)
else:
await self.send_via_ws(message)
logger.success(f"PFC消息已发送: {content}")
except Exception as e:
logger.error(f"PFC消息发送失败: {str(e)}")

View File

@@ -13,7 +13,10 @@ class KnowledgeFetcher:
def __init__(self): def __init__(self):
self.llm = LLM_request( self.llm = LLM_request(
model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="knowledge_fetch" model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=1000,
request_type="knowledge_fetch",
) )
async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]: async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:

View File

@@ -16,7 +16,10 @@ class ReplyGenerator:
def __init__(self, stream_id: str): def __init__(self, stream_id: str):
self.llm = LLM_request( self.llm = LLM_request(
model=global_config.llm_normal, temperature=0.2, max_tokens=300, request_type="reply_generation" model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=300,
request_type="reply_generation",
) )
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2) self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME self.name = global_config.BOT_NICKNAME

View File

@@ -3,7 +3,6 @@ from ..person_info.relationship_manager import relationship_manager
from .chat_stream import chat_manager from .chat_stream import chat_manager
from .message_sender import message_manager from .message_sender import message_manager
from ..storage.storage import MessageStorage from ..storage.storage import MessageStorage
from .auto_speak import auto_speak_manager
__all__ = [ __all__ = [
@@ -12,5 +11,4 @@ __all__ = [
"chat_manager", "chat_manager",
"message_manager", "message_manager",
"MessageStorage", "MessageStorage",
"auto_speak_manager",
] ]

View File

@@ -1,184 +0,0 @@
import time
import asyncio
import random
from random import random as random_float
from typing import Dict
from ..config.config import global_config
from .message import MessageSending, MessageThinking, MessageSet, MessageRecv
from ..message.message_base import UserInfo, Seg
from .message_sender import message_manager
from ..moods.moods import MoodManager
from ..chat_module.reasoning_chat.reasoning_generator import ResponseGenerator
from src.common.logger import get_module_logger
from src.heart_flow.heartflow import heartflow
from ...common.database import db
logger = get_module_logger("auto_speak")
class AutoSpeakManager:
    """Periodically lets the bot speak unprompted in active chat streams.

    A background asyncio task scans heartflow's subheartflows, picks one
    active flow at random and — subject to a per-chat cooldown plus a
    willingness/probability gate — generates and queues an autonomous reply.
    """

    def __init__(self):
        # Per-chat timestamp of the last autonomous message (cooldown bookkeeping).
        self._last_auto_speak_time: Dict[str, float] = {}
        self.mood_manager = MoodManager.get_instance()
        self.gpt = ResponseGenerator()  # LLM wrapper used to generate replies
        self._started = False  # guards against starting the check task twice
        self._check_task = None  # handle of the asyncio background task
        self.db = db

    async def get_chat_info(self, chat_id: str) -> dict:
        """Fetch chat-stream info for ``chat_id`` from the database.

        NOTE(review): ``find_one`` is awaited here, so ``self.db`` is
        presumably an async driver (e.g. motor) — confirm against the
        database module.
        """
        chat_info = await self.db.chat_streams.find_one({"stream_id": chat_id})
        return chat_info

    async def start_auto_speak_check(self):
        """Start the periodic auto-speak background task (idempotent)."""
        if not self._started:
            self._check_task = asyncio.create_task(self._periodic_check())
            self._started = True
            logger.success("自动发言检查任务已启动")

    async def _periodic_check(self):
        """Loop (while think-flow is enabled) deciding whether to auto-speak."""
        while True and global_config.enable_think_flow:
            # Collect all active subheartflows with positive willingness.
            # (The original comment claimed a 0.5 threshold; the code checks > 0.)
            active_subheartflows = []
            for chat_id, subheartflow in heartflow._subheartflows.items():
                if (
                    subheartflow.is_active and subheartflow.current_state.willing > 0
                ):
                    active_subheartflows.append((chat_id, subheartflow))
                    logger.debug(
                        f"发现活跃子心流 - 聊天ID: {chat_id}, 意愿值: {subheartflow.current_state.willing:.2f}"
                    )

            if not active_subheartflows:
                logger.debug("当前没有活跃的子心流")
                await asyncio.sleep(20)  # nothing to do; wait before re-scanning
                continue

            # Pick one active subheartflow at random.
            chat_id, subheartflow = random.choice(active_subheartflows)
            logger.info(f"随机选择子心流 - 聊天ID: {chat_id}, 意愿值: {subheartflow.current_state.willing:.2f}")

            # Decide whether this flow should speak autonomously right now.
            if await self.check_auto_speak(subheartflow):
                logger.info(f"准备自主发言 - 聊天ID: {chat_id}")
                # Build the bot's own identity for the outgoing message.
                bot_user_info = UserInfo(
                    user_id=global_config.BOT_QQ,
                    user_nickname=global_config.BOT_NICKNAME,
                    platform="qq",  # defaults to the qq platform
                )

                # Create an empty MessageRecv object as the conversational context.
                message = MessageRecv(
                    {
                        "message_info": {
                            "user_info": {"user_id": chat_id, "user_nickname": "", "platform": "qq"},
                            "group_info": None,
                            "platform": "qq",
                            "time": time.time(),
                        },
                        "processed_plain_text": "",
                        "raw_message": "",
                        "is_emoji": False,
                    }
                )

                await self.generate_auto_speak(
                    subheartflow, message, bot_user_info, message.message_info["user_info"], message.message_info
                )
            else:
                logger.debug(f"不满足自主发言条件 - 聊天ID: {chat_id}")

            # Re-check on a fixed cadence (20s, despite the original
            # "once a minute" note).
            await asyncio.sleep(20)
            # await asyncio.sleep(5)  # wait 5s before continuing after an error

    async def check_auto_speak(self, subheartflow) -> bool:
        """Return True when this subheartflow may speak autonomously now.

        Gate: a 30-second per-chat cooldown, then willing > 0.1 combined
        with a 50% random draw. A successful check also stamps the
        cooldown clock for that chat.
        """
        if not subheartflow:
            return False

        current_time = time.time()
        chat_id = subheartflow.observe_chat_id

        # Initialise the cooldown entry on first sight of this chat.
        if chat_id not in self._last_auto_speak_time:
            self._last_auto_speak_time[chat_id] = 0
        last_speak_time = self._last_auto_speak_time.get(chat_id, 0)

        # Enforce the 30-second cooldown between autonomous messages.
        # (The original comment said 5 minutes; the code uses 30 seconds.)
        if current_time - last_speak_time < 30:
            logger.debug(
                f"距离上次发言时间太短 - 聊天ID: {chat_id}, 剩余时间: {30 - (current_time - last_speak_time):.1f}"
            )
            return False

        # Willingness gate plus a coin flip.
        current_willing = subheartflow.current_state.willing
        if current_willing > 0.1 and random_float() < 0.5:
            self._last_auto_speak_time[chat_id] = current_time
            logger.info(f"满足自主发言条件 - 聊天ID: {chat_id}, 意愿值: {current_willing:.2f}")
            return True

        logger.debug(f"不满足自主发言条件 - 聊天ID: {chat_id}, 意愿值: {current_willing:.2f}")
        return False

    async def generate_auto_speak(self, subheartflow, message, bot_user_info: UserInfo, userinfo, messageinfo):
        """Generate the autonomous reply and queue it for sending.

        Returns:
            bool: True when a reply was produced and queued, False when
            generation failed or produced nothing.
        """
        thinking_time_point = round(time.time(), 2)
        think_id = "mt" + str(thinking_time_point)
        # Placeholder "thinking" message shown while the reply is generated.
        thinking_message = MessageThinking(
            message_id=think_id,
            chat_stream=None,  # no chat_stream needed
            bot_user_info=bot_user_info,
            reply=message,
            thinking_start_time=thinking_time_point,
        )

        message_manager.add_message(thinking_message)

        # Ask the LLM for the autonomous reply content.
        try:
            response, raw_content = await self.gpt.generate_response(message)
        except Exception as e:
            logger.error(f"生成自主发言内容时发生错误: {e}")
            return False

        if response:
            message_set = MessageSet(None, think_id)  # no chat_stream needed
            mark_head = False
            # Wrap each generated sentence as an outgoing text segment; only
            # the first one in the set is flagged as the head.
            for msg in response:
                message_segment = Seg(type="text", data=msg)
                bot_message = MessageSending(
                    message_id=think_id,
                    chat_stream=None,  # no chat_stream needed
                    bot_user_info=bot_user_info,
                    sender_info=userinfo,
                    message_segment=message_segment,
                    reply=message,
                    is_head=not mark_head,
                    is_emoji=False,
                    thinking_start_time=thinking_time_point,
                )
                if not mark_head:
                    mark_head = True
                message_set.add_message(bot_message)
            message_manager.add_message(message_set)

            # Update mood from the emotion tags of what was just said.
            stance, emotion = await self.gpt._get_emotion_tags(raw_content, message.processed_plain_text)
            self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
            return True
        return False


# Global AutoSpeakManager instance
auto_speak_manager = AutoSpeakManager()

View File

@@ -38,7 +38,7 @@ class ChatBot:
async def _ensure_started(self): async def _ensure_started(self):
"""确保所有任务已启动""" """确保所有任务已启动"""
if not self._started: if not self._started:
logger.info("确保ChatBot所有任务已启动") logger.trace("确保ChatBot所有任务已启动")
self._started = True self._started = True
@@ -65,10 +65,6 @@ class ChatBot:
- 没有思维流相关的状态管理 - 没有思维流相关的状态管理
- 更简单直接的回复逻辑 - 更简单直接的回复逻辑
3. pfc_chatting模式仅进行消息处理
- 不进行任何回复
- 只处理和存储消息
所有模式都包含: 所有模式都包含:
- 消息过滤 - 消息过滤
- 记忆激活 - 记忆激活
@@ -84,7 +80,7 @@ class ChatBot:
message = MessageRecv(message_data) message = MessageRecv(message_data)
groupinfo = message.message_info.group_info groupinfo = message.message_info.group_info
userinfo = message.message_info.user_info userinfo = message.message_info.user_info
logger.debug(f"处理消息:{str(message_data)[:120]}...") logger.trace(f"处理消息:{str(message_data)[:120]}...")
if userinfo.user_id in global_config.ban_user_id: if userinfo.user_id in global_config.ban_user_id:
logger.debug(f"用户{userinfo.user_id}被禁止回复") logger.debug(f"用户{userinfo.user_id}被禁止回复")

View File

@@ -142,14 +142,18 @@ class MessageRecv(Message):
def _generate_detailed_text(self) -> str: def _generate_detailed_text(self) -> str:
"""生成详细文本,包含时间和用户信息""" """生成详细文本,包含时间和用户信息"""
time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time)) # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
time = self.message_info.time
user_info = self.message_info.user_info user_info = self.message_info.user_info
# name = (
# f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
# if user_info.user_cardname != None
# else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
# )
name = ( name = (
f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})" f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
if user_info.user_cardname != None
else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
) )
return f"[{time_str}] {name}: {self.processed_plain_text}\n" return f"[{time}] {name}: {self.processed_plain_text}\n"
@dataclass @dataclass
@@ -239,14 +243,18 @@ class MessageProcessBase(Message):
def _generate_detailed_text(self) -> str: def _generate_detailed_text(self) -> str:
"""生成详细文本,包含时间和用户信息""" """生成详细文本,包含时间和用户信息"""
time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time)) # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
time = self.message_info.time
user_info = self.message_info.user_info user_info = self.message_info.user_info
# name = (
# f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
# if user_info.user_cardname != None
# else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
# )
name = ( name = (
f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})" f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
if user_info.user_cardname != None
else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
) )
return f"[{time_str}] {name}: {self.processed_plain_text}\n" return f"[{time}] {name}: {self.processed_plain_text}\n"
@dataclass @dataclass

View File

@@ -153,11 +153,11 @@ class MessageBuffer:
# 更新当前消息的processed_plain_text # 更新当前消息的processed_plain_text
if combined_text and combined_text[0] != message.processed_plain_text and is_update: if combined_text and combined_text[0] != message.processed_plain_text and is_update:
if type == "text": if type == "text":
message.processed_plain_text = "".join(combined_text) message.processed_plain_text = "".join(combined_text)
logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容到当前消息") logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容到当前消息")
elif type == "emoji": elif type == "emoji":
combined_text.pop() combined_text.pop()
message.processed_plain_text = "".join(combined_text) message.processed_plain_text = "".join(combined_text)
message.is_emoji = False message.is_emoji = False
logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容覆盖当前emoji消息") logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容覆盖当前emoji消息")

View File

@@ -70,9 +70,9 @@ class Message_Sender:
thinking_start_time=message.thinking_start_time, thinking_start_time=message.thinking_start_time,
is_emoji=message.is_emoji, is_emoji=message.is_emoji,
) )
logger.debug(f"{message.processed_plain_text},{typing_time},计算输入时间结束") logger.trace(f"{message.processed_plain_text},{typing_time},计算输入时间结束")
await asyncio.sleep(typing_time) await asyncio.sleep(typing_time)
logger.debug(f"{message.processed_plain_text},{typing_time},等待输入时间结束") logger.trace(f"{message.processed_plain_text},{typing_time},等待输入时间结束")
message_json = message.to_dict() message_json = message.to_dict()

View File

@@ -334,27 +334,35 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> List[str]: def process_llm_response(text: str) -> List[str]:
# processed_response = process_text_with_typos(content) # 提取被 () 或 [] 包裹的内容
# 对西文字符段落的回复长度设置为汉字字符的两倍 pattern = re.compile(r"[\(\[].*?[\)\]]")
max_length = global_config.response_max_length _extracted_contents = pattern.findall(text)
# 去除 () 和 [] 及其包裹的内容
cleaned_text = pattern.sub("", text)
logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")
# 对清理后的文本进行进一步处理
max_length = global_config.response_max_length * 2
max_sentence_num = global_config.response_max_sentence_num max_sentence_num = global_config.response_max_sentence_num
if len(text) > max_length and not is_western_paragraph(text): if len(cleaned_text) > max_length and not is_western_paragraph(cleaned_text):
logger.warning(f"回复过长 ({len(text)} 字符),返回默认回复") logger.warning(f"回复过长 ({len(cleaned_text)} 字符),返回默认回复")
return ["懒得说"] return ["懒得说"]
elif len(text) > 200: elif len(cleaned_text) > 200:
logger.warning(f"回复过长 ({len(text)} 字符),返回默认回复") logger.warning(f"回复过长 ({len(cleaned_text)} 字符),返回默认回复")
return ["懒得说"] return ["懒得说"]
# 处理长消息
typo_generator = ChineseTypoGenerator( typo_generator = ChineseTypoGenerator(
error_rate=global_config.chinese_typo_error_rate, error_rate=global_config.chinese_typo_error_rate,
min_freq=global_config.chinese_typo_min_freq, min_freq=global_config.chinese_typo_min_freq,
tone_error_rate=global_config.chinese_typo_tone_error_rate, tone_error_rate=global_config.chinese_typo_tone_error_rate,
word_replace_rate=global_config.chinese_typo_word_replace_rate, word_replace_rate=global_config.chinese_typo_word_replace_rate,
) )
if global_config.enable_response_spliter:
split_sentences = split_into_sentences_w_remove_punctuation(text) if global_config.enable_response_splitter:
split_sentences = split_into_sentences_w_remove_punctuation(cleaned_text)
else: else:
split_sentences = [text] split_sentences = [cleaned_text]
sentences = [] sentences = []
for sentence in split_sentences: for sentence in split_sentences:
if global_config.chinese_typo_enable: if global_config.chinese_typo_enable:
@@ -364,12 +372,13 @@ def process_llm_response(text: str) -> List[str]:
sentences.append(typo_corrections) sentences.append(typo_corrections)
else: else:
sentences.append(sentence) sentences.append(sentence)
# 检查分割后的消息数量是否过多超过3条
if len(sentences) > max_sentence_num: if len(sentences) > max_sentence_num:
logger.warning(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复") logger.warning(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
return [f"{global_config.BOT_NICKNAME}不知道哦"] return [f"{global_config.BOT_NICKNAME}不知道哦"]
# sentences.extend(extracted_contents)
return sentences return sentences
@@ -630,3 +639,141 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
except Exception as e: except Exception as e:
logger.error(f"计算消息数量时出错: {str(e)}") logger.error(f"计算消息数量时出错: {str(e)}")
return 0, 0 return 0, 0
def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
    """Render a unix timestamp as a human-readable string.

    Args:
        timestamp: unix timestamp in seconds.
        mode: "normal" -> absolute local time "%Y-%m-%d %H:%M:%S";
              "relative" -> an age phrase relative to now ("刚刚:", "N秒前:", ...).

    Returns:
        The formatted string. For any mode other than "normal"/"relative"
        the function falls through and returns None (preserved from the
        original implementation).
    """
    if mode == "normal":
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
    if mode == "relative":
        age = time.time() - timestamp
        if age < 20:
            return "刚刚:"
        # (upper bound, divisor, suffix) — some suffixes carry a trailing
        # newline, exactly as in the original branch chain.
        for bound, divisor, suffix in (
            (60, 1, "秒前:"),
            (1800, 60, "分钟前:"),
            (3600, 60, "分钟前:\n"),
            (86400, 3600, "小时前:\n"),
            (604800, 86400, "天前:\n"),
        ):
            if age < bound:
                return f"{int(age / divisor)}{suffix}"
        # Older than a week: absolute time with a trailing full-width colon.
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":"
def parse_text_timestamps(text: str, mode: str = "normal") -> str:
    """Find bracketed timestamps in ``text`` and convert them to readable times.

    Args:
        text: text containing timestamps wrapped in ``[]``, e.g. ``[1700000000]``
            or ``[1700000000.5]``.
        mode: "normal" converts every timestamp to an absolute time string.
            Any other value triggers "lite" mode (the code keys only off
            "normal", not "relative" as the original docstring claimed).

    Returns:
        str: the text with timestamps replaced.

    Conversion rules:
        - normal mode: every timestamp is converted.
        - lite mode:
            - the first and last timestamps are always converted;
            - timestamps are grouped into 15-second buckets (the original
              comments said 5 seconds; the code uses ``ts // 15``) and at
              most one timestamp per bucket is converted;
            - all other timestamps are replaced with the empty string.
    """
    # Match [digits] or [digits.digits] style timestamps.
    pattern = r'\[(\d+(?:\.\d+)?)\]'

    # Find every timestamp occurrence in the text.
    matches = list(re.finditer(pattern, text))

    if not matches:
        return text

    # normal mode: convert every timestamp in place.
    if mode == "normal":
        result_text = text
        for match in matches:
            timestamp = float(match.group(1))
            readable_time = translate_timestamp_to_human_readable(timestamp, "normal")
            # Replacements change the text length, so each occurrence is
            # re-matched literally (escaped) and replaced one at a time.
            pattern_instance = re.escape(match.group(0))
            result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
        return result_text
    else:
        # lite mode: bucket by 15-second intervals and convert selectively.
        result_text = text

        # Extract all timestamps together with their match objects.
        timestamps = [(float(m.group(1)), m) for m in matches]
        timestamps.sort(key=lambda x: x[0])  # ascending by timestamp value

        if not timestamps:
            return text

        # Grab the earliest and latest timestamps (always converted).
        first_timestamp, first_match = timestamps[0]
        last_timestamp, last_match = timestamps[-1]

        # Bucket the time range into fixed-width segments.
        time_segments = {}

        # Group all timestamps into 15-second buckets.
        for ts, match in timestamps:
            segment_key = int(ts // 15)  # integer bucket index
            if segment_key not in time_segments:
                time_segments[segment_key] = []
            time_segments[segment_key].append((ts, match))

        # Timestamps selected for conversion.
        to_convert = []

        # Take one timestamp (the first) from each bucket.
        for segment, segment_timestamps in time_segments.items():
            to_convert.append(segment_timestamps[0])

        # Make sure the first and last timestamps are in the conversion list.
        first_in_list = False
        last_in_list = False

        for ts, match in to_convert:
            if ts == first_timestamp:
                first_in_list = True
            if ts == last_timestamp:
                last_in_list = True

        if not first_in_list:
            to_convert.append((first_timestamp, first_match))
        if not last_in_list:
            to_convert.append((last_timestamp, last_match))

        # Literal-token set of the timestamps to convert, for fast lookup.
        to_convert_set = {match.group(0) for _, match in to_convert}

        # First erase every timestamp that will NOT be converted.
        for ts, match in timestamps:
            if match.group(0) not in to_convert_set:
                pattern_instance = re.escape(match.group(0))
                result_text = re.sub(pattern_instance, "", result_text, count=1)

        # Sort by original text position so replacements don't mis-target.
        to_convert.sort(key=lambda x: x[1].start())

        # Replace from the back of the text towards the front, since each
        # replacement changes the text length.
        to_convert.reverse()
        for ts, match in to_convert:
            readable_time = translate_timestamp_to_human_readable(ts, "relative")
            pattern_instance = re.escape(match.group(0))
            result_text = re.sub(pattern_instance, readable_time, result_text, count=1)

        return result_text

View File

@@ -19,6 +19,7 @@ from ...chat.chat_stream import chat_manager
from ...person_info.relationship_manager import relationship_manager from ...person_info.relationship_manager import relationship_manager
from ...chat.message_buffer import message_buffer from ...chat.message_buffer import message_buffer
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ...utils.timer_calculater import Timer
# 定义日志配置 # 定义日志配置
chat_config = LogConfig( chat_config = LogConfig(
@@ -173,12 +174,10 @@ class ReasoningChat:
await self.storage.store_message(message, chat) await self.storage.store_message(message, chat)
# 记忆激活 # 记忆激活
timer1 = time.time() with Timer("记忆激活", timing_results):
interested_rate = await HippocampusManager.get_instance().get_activate_from_text( interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True message.processed_plain_text, fast_retrieval=True
) )
timer2 = time.time()
timing_results["记忆激活"] = timer2 - timer1
# 查询缓冲器结果会整合前面跳过的消息改变processed_plain_text # 查询缓冲器结果会整合前面跳过的消息改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message) buffer_result = await message_buffer.query_buffer_result(message)
@@ -228,10 +227,8 @@ class ReasoningChat:
await willing_manager.before_generate_reply_handle(message.message_info.message_id) await willing_manager.before_generate_reply_handle(message.message_info.message_id)
# 创建思考消息 # 创建思考消息
timer1 = time.time() with Timer("创建思考消息", timing_results):
thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo) thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
timer2 = time.time()
timing_results["创建思考消息"] = timer2 - timer1
logger.debug(f"创建捕捉器thinking_id:{thinking_id}") logger.debug(f"创建捕捉器thinking_id:{thinking_id}")
@@ -239,11 +236,9 @@ class ReasoningChat:
info_catcher.catch_decide_to_response(message) info_catcher.catch_decide_to_response(message)
# 生成回复 # 生成回复
timer1 = time.time()
try: try:
with Timer("生成回复", timing_results):
response_set = await self.gpt.generate_response(message, thinking_id) response_set = await self.gpt.generate_response(message, thinking_id)
timer2 = time.time()
timing_results["生成回复"] = timer2 - timer1
info_catcher.catch_after_generate_response(timing_results["生成回复"]) info_catcher.catch_after_generate_response(timing_results["生成回复"])
except Exception as e: except Exception as e:
@@ -255,26 +250,20 @@ class ReasoningChat:
return return
# 发送消息 # 发送消息
timer1 = time.time() with Timer("发送消息", timing_results):
first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id) first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
timer2 = time.time()
timing_results["发送消息"] = timer2 - timer1
info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg) info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg)
info_catcher.done_catch() info_catcher.done_catch()
# 处理表情包 # 处理表情包
timer1 = time.time() with Timer("处理表情包", timing_results):
await self._handle_emoji(message, chat, response_set) await self._handle_emoji(message, chat, response_set)
timer2 = time.time()
timing_results["处理表情包"] = timer2 - timer1
# 更新关系情绪 # 更新关系情绪
timer1 = time.time() with Timer("更新关系情绪", timing_results):
await self._update_relationship(message, response_set) await self._update_relationship(message, response_set)
timer2 = time.time()
timing_results["更新关系情绪"] = timer2 - timer1
# 回复后处理 # 回复后处理
await willing_manager.after_generate_reply_handle(message.message_info.message_id) await willing_manager.after_generate_reply_handle(message.message_info.message_id)

View File

@@ -1,4 +1,3 @@
import time
from typing import List, Optional, Tuple, Union from typing import List, Optional, Tuple, Union
import random import random
@@ -7,6 +6,7 @@ from ...config.config import global_config
from ...chat.message import MessageThinking from ...chat.message import MessageThinking
from .reasoning_prompt_builder import prompt_builder from .reasoning_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response from ...chat.utils import process_llm_response
from ...utils.timer_calculater import Timer
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
@@ -29,7 +29,10 @@ class ResponseGenerator:
request_type="response_reasoning", request_type="response_reasoning",
) )
self.model_normal = LLM_request( self.model_normal = LLM_request(
model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_reasoning" model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=256,
request_type="response_reasoning",
) )
self.model_sum = LLM_request( self.model_sum = LLM_request(
@@ -82,15 +85,14 @@ class ResponseGenerator:
logger.debug("开始使用生成回复-2") logger.debug("开始使用生成回复-2")
# 构建prompt # 构建prompt
timer1 = time.time() with Timer() as t_build_prompt:
prompt = await prompt_builder._build_prompt( prompt = await prompt_builder._build_prompt(
message.chat_stream, message.chat_stream,
message_txt=message.processed_plain_text, message_txt=message.processed_plain_text,
sender_name=sender_name, sender_name=sender_name,
stream_id=message.chat_stream.stream_id, stream_id=message.chat_stream.stream_id,
) )
timer2 = time.time() logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
logger.info(f"构建prompt时间: {timer2 - timer1}")
try: try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt) content, reasoning_content, self.current_model_name = await model.generate_response(prompt)

View File

@@ -175,7 +175,7 @@ class PromptBuilder:
# moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。 # moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
# 涉及政治敏感以及违法违规的内容请规避。""" # 涉及政治敏感以及违法违规的内容请规避。"""
logger.info("开始构建prompt") logger.debug("开始构建prompt")
# prompt = f""" # prompt = f"""
# {relation_prompt_all} # {relation_prompt_all}

View File

@@ -20,6 +20,8 @@ from ...chat.chat_stream import chat_manager
from ...person_info.relationship_manager import relationship_manager from ...person_info.relationship_manager import relationship_manager
from ...chat.message_buffer import message_buffer from ...chat.message_buffer import message_buffer
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ...utils.timer_calculater import Timer
from src.do_tool.tool_use import ToolUser
# 定义日志配置 # 定义日志配置
chat_config = LogConfig( chat_config = LogConfig(
@@ -36,6 +38,7 @@ class ThinkFlowChat:
self.gpt = ResponseGenerator() self.gpt = ResponseGenerator()
self.mood_manager = MoodManager.get_instance() self.mood_manager = MoodManager.get_instance()
self.mood_manager.start_mood_update() self.mood_manager.start_mood_update()
self.tool_user = ToolUser()
async def _create_thinking_message(self, message, chat, userinfo, messageinfo): async def _create_thinking_message(self, message, chat, userinfo, messageinfo):
"""创建思考消息""" """创建思考消息"""
@@ -105,18 +108,16 @@ class ThinkFlowChat:
message_manager.add_message(message_set) message_manager.add_message(message_set)
return first_bot_msg return first_bot_msg
async def _handle_emoji(self, message, chat, response): async def _handle_emoji(self, message, chat, response, send_emoji=""):
"""处理表情包""" """处理表情包"""
if random() < global_config.emoji_chance: if send_emoji:
emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji)
else:
emoji_raw = await emoji_manager.get_emoji_for_text(response) emoji_raw = await emoji_manager.get_emoji_for_text(response)
# print("11111111111111")
# logger.info(emoji_raw)
if emoji_raw: if emoji_raw:
emoji_path, description = emoji_raw emoji_path, description = emoji_raw
emoji_cq = image_path_to_base64(emoji_path) emoji_cq = image_path_to_base64(emoji_path)
# logger.info(emoji_cq)
thinking_time_point = round(message.message_info.time, 2) thinking_time_point = round(message.message_info.time, 2)
message_segment = Seg(type="emoji", data=emoji_cq) message_segment = Seg(type="emoji", data=emoji_cq)
@@ -135,20 +136,8 @@ class ThinkFlowChat:
is_emoji=True, is_emoji=True,
) )
# logger.info("22222222222222")
message_manager.add_message(bot_message) message_manager.add_message(bot_message)
async def _update_using_response(self, message, response_set):
"""更新心流状态"""
stream_id = message.chat_stream.stream_id
chat_talking_prompt = ""
if stream_id:
chat_talking_prompt = get_recent_group_detailed_plain_text(
stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
)
await heartflow.get_subheartflow(stream_id).do_thinking_after_reply(response_set, chat_talking_prompt)
async def _update_relationship(self, message: MessageRecv, response_set): async def _update_relationship(self, message: MessageRecv, response_set):
"""更新关系情绪""" """更新关系情绪"""
ori_response = ",".join(response_set) ori_response = ",".join(response_set)
@@ -183,26 +172,24 @@ class ThinkFlowChat:
heartflow.create_subheartflow(chat.stream_id) heartflow.create_subheartflow(chat.stream_id)
await message.process() await message.process()
logger.debug(f"消息处理成功{message.processed_plain_text}") logger.trace(f"消息处理成功{message.processed_plain_text}")
# 过滤词/正则表达式过滤 # 过滤词/正则表达式过滤
if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex( if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
message.raw_message, chat, userinfo message.raw_message, chat, userinfo
): ):
return return
logger.debug(f"过滤词/正则表达式过滤成功{message.processed_plain_text}") logger.trace(f"过滤词/正则表达式过滤成功{message.processed_plain_text}")
await self.storage.store_message(message, chat) await self.storage.store_message(message, chat)
logger.debug(f"存储成功{message.processed_plain_text}") logger.trace(f"存储成功{message.processed_plain_text}")
# 记忆激活 # 记忆激活
timer1 = time.time() with Timer("记忆激活", timing_results):
interested_rate = await HippocampusManager.get_instance().get_activate_from_text( interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True message.processed_plain_text, fast_retrieval=True
) )
timer2 = time.time() logger.trace(f"记忆激活: {interested_rate}")
timing_results["记忆激活"] = timer2 - timer1
logger.debug(f"记忆激活: {interested_rate}")
# 查询缓冲器结果会整合前面跳过的消息改变processed_plain_text # 查询缓冲器结果会整合前面跳过的消息改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message) buffer_result = await message_buffer.query_buffer_result(message)
@@ -225,13 +212,6 @@ class ThinkFlowChat:
logger.info("触发缓冲,已炸飞消息列") logger.info("触发缓冲,已炸飞消息列")
return return
# 计算回复意愿
# current_willing_old = willing_manager.get_willing(chat_stream=chat)
# # current_willing_new = (heartflow.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4
# # current_willing = (current_willing_old + current_willing_new) / 2
# # 有点bug
# current_willing = current_willing_old
# 获取回复概率 # 获取回复概率
is_willing = False is_willing = False
if reply_probability != 1: if reply_probability != 1:
@@ -255,6 +235,7 @@ class ThinkFlowChat:
do_reply = False do_reply = False
if random() < reply_probability: if random() < reply_probability:
try: try:
do_reply = True do_reply = True
# 回复前处理 # 回复前处理
@@ -262,49 +243,110 @@ class ThinkFlowChat:
# 创建思考消息 # 创建思考消息
try: try:
timer1 = time.time() with Timer("创建思考消息", timing_results):
thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo) thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
timer2 = time.time()
timing_results["创建思考消息"] = timer2 - timer1
except Exception as e: except Exception as e:
logger.error(f"心流创建思考消息失败: {e}") logger.error(f"心流创建思考消息失败: {e}")
logger.debug(f"创建捕捉器thinking_id:{thinking_id}") logger.trace(f"创建捕捉器thinking_id:{thinking_id}")
info_catcher = info_catcher_manager.get_info_catcher(thinking_id) info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
info_catcher.catch_decide_to_response(message) info_catcher.catch_decide_to_response(message)
try:
# 观察 # 观察
timer1 = time.time() try:
with Timer("观察", timing_results):
await heartflow.get_subheartflow(chat.stream_id).do_observe() await heartflow.get_subheartflow(chat.stream_id).do_observe()
timer2 = time.time()
timing_results["观察"] = timer2 - timer1
except Exception as e: except Exception as e:
logger.error(f"心流观察失败: {e}") logger.error(f"心流观察失败: {e}")
logger.error(traceback.format_exc())
info_catcher.catch_after_observe(timing_results["观察"]) info_catcher.catch_after_observe(timing_results["观察"])
# 思考前使用工具
update_relationship = ""
get_mid_memory_id = []
tool_result_info = {}
send_emoji = ""
try:
with Timer("思考前使用工具", timing_results):
tool_result = await self.tool_user.use_tool(
message.processed_plain_text,
message.message_info.user_info.user_nickname,
chat,
heartflow.get_subheartflow(chat.stream_id),
)
# 如果工具被使用且获得了结果,将收集到的信息合并到思考中
# collected_info = ""
if tool_result.get("used_tools", False):
if "structured_info" in tool_result:
tool_result_info = tool_result["structured_info"]
# collected_info = ""
get_mid_memory_id = []
update_relationship = ""
# 动态解析工具结果
for tool_name, tool_data in tool_result_info.items():
# tool_result_info += f"\n{tool_name} 相关信息:\n"
# for item in tool_data:
# tool_result_info += f"- {item['name']}: {item['content']}\n"
# 特殊判定mid_chat_mem
if tool_name == "mid_chat_mem":
for mid_memory in tool_data:
get_mid_memory_id.append(mid_memory["content"])
# 特殊判定change_mood
if tool_name == "change_mood":
for mood in tool_data:
self.mood_manager.update_mood_from_emotion(
mood["content"], global_config.mood_intensity_factor
)
# 特殊判定change_relationship
if tool_name == "change_relationship":
update_relationship = tool_data[0]["content"]
if tool_name == "send_emoji":
send_emoji = tool_data[0]["content"]
except Exception as e:
logger.error(f"思考前工具调用失败: {e}")
logger.error(traceback.format_exc())
# 处理关系更新
if update_relationship:
stance, emotion = await self.gpt._get_emotion_tags_with_reason(
"你还没有回复", message.processed_plain_text, update_relationship
)
await relationship_manager.calculate_update_relationship_value(
chat_stream=message.chat_stream, label=emotion, stance=stance
)
# 思考前脑内状态 # 思考前脑内状态
try: try:
timer1 = time.time() with Timer("思考前脑内状态", timing_results):
current_mind, past_mind = await heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply( current_mind, past_mind = await heartflow.get_subheartflow(
chat.stream_id
).do_thinking_before_reply(
message_txt=message.processed_plain_text, message_txt=message.processed_plain_text,
sender_name=message.message_info.user_info.user_nickname, sender_info=message.message_info.user_info,
chat_stream=chat, chat_stream=chat,
obs_id=get_mid_memory_id,
extra_info=tool_result_info,
) )
timer2 = time.time()
timing_results["思考前脑内状态"] = timer2 - timer1
except Exception as e: except Exception as e:
logger.error(f"心流思考前脑内状态失败: {e}") logger.error(f"心流思考前脑内状态失败: {e}")
logger.error(traceback.format_exc())
# 确保变量被定义,即使在错误情况下
current_mind = ""
past_mind = ""
info_catcher.catch_afer_shf_step(timing_results["思考前脑内状态"], past_mind, current_mind) info_catcher.catch_afer_shf_step(timing_results["思考前脑内状态"], past_mind, current_mind)
# 生成回复 # 生成回复
timer1 = time.time() with Timer("生成回复", timing_results):
response_set = await self.gpt.generate_response(message, thinking_id) response_set = await self.gpt.generate_response(message, thinking_id)
timer2 = time.time()
timing_results["生成回复"] = timer2 - timer1
info_catcher.catch_after_generate_response(timing_results["生成回复"]) info_catcher.catch_after_generate_response(timing_results["生成回复"])
@@ -314,10 +356,8 @@ class ThinkFlowChat:
# 发送消息 # 发送消息
try: try:
timer1 = time.time() with Timer("发送消息", timing_results):
first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id) first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
timer2 = time.time()
timing_results["发送消息"] = timer2 - timer1
except Exception as e: except Exception as e:
logger.error(f"心流发送消息失败: {e}") logger.error(f"心流发送消息失败: {e}")
@@ -327,34 +367,70 @@ class ThinkFlowChat:
# 处理表情包 # 处理表情包
try: try:
timer1 = time.time() with Timer("处理表情包", timing_results):
if global_config.emoji_chance == 1:
if send_emoji:
logger.info(f"麦麦决定发送表情包{send_emoji}")
await self._handle_emoji(message, chat, response_set, send_emoji)
else:
if random() < global_config.emoji_chance:
await self._handle_emoji(message, chat, response_set) await self._handle_emoji(message, chat, response_set)
timer2 = time.time()
timing_results["处理表情包"] = timer2 - timer1
except Exception as e: except Exception as e:
logger.error(f"心流处理表情包失败: {e}") logger.error(f"心流处理表情包失败: {e}")
# 更新心流 # 思考后脑内状态更新
try: try:
timer1 = time.time() with Timer("思考后脑内状态更新", timing_results):
await self._update_using_response(message, response_set) stream_id = message.chat_stream.stream_id
timer2 = time.time() chat_talking_prompt = ""
timing_results["更新心流"] = timer2 - timer1 if stream_id:
except Exception as e: chat_talking_prompt = get_recent_group_detailed_plain_text(
logger.error(f"心流更新失败: {e}") stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
)
# 更新关系情绪 await heartflow.get_subheartflow(stream_id).do_thinking_after_reply(
try: response_set, chat_talking_prompt, tool_result_info
timer1 = time.time() )
await self._update_relationship(message, response_set)
timer2 = time.time()
timing_results["更新关系情绪"] = timer2 - timer1
except Exception as e: except Exception as e:
logger.error(f"心流更新关系情绪失败: {e}") logger.error(f"心流思考后脑内状态更新失败: {e}")
logger.error(traceback.format_exc())
# 回复后处理 # 回复后处理
await willing_manager.after_generate_reply_handle(message.message_info.message_id) await willing_manager.after_generate_reply_handle(message.message_info.message_id)
# 处理认识关系
try:
is_known = await relationship_manager.is_known_some_one(
message.message_info.platform,
message.message_info.user_info.user_id
)
if not is_known:
logger.info(f"首次认识用户: {message.message_info.user_info.user_nickname}")
await relationship_manager.first_knowing_some_one(
message.message_info.platform,
message.message_info.user_info.user_id,
message.message_info.user_info.user_nickname,
message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
""
)
else:
logger.debug(f"已认识用户: {message.message_info.user_info.user_nickname}")
if not await relationship_manager.is_qved_name(
message.message_info.platform,
message.message_info.user_info.user_id
):
logger.info(f"更新已认识但未取名的用户: {message.message_info.user_info.user_nickname}")
await relationship_manager.first_knowing_some_one(
message.message_info.platform,
message.message_info.user_info.user_id,
message.message_info.user_info.user_nickname,
message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
""
)
except Exception as e:
logger.error(f"处理认识关系失败: {e}")
logger.error(traceback.format_exc())
except Exception as e: except Exception as e:
logger.error(f"心流处理消息失败: {e}") logger.error(f"心流处理消息失败: {e}")
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())

View File

@@ -1,4 +1,3 @@
import time
from typing import List, Optional from typing import List, Optional
import random import random
@@ -10,6 +9,7 @@ from .think_flow_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response from ...chat.utils import process_llm_response
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ...utils.timer_calculater import Timer
from src.plugins.moods.moods import MoodManager from src.plugins.moods.moods import MoodManager
@@ -26,7 +26,10 @@ logger = get_module_logger("llm_generator", config=llm_config)
class ResponseGenerator: class ResponseGenerator:
def __init__(self): def __init__(self):
self.model_normal = LLM_request( self.model_normal = LLM_request(
model=global_config.llm_normal, temperature=0.15, max_tokens=256, request_type="response_heartflow" model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=256,
request_type="response_heartflow",
) )
self.model_sum = LLM_request( self.model_sum = LLM_request(
@@ -44,13 +47,14 @@ class ResponseGenerator:
arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier() arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
time1 = time.time() with Timer() as t_generate_response:
checked = False checked = False
if random.random() > 0: if random.random() > 0:
checked = False checked = False
current_model = self.model_normal current_model = self.model_normal
current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高 current_model.temperature = (
global_config.llm_normal["temp"] * arousal_multiplier
) # 激活度越高,温度越高
model_response = await self._generate_response_with_model( model_response = await self._generate_response_with_model(
message, current_model, thinking_id, mode="normal" message, current_model, thinking_id, mode="normal"
) )
@@ -59,26 +63,28 @@ class ResponseGenerator:
else: else:
checked = True checked = True
current_model = self.model_normal current_model = self.model_normal
current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高 current_model.temperature = (
global_config.llm_normal["temp"] * arousal_multiplier
) # 激活度越高,温度越高
print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}") print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
model_response = await self._generate_response_with_model( model_response = await self._generate_response_with_model(
message, current_model, thinking_id, mode="simple" message, current_model, thinking_id, mode="simple"
) )
current_model.temperature = 0.3 current_model.temperature = global_config.llm_normal["temp"]
model_checked_response = await self._check_response_with_model( model_checked_response = await self._check_response_with_model(
message, model_response, current_model, thinking_id message, model_response, current_model, thinking_id
) )
time2 = time.time()
if model_response: if model_response:
if checked: if checked:
logger.info( logger.info(
f"{global_config.BOT_NICKNAME}的回复是:{model_response},思忖后,回复是:{model_checked_response},生成回复时间: {time2 - time1}" f"{global_config.BOT_NICKNAME}的回复是:{model_response},思忖后,回复是:{model_checked_response},生成回复时间: {t_generate_response.human_readable}"
) )
else: else:
logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response},生成回复时间: {time2 - time1}") logger.info(
f"{global_config.BOT_NICKNAME}的回复是:{model_response},生成回复时间: {t_generate_response.human_readable}"
)
model_processed_response = await self._process_response(model_checked_response) model_processed_response = await self._process_response(model_checked_response)
@@ -94,18 +100,20 @@ class ResponseGenerator:
info_catcher = info_catcher_manager.get_info_catcher(thinking_id) info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname: # if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
sender_name = ( # sender_name = (
f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]" # f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
f"{message.chat_stream.user_info.user_cardname}" # f"{message.chat_stream.user_info.user_cardname}"
) # )
elif message.chat_stream.user_info.user_nickname: # elif message.chat_stream.user_info.user_nickname:
sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}" # sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
else: # else:
sender_name = f"用户({message.chat_stream.user_info.user_id})" # sender_name = f"用户({message.chat_stream.user_info.user_id})"
sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"
# 构建prompt # 构建prompt
timer1 = time.time() with Timer() as t_build_prompt:
if mode == "normal": if mode == "normal":
prompt = await prompt_builder._build_prompt( prompt = await prompt_builder._build_prompt(
message.chat_stream, message.chat_stream,
@@ -113,15 +121,7 @@ class ResponseGenerator:
sender_name=sender_name, sender_name=sender_name,
stream_id=message.chat_stream.stream_id, stream_id=message.chat_stream.stream_id,
) )
elif mode == "simple": logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
prompt = await prompt_builder._build_prompt_simple(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
timer2 = time.time()
logger.info(f"构建{mode}prompt时间: {timer2 - timer1}")
try: try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt) content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
@@ -136,50 +136,6 @@ class ResponseGenerator:
return content return content
async def _check_response_with_model(
self, message: MessageRecv, content: str, model: LLM_request, thinking_id: str
) -> str:
_info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
sender_name = ""
if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
sender_name = (
f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
f"{message.chat_stream.user_info.user_cardname}"
)
elif message.chat_stream.user_info.user_nickname:
sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
else:
sender_name = f"用户({message.chat_stream.user_info.user_id})"
# 构建prompt
timer1 = time.time()
prompt = await prompt_builder._build_prompt_check_response(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
content=content,
)
timer2 = time.time()
logger.info(f"构建check_prompt: {prompt}")
logger.info(f"构建check_prompt时间: {timer2 - timer1}")
try:
checked_content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
# info_catcher.catch_after_llm_generated(
# prompt=prompt,
# response=content,
# reasoning_content=reasoning_content,
# model_name=self.current_model_name)
except Exception:
logger.exception("检查回复时出错")
return None
return checked_content
async def _get_emotion_tags(self, content: str, processed_plain_text: str): async def _get_emotion_tags(self, content: str, processed_plain_text: str):
"""提取情感标签,结合立场和情绪""" """提取情感标签,结合立场和情绪"""
try: try:
@@ -229,6 +185,57 @@ class ResponseGenerator:
logger.debug(f"获取情感标签时出错: {e}") logger.debug(f"获取情感标签时出错: {e}")
return "中立", "平静" # 出错时返回默认值 return "中立", "平静" # 出错时返回默认值
async def _get_emotion_tags_with_reason(self, content: str, processed_plain_text: str, reason: str):
"""提取情感标签,结合立场和情绪"""
try:
# 构建提示词,结合回复内容、被回复的内容以及立场分析
prompt = f"""
请严格根据以下对话内容,完成以下任务:
1. 判断回复者对被回复者观点的直接立场:
- "支持":明确同意或强化被回复者观点
- "反对":明确反驳或否定被回复者观点
- "中立":不表达明确立场或无关回应
2. 从"开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
3. 按照"立场-情绪"的格式直接输出结果,例如:"反对-愤怒"
4. 考虑回复者的人格设定为{global_config.personality_core}
对话示例:
被回复「A就是笨」
回复「A明明很聪明」 → 反对-愤怒
当前对话:
被回复:「{processed_plain_text}
回复:「{content}
原因:「{reason}
输出要求:
- 只需输出"立场-情绪"结果,不要解释
- 严格基于文字直接表达的对立关系判断
"""
# 调用模型生成结果
result, _, _ = await self.model_sum.generate_response(prompt)
result = result.strip()
# 解析模型输出的结果
if "-" in result:
stance, emotion = result.split("-", 1)
valid_stances = ["支持", "反对", "中立"]
valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]
if stance in valid_stances and emotion in valid_emotions:
return stance, emotion # 返回有效的立场-情绪组合
else:
logger.debug(f"无效立场-情感组合:{result}")
return "中立", "平静" # 默认返回中立-平静
else:
logger.debug(f"立场-情感格式错误:{result}")
return "中立", "平静" # 格式错误时返回默认值
except Exception as e:
logger.debug(f"获取情感标签时出错: {e}")
return "中立", "平静" # 出错时返回默认值
async def _process_response(self, content: str) -> List[str]: async def _process_response(self, content: str) -> List[str]:
"""处理响应内容,返回处理后的内容和情感标签""" """处理响应内容,返回处理后的内容和情感标签"""
if not content: if not content:

View File

@@ -8,7 +8,8 @@ from src.common.logger import get_module_logger
from ....individuality.individuality import Individuality from ....individuality.individuality import Individuality
from src.heart_flow.heartflow import heartflow from src.heart_flow.heartflow import heartflow
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.chat.utils import parse_text_timestamps
logger = get_module_logger("prompt") logger = get_module_logger("prompt")
@@ -43,7 +44,7 @@ def init_prompt():
{chat_talking_prompt} {chat_talking_prompt}
现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n 现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
你刚刚脑子里在想:{current_mind_info} 你刚刚脑子里在想:{current_mind_info}
现在请你读读之前的聊天记录,然后给出日常,口语化且简短的回复内容,只给出文字的回复内容,不要有内心独白: 现在请你读读之前的聊天记录,然后给出日常,口语化且简短的回复内容,请只对一个话题进行回复,只给出文字的回复内容,不要有内心独白:
""", """,
"heart_flow_prompt_simple", "heart_flow_prompt_simple",
) )
@@ -52,7 +53,7 @@ def init_prompt():
你的名字叫{bot_name}{prompt_identity} 你的名字叫{bot_name}{prompt_identity}
{chat_target},你希望在群里回复:{content}。现在请你根据以下信息修改回复内容。将这个回复修改的更加日常且口语化的回复,平淡一些,回复尽量简短一些。不要回复的太有条理。 {chat_target},你希望在群里回复:{content}。现在请你根据以下信息修改回复内容。将这个回复修改的更加日常且口语化的回复,平淡一些,回复尽量简短一些。不要回复的太有条理。
{prompt_ger},不要刻意突出自身学科背景,注意只输出回复内容。 {prompt_ger},不要刻意突出自身学科背景,注意只输出回复内容。
{moderation_prompt}。注意:不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""", {moderation_prompt}。注意:不要输出多余内容(包括前后缀冒号和引号at或 @等 )。""",
"heart_flow_prompt_response", "heart_flow_prompt_response",
) )
@@ -128,7 +129,7 @@ class PromptBuilder:
# moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。 # moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
# 涉及政治敏感以及违法违规的内容请规避。""" # 涉及政治敏感以及违法违规的内容请规避。"""
logger.info("开始构建prompt") logger.debug("开始构建prompt")
# prompt = f""" # prompt = f"""
# {chat_target} # {chat_target}
@@ -161,6 +162,9 @@ class PromptBuilder:
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"), moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
) )
prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
prompt = parse_text_timestamps(prompt, mode="lite")
return prompt return prompt
async def _build_prompt_simple( async def _build_prompt_simple(
@@ -206,7 +210,7 @@ class PromptBuilder:
) )
keywords_reaction_prompt += rule.get("reaction", "") + "" keywords_reaction_prompt += rule.get("reaction", "") + ""
logger.info("开始构建prompt") logger.debug("开始构建prompt")
# prompt = f""" # prompt = f"""
# 你的名字叫{global_config.BOT_NICKNAME}{prompt_personality}。 # 你的名字叫{global_config.BOT_NICKNAME}{prompt_personality}。
@@ -257,7 +261,7 @@ class PromptBuilder:
# moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。 # moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
# 涉及政治敏感以及违法违规的内容请规避。""" # 涉及政治敏感以及违法违规的内容请规避。"""
logger.info("开始构建check_prompt") logger.debug("开始构建check_prompt")
# prompt = f""" # prompt = f"""
# 你的名字叫{global_config.BOT_NICKNAME}{prompt_identity}。 # 你的名字叫{global_config.BOT_NICKNAME}{prompt_identity}。

View File

@@ -27,8 +27,8 @@ logger = get_module_logger("config", config=config_config)
# 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码 # 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = True is_test = True
mai_version_main = "0.6.2" mai_version_main = "0.6.3"
mai_version_fix = "snapshot-2" mai_version_fix = "snapshot-1"
if mai_version_fix: if mai_version_fix:
if is_test: if is_test:
@@ -196,6 +196,9 @@ class BotConfig:
sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒 sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒 sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒 heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒
observation_context_size: int = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
compress_length_limit: int = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除
# willing # willing
willing_mode: str = "classical" # 意愿模式 willing_mode: str = "classical" # 意愿模式
@@ -253,8 +256,8 @@ class BotConfig:
chinese_typo_tone_error_rate = 0.2 # 声调错误概率 chinese_typo_tone_error_rate = 0.2 # 声调错误概率
chinese_typo_word_replace_rate = 0.02 # 整词替换概率 chinese_typo_word_replace_rate = 0.02 # 整词替换概率
# response_spliter # response_splitter
enable_response_spliter = True # 是否启用回复分割器 enable_response_splitter = True # 是否启用回复分割器
response_max_length = 100 # 回复允许的最大长度 response_max_length = 100 # 回复允许的最大长度
response_max_sentence_num = 3 # 回复允许的最大句子数 response_max_sentence_num = 3 # 回复允许的最大句子数
@@ -440,6 +443,14 @@ class BotConfig:
config.heart_flow_update_interval = heartflow_config.get( config.heart_flow_update_interval = heartflow_config.get(
"heart_flow_update_interval", config.heart_flow_update_interval "heart_flow_update_interval", config.heart_flow_update_interval
) )
if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
config.observation_context_size = heartflow_config.get(
"observation_context_size", config.observation_context_size
)
config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)
config.compress_length_limit = heartflow_config.get(
"compress_length_limit", config.compress_length_limit
)
def willing(parent: dict): def willing(parent: dict):
willing_config = parent["willing"] willing_config = parent["willing"]
@@ -477,7 +488,7 @@ class BotConfig:
"llm_emotion_judge", "llm_emotion_judge",
"vlm", "vlm",
"embedding", "embedding",
"moderation", "llm_tool_use",
"llm_observation", "llm_observation",
"llm_sub_heartflow", "llm_sub_heartflow",
"llm_heartflow", "llm_heartflow",
@@ -489,7 +500,15 @@ class BotConfig:
# base_url 的例子: SILICONFLOW_BASE_URL # base_url 的例子: SILICONFLOW_BASE_URL
# key 的例子: SILICONFLOW_KEY # key 的例子: SILICONFLOW_KEY
cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0} cfg_target = {
"name": "",
"base_url": "",
"key": "",
"stream": False,
"pri_in": 0,
"pri_out": 0,
"temp": 0.7,
}
if config.INNER_VERSION in SpecifierSet("<=0.0.0"): if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
cfg_target = cfg_item cfg_target = cfg_item
@@ -502,6 +521,7 @@ class BotConfig:
stable_item.append("stream") stable_item.append("stream")
pricing_item = ["pri_in", "pri_out"] pricing_item = ["pri_in", "pri_out"]
# 从配置中原始拷贝稳定字段 # 从配置中原始拷贝稳定字段
for i in stable_item: for i in stable_item:
# 如果 字段 属于计费项 且获取不到,那默认值是 0 # 如果 字段 属于计费项 且获取不到,那默认值是 0
@@ -519,6 +539,13 @@ class BotConfig:
logger.error(f"{item} 中的必要字段不存在,请检查") logger.error(f"{item} 中的必要字段不存在,请检查")
raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查") from e raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查") from e
# 如果配置中有temp参数就使用配置中的值
if "temp" in cfg_item:
cfg_target["temp"] = cfg_item["temp"]
else:
# 如果没有temp参数就删除默认值
cfg_target.pop("temp", None)
provider = cfg_item.get("provider") provider = cfg_item.get("provider")
if provider is None: if provider is None:
logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查") logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查")
@@ -604,13 +631,13 @@ class BotConfig:
"word_replace_rate", config.chinese_typo_word_replace_rate "word_replace_rate", config.chinese_typo_word_replace_rate
) )
def response_spliter(parent: dict): def response_splitter(parent: dict):
response_spliter_config = parent["response_spliter"] response_splitter_config = parent["response_splitter"]
config.enable_response_spliter = response_spliter_config.get( config.enable_response_splitter = response_splitter_config.get(
"enable_response_spliter", config.enable_response_spliter "enable_response_splitter", config.enable_response_splitter
) )
config.response_max_length = response_spliter_config.get("response_max_length", config.response_max_length) config.response_max_length = response_splitter_config.get("response_max_length", config.response_max_length)
config.response_max_sentence_num = response_spliter_config.get( config.response_max_sentence_num = response_splitter_config.get(
"response_max_sentence_num", config.response_max_sentence_num "response_max_sentence_num", config.response_max_sentence_num
) )
@@ -664,7 +691,7 @@ class BotConfig:
"keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False}, "keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False},
"chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False}, "chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False},
"platforms": {"func": platforms, "support": ">=1.0.0"}, "platforms": {"func": platforms, "support": ">=1.0.0"},
"response_spliter": {"func": response_spliter, "support": ">=0.0.11", "necessary": False}, "response_splitter": {"func": response_splitter, "support": ">=0.0.11", "necessary": False},
"experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False}, "experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False},
"heartflow": {"func": heartflow, "support": ">=1.0.2", "necessary": False}, "heartflow": {"func": heartflow, "support": ">=1.0.2", "necessary": False},
} }

View File

@@ -436,7 +436,7 @@ class Hippocampus:
activation_values[neighbor] = new_activation activation_values[neighbor] = new_activation
visited_nodes.add(neighbor) visited_nodes.add(neighbor)
nodes_to_process.append((neighbor, new_activation, current_depth + 1)) nodes_to_process.append((neighbor, new_activation, current_depth + 1))
logger.debug( logger.trace(
f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})" f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
) # noqa: E501 ) # noqa: E501
@@ -1144,7 +1144,7 @@ class Hippocampus:
activation_values[neighbor] = new_activation activation_values[neighbor] = new_activation
visited_nodes.add(neighbor) visited_nodes.add(neighbor)
nodes_to_process.append((neighbor, new_activation, current_depth + 1)) nodes_to_process.append((neighbor, new_activation, current_depth + 1))
logger.debug( logger.trace(
f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})" f"节点 '{neighbor}' 被激活,激活值: {new_activation:.2f} (通过 '{current_node}' 连接,强度: {strength}, 深度: {current_depth + 1})"
) # noqa: E501 ) # noqa: E501
@@ -1301,7 +1301,7 @@ class Hippocampus:
# 对每个关键词进行扩散式检索 # 对每个关键词进行扩散式检索
for keyword in valid_keywords: for keyword in valid_keywords:
logger.debug(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):") logger.trace(f"开始以关键词 '{keyword}' 为中心进行扩散检索 (最大深度: {max_depth}):")
# 初始化激活值 # 初始化激活值
activation_values = {keyword: 1.0} activation_values = {keyword: 1.0}
# 记录已访问的节点 # 记录已访问的节点
@@ -1352,7 +1352,7 @@ class Hippocampus:
# 计算激活节点数与总节点数的比值 # 计算激活节点数与总节点数的比值
total_activation = sum(activate_map.values()) total_activation = sum(activate_map.values())
logger.info(f"总激活值: {total_activation:.2f}") logger.trace(f"总激活值: {total_activation:.2f}")
total_nodes = len(self.memory_graph.G.nodes()) total_nodes = len(self.memory_graph.G.nodes())
# activated_nodes = len(activate_map) # activated_nodes = len(activate_map)
activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0 activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0

View File

@@ -98,7 +98,7 @@ class LLM_request:
"timestamp": datetime.now(), "timestamp": datetime.now(),
} }
db.llm_usage.insert_one(usage_data) db.llm_usage.insert_one(usage_data)
logger.debug( logger.trace(
f"Token使用情况 - 模型: {self.model_name}, " f"Token使用情况 - 模型: {self.model_name}, "
f"用户: {user_id}, 类型: {request_type}, " f"用户: {user_id}, 类型: {request_type}, "
f"提示词: {prompt_tokens}, 完成: {completion_tokens}, " f"提示词: {prompt_tokens}, 完成: {completion_tokens}, "

View File

@@ -6,6 +6,9 @@ from typing import Any, Callable, Dict
import datetime import datetime
import asyncio import asyncio
import numpy as np import numpy as np
from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config
from src.individuality.individuality import Individuality
import matplotlib import matplotlib
@@ -13,6 +16,8 @@ matplotlib.use("Agg")
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from pathlib import Path from pathlib import Path
import pandas as pd import pandas as pd
import json
import re
""" """
@@ -32,6 +37,8 @@ logger = get_module_logger("person_info")
person_info_default = { person_info_default = {
"person_id": None, "person_id": None,
"person_name": None,
"name_reason": None,
"platform": None, "platform": None,
"user_id": None, "user_id": None,
"nickname": None, "nickname": None,
@@ -48,16 +55,46 @@ person_info_default = {
class PersonInfoManager: class PersonInfoManager:
def __init__(self): def __init__(self):
self.person_name_list = {}
self.qv_name_llm = LLM_request(
model=global_config.llm_normal,
max_tokens=256,
request_type="qv_name",
)
if "person_info" not in db.list_collection_names(): if "person_info" not in db.list_collection_names():
db.create_collection("person_info") db.create_collection("person_info")
db.person_info.create_index("person_id", unique=True) db.person_info.create_index("person_id", unique=True)
# 初始化时读取所有person_name
cursor = db.person_info.find(
{"person_name": {"$exists": True}},
{"person_id": 1, "person_name": 1, "_id": 0}
)
for doc in cursor:
if doc.get("person_name"):
self.person_name_list[doc["person_id"]] = doc["person_name"]
logger.debug(f"已加载 {len(self.person_name_list)} 个用户名称")
def get_person_id(self, platform: str, user_id: int): def get_person_id(self, platform: str, user_id: int):
"""获取唯一id""" """获取唯一id"""
#如果platform中存在-,就截取-后面的部分
if "-" in platform:
platform = platform.split("-")[1]
components = [platform, str(user_id)] components = [platform, str(user_id)]
key = "_".join(components) key = "_".join(components)
return hashlib.md5(key.encode()).hexdigest() return hashlib.md5(key.encode()).hexdigest()
def is_person_known(self, platform: str, user_id: int):
"""判断是否认识某人"""
person_id = self.get_person_id(platform, user_id)
document = db.person_info.find_one({"person_id": person_id})
if document:
return True
else:
return False
async def create_person_info(self, person_id: str, data: dict = None): async def create_person_info(self, person_id: str, data: dict = None):
"""创建一个项""" """创建一个项"""
if not person_id: if not person_id:
@@ -89,6 +126,109 @@ class PersonInfoManager:
logger.debug(f"更新时{person_id}不存在,已新建") logger.debug(f"更新时{person_id}不存在,已新建")
await self.create_person_info(person_id, Data) await self.create_person_info(person_id, Data)
async def has_one_field(self, person_id: str, field_name: str):
"""判断是否存在某一个字段"""
document = db.person_info.find_one({"person_id": person_id}, {field_name: 1})
if document:
return True
else:
return False
def _extract_json_from_text(self, text: str) -> dict:
"""从文本中提取JSON数据的高容错方法"""
try:
# 尝试直接解析
return json.loads(text)
except json.JSONDecodeError:
try:
# 尝试找到JSON格式的部分
json_pattern = r'\{[^{}]*\}'
matches = re.findall(json_pattern, text)
if matches:
return json.loads(matches[0])
# 如果上面都失败了,尝试提取键值对
nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
reason_pattern = r'"reason"[:\s]+"([^"]+)"'
nickname_match = re.search(nickname_pattern, text)
reason_match = re.search(reason_pattern, text)
if nickname_match:
return {
"nickname": nickname_match.group(1),
"reason": reason_match.group(1) if reason_match else "未提供理由"
}
except Exception as e:
logger.error(f"JSON提取失败: {str(e)}")
# 如果所有方法都失败了,返回空结果
return {"nickname": "", "reason": ""}
async def qv_person_name(self, person_id: str, user_nickname: str, user_cardname: str, user_avatar: str):
"""给某个用户取名"""
if not person_id:
logger.debug("取名失败person_id不能为空")
return
old_name = await self.get_value(person_id, "person_name")
old_reason = await self.get_value(person_id, "name_reason")
max_retries = 5 # 最大重试次数
current_try = 0
existing_names = ""
while current_try < max_retries:
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
bot_name = individuality.personality.bot_nickname
qv_name_prompt = f"你是{bot_name},你{prompt_personality}"
qv_name_prompt += f"现在你想给一个用户取一个昵称用户是的qq昵称是{user_nickname}"
qv_name_prompt += f"用户的qq群昵称名是{user_cardname}"
if user_avatar:
qv_name_prompt += f"用户的qq头像是{user_avatar}"
if old_name:
qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason}"
qv_name_prompt += "\n请根据以上用户信息想想你叫他什么比较好请最好使用用户的qq昵称可以稍作修改"
if existing_names:
qv_name_prompt += f"\n请注意,以下名称已被使用,不要使用以下昵称:{existing_names}\n"
qv_name_prompt += "请用json给出你的想法并给出理由示例如下"
qv_name_prompt += '''{
"nickname": "昵称",
"reason": "理由"
}'''
logger.debug(f"取名提示词:{qv_name_prompt}")
response = await self.qv_name_llm.generate_response(qv_name_prompt)
logger.debug(f"取名回复:{response}")
result = self._extract_json_from_text(response[0])
if not result["nickname"]:
logger.error("生成的昵称为空,重试中...")
current_try += 1
continue
# 检查生成的昵称是否已存在
if result["nickname"] not in self.person_name_list.values():
# 更新数据库和内存中的列表
await self.update_one_field(person_id, "person_name", result["nickname"])
# await self.update_one_field(person_id, "nickname", user_nickname)
# await self.update_one_field(person_id, "avatar", user_avatar)
await self.update_one_field(person_id, "name_reason", result["reason"])
self.person_name_list[person_id] = result["nickname"]
logger.debug(f"用户 {person_id} 的名称已更新为 {result['nickname']},原因:{result['reason']}")
return result
else:
existing_names += f"{result['nickname']}"
logger.debug(f"生成的昵称 {result['nickname']} 已存在,重试中...")
current_try += 1
logger.error(f"{max_retries}次尝试后仍未能生成唯一昵称")
return None
async def del_one_document(self, person_id: str): async def del_one_document(self, person_id: str):
"""删除指定 person_id 的文档""" """删除指定 person_id 的文档"""
if not person_id: if not person_id:
@@ -117,7 +257,7 @@ class PersonInfoManager:
return document[field_name] return document[field_name]
else: else:
default_value = copy.deepcopy(person_info_default[field_name]) default_value = copy.deepcopy(person_info_default[field_name])
logger.debug(f"获取{person_id}{field_name}失败,已返回默认值{default_value}") logger.trace(f"获取{person_id}{field_name}失败,已返回默认值{default_value}")
return default_value return default_value
async def get_values(self, person_id: str, field_names: list) -> dict: async def get_values(self, person_id: str, field_names: list) -> dict:
@@ -264,17 +404,17 @@ class PersonInfoManager:
msg_interval = int(round(np.percentile(filtered, 80))) msg_interval = int(round(np.percentile(filtered, 80)))
await self.update_one_field(person_id, "msg_interval", msg_interval) await self.update_one_field(person_id, "msg_interval", msg_interval)
logger.debug(f"用户{person_id}的msg_interval已经被更新为{msg_interval}") logger.trace(f"用户{person_id}的msg_interval已经被更新为{msg_interval}")
except Exception as e: except Exception as e:
logger.debug(f"用户{person_id}消息间隔计算失败: {type(e).__name__}: {str(e)}") logger.trace(f"用户{person_id}消息间隔计算失败: {type(e).__name__}: {str(e)}")
continue continue
# 其他... # 其他...
if msg_interval_map: if msg_interval_map:
logger.info("已保存分布图到: logs/person_info") logger.trace("已保存分布图到: logs/person_info")
current_time = datetime.datetime.now() current_time = datetime.datetime.now()
logger.info(f"个人信息推断结束: {current_time.strftime('%Y-%m-%d %H:%M:%S')}") logger.trace(f"个人信息推断结束: {current_time.strftime('%Y-%m-%d %H:%M:%S')}")
await asyncio.sleep(86400) await asyncio.sleep(86400)
except Exception as e: except Exception as e:

View File

@@ -4,6 +4,8 @@ import math
from bson.decimal128 import Decimal128 from bson.decimal128 import Decimal128
from .person_info import person_info_manager from .person_info import person_info_manager
import time import time
import re
import traceback
relationship_config = LogConfig( relationship_config = LogConfig(
# 使用关系专用样式 # 使用关系专用样式
@@ -75,7 +77,62 @@ class RelationshipManager:
else: else:
return mood_value / coefficient return mood_value / coefficient
async def calculate_update_relationship_value(self, chat_stream: ChatStream, label: str, stance: str) -> None: async def is_known_some_one(self, platform , user_id):
"""判断是否认识某人"""
is_known = person_info_manager.is_person_known(platform, user_id)
return is_known
async def is_qved_name(self, platform, user_id):
    """Check whether the user already has a generated person_name.

    Args:
        platform: platform identifier of the user (e.g. "qq").
        user_id: the user's id on that platform.

    Returns:
        bool: True if the "person_name" field exists and holds a non-None value.
    """
    person_id = person_info_manager.get_person_id(platform, user_id)
    has_name_field = await person_info_manager.has_one_field(person_id, "person_name")
    stored_name = await person_info_manager.get_value(person_id, "person_name")
    # Use debug logging instead of print() so output respects log configuration.
    logger.debug(f"old_name: {stored_name}")
    logger.debug(f"is_qved: {has_name_field}")
    # The field may exist but hold None (default value), so check both conditions.
    return bool(has_name_field) and stored_name is not None
async def first_knowing_some_one(self, platform , user_id, user_nickname, user_cardname, user_avatar):
    """Record a newly met user and generate a person_name for them.

    Persists the user's nickname, then delegates to
    person_info_manager.qv_person_name to produce a display name.

    Args:
        platform: platform identifier of the user.
        user_id: the user's id on that platform.
        user_nickname: the user's account nickname (persisted).
        user_cardname: the user's group card name (passed to name generation only).
        user_avatar: the user's avatar (passed to name generation only).
    """
    person_id = person_info_manager.get_person_id(platform,user_id)
    await person_info_manager.update_one_field(person_id, "nickname", user_nickname)
    # Cardname/avatar persistence is intentionally disabled for now:
    # await person_info_manager.update_one_field(person_id, "user_cardname", user_cardname)
    # await person_info_manager.update_one_field(person_id, "user_avatar", user_avatar)
    await person_info_manager.qv_person_name(person_id, user_nickname, user_cardname, user_avatar)
async def convert_all_person_sign_to_person_name(self, input_text: str):
    """Replace every <platform:user_id:nickname:cardname> tag with a person name.

    For each matched tag, prefers the stored person_name from
    person_info_manager.person_name_list; falls back to the tag's nickname
    (or cardname when the nickname is blank).

    Args:
        input_text: text possibly containing <platform:user_id:nickname:cardname> tags.

    Returns:
        str: the text with tags replaced; the original text unchanged on any error.
    """
    try:
        known_names = person_info_manager.person_name_list
        # <platform : numeric user_id : nickname : cardname>
        pattern = r'<([^:]+):(\d+):([^:]+):([^>]+)>'
        matches = re.findall(pattern, input_text)
        result_text = input_text
        for platform, user_id, nickname, cardname in matches:
            person_id = person_info_manager.get_person_id(platform, user_id)
            # Default to the nickname; fall back to cardname when nickname is blank.
            person_name = nickname.strip() if nickname.strip() else cardname.strip()
            if person_id in known_names and known_names[person_id] is not None:
                person_name = known_names[person_id]
            # Debug logging instead of print() so output respects log configuration.
            logger.debug(f"将<{platform}:{user_id}:{nickname}:{cardname}>替换为{person_name}")
            result_text = result_text.replace(f"<{platform}:{user_id}:{nickname}:{cardname}>", person_name)
        return result_text
    except Exception:
        # Best-effort conversion: log the traceback and return the input untouched.
        logger.error(traceback.format_exc())
        return input_text
async def calculate_update_relationship_value(self, chat_stream: ChatStream, label: str, stance: str) -> tuple:
"""计算并变更关系值 """计算并变更关系值
新的关系值变更计算方式: 新的关系值变更计算方式:
将关系值限定在-1000到1000 将关系值限定在-1000到1000
@@ -84,6 +141,10 @@ class RelationshipManager:
2.关系越差,改善越难,关系越好,恶化越容易 2.关系越差,改善越难,关系越好,恶化越容易
3.人维护关系的精力往往有限,所以当高关系值用户越多,对于中高关系值用户增长越慢 3.人维护关系的精力往往有限,所以当高关系值用户越多,对于中高关系值用户增长越慢
4.连续正面或负面情感会正反馈 4.连续正面或负面情感会正反馈
返回:
用户昵称,变更值,变更后关系等级
""" """
stancedict = { stancedict = {
"支持": 0, "支持": 0,
@@ -147,6 +208,7 @@ class RelationshipManager:
level_num = self.calculate_level_num(old_value + value) level_num = self.calculate_level_num(old_value + value)
relationship_level = ["厌恶", "冷漠", "一般", "友好", "喜欢", "暧昧"] relationship_level = ["厌恶", "冷漠", "一般", "友好", "喜欢", "暧昧"]
logger.info( logger.info(
f"用户: {chat_stream.user_info.user_nickname}"
f"当前关系: {relationship_level[level_num]}, " f"当前关系: {relationship_level[level_num]}, "
f"关系值: {old_value:.2f}, " f"关系值: {old_value:.2f}, "
f"当前立场情感: {stance}-{label}, " f"当前立场情感: {stance}-{label}, "
@@ -155,6 +217,97 @@ class RelationshipManager:
await person_info_manager.update_one_field(person_id, "relationship_value", old_value + value, data) await person_info_manager.update_one_field(person_id, "relationship_value", old_value + value, data)
return chat_stream.user_info.user_nickname, value, relationship_level[level_num]
async def calculate_update_relationship_value_with_reason(
    self, chat_stream: ChatStream, label: str, stance: str, reason: str
) -> tuple:
    """Compute and apply a relationship-value change for the message sender.

    Same algorithm as calculate_update_relationship_value, but additionally
    accepts ``reason``.  NOTE(review): ``reason`` is currently unused in the
    body — confirm whether it should influence the update or be logged.

    The relationship value is clamped to [-1000, 1000].  The update curve is
    designed so that:
      1. changes slow down as the value approaches either extreme;
      2. bad relations are hard to improve, good relations are easy to damage;
      3. the more high-value users exist, the slower mid/high values grow
         (limited social energy);
      4. consecutive positive or negative emotions reinforce each other.

    Args:
        chat_stream: chat stream carrying the sender's platform/id/nickname.
        label: emotion label; must be one of the keys of ``valuedict`` below.
        stance: stance label; must be one of the keys of ``stancedict`` below.
        reason: textual reason for the change (currently unused).

    Returns:
        tuple: (user nickname, applied delta, relationship level name).

    Raises:
        KeyError: if ``label`` or ``stance`` is not a known dictionary key.
    """
    # Stance → index: 0 = supportive, 1 = neutral, 2 = opposed.
    stancedict = {
        "支持": 0,
        "中立": 1,
        "反对": 2,
    }
    # Emotion label → base delta (positive emotions raise the value).
    valuedict = {
        "开心": 1.5,
        "愤怒": -2.0,
        "悲伤": -0.5,
        "惊讶": 0.6,
        "害羞": 2.0,
        "平静": 0.3,
        "恐惧": -1.5,
        "厌恶": -1.0,
        "困惑": 0.5,
    }
    person_id = person_info_manager.get_person_id(chat_stream.user_info.platform, chat_stream.user_info.user_id)
    # Extra identity fields persisted alongside the value update.
    # NOTE(review): "konw_time" looks like a typo of "know_time", but it is a
    # stored key — do not rename without a data migration.
    data = {
        "platform": chat_stream.user_info.platform,
        "user_id": chat_stream.user_info.user_id,
        "nickname": chat_stream.user_info.user_nickname,
        "konw_time": int(time.time()),
    }
    old_value = await person_info_manager.get_value(person_id, "relationship_value")
    old_value = self.ensure_float(old_value, person_id)
    # Clamp stored value into the supported range before computing the delta.
    if old_value > 1000:
        old_value = 1000
    elif old_value < -1000:
        old_value = -1000
    value = valuedict[label]
    if old_value >= 0:
        if valuedict[label] >= 0 and stancedict[stance] != 2:
            # Positive emotion, not opposed: gains taper off (cosine) near +1000.
            value = value * math.cos(math.pi * old_value / 2000)
            if old_value > 500:
                # Limited social energy: many high-value (>700) users slow growth.
                rdict = await person_info_manager.get_specific_value_list("relationship_value", lambda x: x > 700)
                high_value_count = len(rdict)
                if old_value > 700:
                    value *= 3 / (high_value_count + 2)  # exclude this user
                else:
                    value *= 3 / (high_value_count + 3)
        elif valuedict[label] < 0 and stancedict[stance] != 0:
            # Negative emotion, not supportive: damage grows with current value.
            value = value * math.exp(old_value / 2000)
        else:
            # Contradictory emotion/stance combination: no change.
            value = 0
    elif old_value < 0:
        if valuedict[label] >= 0 and stancedict[stance] != 2:
            # Positive emotion on a bad relation: improvement is damped.
            value = value * math.exp(old_value / 2000)
        elif valuedict[label] < 0 and stancedict[stance] != 0:
            # Negative emotion: losses taper off (cosine) near -1000.
            value = value * math.cos(math.pi * old_value / 2000)
        else:
            value = 0
    # Order matters: register the emotion streak first, then scale by mood.
    self.positive_feedback_sys(label, stance)
    value = self.mood_feedback(value)
    level_num = self.calculate_level_num(old_value + value)
    relationship_level = ["厌恶", "冷漠", "一般", "友好", "喜欢", "暧昧"]
    # NOTE(review): the log line has no separator after the nickname segment —
    # output runs together; confirm whether ", " was intended.
    logger.info(
        f"用户: {chat_stream.user_info.user_nickname}"
        f"当前关系: {relationship_level[level_num]}, "
        f"关系值: {old_value:.2f}, "
        f"当前立场情感: {stance}-{label}, "
        f"变更: {value:+.5f}"
    )
    await person_info_manager.update_one_field(person_id, "relationship_value", old_value + value, data)
    return chat_stream.user_info.user_nickname, value, relationship_level[level_num]
async def build_relationship_info(self, person) -> str: async def build_relationship_info(self, person) -> str:
person_id = person_info_manager.get_person_id(person[0], person[1]) person_id = person_info_manager.get_person_id(person[0], person[1])
relationship_value = await person_info_manager.get_value(person_id, "relationship_value") relationship_value = await person_info_manager.get_value(person_id, "relationship_value")

View File

@@ -126,6 +126,7 @@ def main():
"""主函数,启动心跳线程""" """主函数,启动心跳线程"""
# 配置 # 配置
SERVER_URL = "http://hyybuth.xyz:10058" SERVER_URL = "http://hyybuth.xyz:10058"
# SERVER_URL = "http://localhost:10058"
HEARTBEAT_INTERVAL = 300 # 5分钟 HEARTBEAT_INTERVAL = 300 # 5分钟
# 创建并启动心跳线程 # 创建并启动心跳线程

View File

@@ -3,7 +3,7 @@ import re
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
import asyncio import asyncio
from src.common.logger import get_module_logger from src.common.logger import get_module_logger
import traceback # import traceback
logger = get_module_logger("prompt_build") logger = get_module_logger("prompt_build")
@@ -94,14 +94,32 @@ global_prompt_manager = PromptManager()
class Prompt(str): class Prompt(str):
# 临时标记,作为类常量
_TEMP_LEFT_BRACE = "__ESCAPED_LEFT_BRACE__"
_TEMP_RIGHT_BRACE = "__ESCAPED_RIGHT_BRACE__"
@staticmethod
def _process_escaped_braces(template: str) -> str:
"""处理模板中的转义花括号,将 \{\} 替换为临时标记"""
return template.replace("\\{", Prompt._TEMP_LEFT_BRACE).replace("\\}", Prompt._TEMP_RIGHT_BRACE)
@staticmethod
def _restore_escaped_braces(template: str) -> str:
"""将临时标记还原为实际的花括号字符"""
return template.replace(Prompt._TEMP_LEFT_BRACE, "{").replace(Prompt._TEMP_RIGHT_BRACE, "}")
def __new__(cls, fstr: str, name: Optional[str] = None, args: Union[List[Any], tuple[Any, ...]] = None, **kwargs): def __new__(cls, fstr: str, name: Optional[str] = None, args: Union[List[Any], tuple[Any, ...]] = None, **kwargs):
# 如果传入的是元组,转换为列表 # 如果传入的是元组,转换为列表
if isinstance(args, tuple): if isinstance(args, tuple):
args = list(args) args = list(args)
should_register = kwargs.pop("_should_register", True) should_register = kwargs.pop("_should_register", True)
# 预处理模板中的转义花括号
processed_fstr = cls._process_escaped_braces(fstr)
# 解析模板 # 解析模板
template_args = [] template_args = []
result = re.findall(r"\{(.*?)\}", fstr) result = re.findall(r"\{(.*?)\}", processed_fstr)
for expr in result: for expr in result:
if expr and expr not in template_args: if expr and expr not in template_args:
template_args.append(expr) template_args.append(expr)
@@ -142,8 +160,11 @@ class Prompt(str):
@classmethod @classmethod
def _format_template(cls, template: str, args: List[Any] = None, kwargs: Dict[str, Any] = None) -> str: def _format_template(cls, template: str, args: List[Any] = None, kwargs: Dict[str, Any] = None) -> str:
# 预处理模板中的转义花括号
processed_template = cls._process_escaped_braces(template)
template_args = [] template_args = []
result = re.findall(r"\{(.*?)\}", template) result = re.findall(r"\{(.*?)\}", processed_template)
for expr in result: for expr in result:
if expr and expr not in template_args: if expr and expr not in template_args:
template_args.append(expr) template_args.append(expr)
@@ -177,13 +198,15 @@ class Prompt(str):
try: try:
# 先用位置参数格式化 # 先用位置参数格式化
if args: if args:
template = template.format(**formatted_args) processed_template = processed_template.format(**formatted_args)
# 再用关键字参数格式化 # 再用关键字参数格式化
if kwargs: if kwargs:
template = template.format(**formatted_kwargs) processed_template = processed_template.format(**formatted_kwargs)
return template
# 将临时标记还原为实际的花括号
result = cls._restore_escaped_braces(processed_template)
return result
except (IndexError, KeyError) as e: except (IndexError, KeyError) as e:
raise ValueError( raise ValueError(
f"格式化模板失败: {template}, args={formatted_args}, kwargs={formatted_kwargs} {str(e)}" f"格式化模板失败: {template}, args={formatted_args}, kwargs={formatted_kwargs} {str(e)}"

View File

@@ -138,6 +138,7 @@ class LLMStatistics:
# user_id = str(doc.get("user_info", {}).get("user_id", "unknown")) # user_id = str(doc.get("user_info", {}).get("user_id", "unknown"))
chat_info = doc.get("chat_info", {}) chat_info = doc.get("chat_info", {})
user_info = doc.get("user_info", {}) user_info = doc.get("user_info", {})
user_id = str(user_info.get("user_id", "unknown"))
message_time = doc.get("time", 0) message_time = doc.get("time", 0)
group_info = chat_info.get("group_info") if chat_info else {} group_info = chat_info.get("group_info") if chat_info else {}
# print(f"group_info: {group_info}") # print(f"group_info: {group_info}")

View File

@@ -0,0 +1,151 @@
from time import perf_counter
from functools import wraps
from typing import Optional, Dict, Callable
import asyncio
"""
# 更好的计时器
使用形式:
- 上下文
- 装饰器
- 直接实例化
使用场景:
- 使用Timer在需要测量代码执行时间时如性能测试、计时器工具Timer类是更可靠、高精度的选择。
- 使用time.time()的场景:当需要记录实际时间点(如日志、时间戳)时使用,但避免用它测量时间间隔。
使用方式:
【装饰器】
time_dict = {}
@Timer("计数", time_dict)
def func():
pass
print(time_dict)
【上下文_1】
def func():
with Timer() as t:
pass
print(t)
print(t.human_readable)
【上下文_2】
def func():
time_dict = {}
with Timer("计数", time_dict):
pass
print(time_dict)
【直接实例化】
a = Timer()
print(a) # 直接输出当前 perf_counter 值
参数:
- name计时器的名字默认为 None
- storage计时器结果存储字典默认为 None
- auto_unit自动选择单位毫秒或秒默认为 True自动根据时间切换毫秒或秒
- do_type_check是否进行类型检查默认为 False不进行类型检查
属性human_readable
自定义错误TimerTypeError
"""
class TimerTypeError(TypeError):
    """Type error raised by Timer when a constructor argument has the wrong type."""

    __slots__ = ()

    def __init__(self, param, expected_type, actual_type):
        # Build the message first, then hand it to TypeError.
        message = f"参数 '{param}' 类型错误,期望 {expected_type},实际得到 {actual_type.__name__}"
        super().__init__(message)
class Timer:
    """
    Timer supports three usage modes:

    1. Decorator mode: measure how long a function / coroutine call takes.
    2. Context-manager mode: time the body of a ``with`` statement.
    3. Direct instantiation: if ``__enter__`` was never called, printing the
       object shows the current ``perf_counter`` value.
    """

    __slots__ = ("name", "storage", "elapsed", "auto_unit", "start")

    def __init__(
        self,
        name: Optional[str] = None,
        storage: Optional[Dict[str, float]] = None,
        auto_unit: bool = True,
        do_type_check: bool = False,
    ):
        # Optional strict argument validation; disabled by default.
        if do_type_check:
            self._validate_types(name, storage)
        self.name = name
        self.storage = storage
        self.auto_unit = auto_unit
        self.start = None
        self.elapsed = None

    def _validate_types(self, name, storage):
        """Raise TimerTypeError when name/storage have unexpected types."""
        if not (name is None or isinstance(name, str)):
            raise TimerTypeError("name", "Optional[str]", type(name))
        if not (storage is None or isinstance(storage, dict)):
            raise TimerTypeError("storage", "Optional[dict]", type(storage))

    def __call__(self, func: Optional[Callable] = None) -> Callable:
        """Decorator mode: return a wrapper that times each call of *func*."""
        if func is None:
            # Used as ``Timer(...)()``: build a fresh Timer per function so the
            # name falls back to the function's own name.
            def decorate(f):
                inner = Timer(name=self.name or f.__name__, storage=self.storage, auto_unit=self.auto_unit)
                return inner(f)

            return decorate

        if asyncio.iscoroutinefunction(func):

            @wraps(func)
            async def timed(*args, **kwargs):
                with self:
                    return await func(*args, **kwargs)
        else:

            @wraps(func)
            def timed(*args, **kwargs):
                with self:
                    return func(*args, **kwargs)

        # NOTE: the same Timer instance is shared across calls of the wrapper,
        # so concurrent/recursive calls would overwrite each other's timing.
        timed.__timer__ = self  # keep a reference to the timer on the wrapper
        return timed

    def __enter__(self):
        """Start the clock."""
        self.start = perf_counter()
        return self

    def __exit__(self, *exc_info):
        """Stop the clock, record the result, and never suppress exceptions."""
        self.elapsed = perf_counter() - self.start
        self._record_time()
        return False

    def _record_time(self):
        """Publish elapsed seconds into the storage dict, when configured."""
        if self.storage is None or not self.name:
            return
        self.storage[self.name] = self.elapsed

    @property
    def human_readable(self) -> str:
        """Elapsed time as a human-friendly string (ms below one second)."""
        if self.elapsed is None:
            return "未计时"
        if not self.auto_unit:
            return f"{self.elapsed:.4f}秒"
        if self.elapsed < 1:
            return f"{self.elapsed * 1000:.2f}毫秒"
        return f"{self.elapsed:.2f}秒"

    def __str__(self):
        if self.start is None:
            # Never entered: show the raw perf_counter reading.
            return f"{perf_counter()}"
        if self.elapsed is None:
            # Timing is still in progress.
            current_elapsed = perf_counter() - self.start
            return f"<Timer {self.name or '匿名'} [计时中: {current_elapsed:.4f}秒]>"
        return f"<Timer {self.name or '匿名'} [{self.human_readable}]>"

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
[inner] [inner]
version = "1.2.6" version = "1.3.0"
#以下是给开发人员阅读的,一般用户不需要阅读 #以下是给开发人员阅读的,一般用户不需要阅读
@@ -60,7 +60,7 @@ appearance = "用几句话描述外貌特征" # 外貌特征
enable_schedule_gen = true # 是否启用日程表(尚未完成) enable_schedule_gen = true # 是否启用日程表(尚未完成)
prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表" prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表"
schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒 schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒
schedule_temperature = 0.2 # 日程表温度建议0.2-0.5 schedule_temperature = 0.1 # 日程表温度建议0.1-0.5
time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运行电脑时区和国内时区不同的情况,或者模拟国外留学生日程 time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运行电脑时区和国内时区不同的情况,或者模拟国外留学生日程
[platforms] # 必填项目,填写每个平台适配器提供的链接 [platforms] # 必填项目,填写每个平台适配器提供的链接
@@ -77,12 +77,16 @@ model_v3_probability = 0.3 # 麦麦回答时选择次要回复模型2 模型的
sub_heart_flow_update_interval = 60 # 子心流更新频率,间隔 单位秒 sub_heart_flow_update_interval = 60 # 子心流更新频率,间隔 单位秒
sub_heart_flow_freeze_time = 100 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒 sub_heart_flow_freeze_time = 100 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒 sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
heart_flow_update_interval = 300 # 心流更新频率,间隔 单位秒 heart_flow_update_interval = 600 # 心流更新频率,间隔 单位秒
observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
compressed_length = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下文会被删除
[message] [message]
max_context_size = 12 # 麦麦获得的上文数量建议12太短太长都会导致脑袋尖尖 max_context_size = 12 # 麦麦获得的上文数量建议12太短太长都会导致脑袋尖尖
emoji_chance = 0.2 # 麦麦使用表情包的概率 emoji_chance = 0.2 # 麦麦使用表情包的概率设置为1让麦麦自己决定发不发
thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃 thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃
max_response_length = 256 # 麦麦回答的最大token数 max_response_length = 256 # 麦麦回答的最大token数
message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟 message_buffer = true # 启用消息缓冲器?启用此项以解决消息的拆分问题,但会使麦麦的回复延迟
@@ -159,8 +163,8 @@ min_freq=9 # 最小字频阈值
tone_error_rate=0.1 # 声调错误概率 tone_error_rate=0.1 # 声调错误概率
word_replace_rate=0.006 # 整词替换概率 word_replace_rate=0.006 # 整词替换概率
[response_spliter] [response_splitter]
enable_response_spliter = true # 是否启用回复分割器 enable_response_splitter = true # 是否启用回复分割器
response_max_length = 100 # 回复允许的最大长度 response_max_length = 100 # 回复允许的最大长度
response_max_sentence_num = 4 # 回复允许的最大句子数 response_max_sentence_num = 4 # 回复允许的最大句子数
@@ -189,11 +193,12 @@ pri_out = 16 #模型的输出价格(非必填,可以记录消耗)
#非推理模型 #非推理模型
[model.llm_normal] #V3 回复模型1 主要回复模型 [model.llm_normal] #V3 回复模型1 主要回复模型默认temp 0.2 如果你使用的是老V3或者其他模型请自己修改temp参数
name = "Pro/deepseek-ai/DeepSeek-V3" name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 2 #模型的输入价格(非必填,可以记录消耗) pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
pri_out = 8 #模型的输出价格(非必填,可以记录消耗) pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
temp = 0.2 #模型的温度新V3建议0.1-0.3
[model.llm_emotion_judge] #表情包判断 [model.llm_emotion_judge] #表情包判断
name = "Qwen/Qwen2.5-14B-Instruct" name = "Qwen/Qwen2.5-14B-Instruct"
@@ -213,11 +218,11 @@ provider = "SILICONFLOW"
pri_in = 1.26 pri_in = 1.26
pri_out = 1.26 pri_out = 1.26
[model.moderation] #内容审核,开发中 [model.llm_tool_use] #工具调用模型需要使用支持工具调用的模型建议使用qwen2.5 32b
name = "" name = "Qwen/Qwen2.5-32B-Instruct"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 1.0 pri_in = 1.26
pri_out = 2.0 pri_out = 1.26
# 识图模型 # 识图模型
@@ -247,6 +252,7 @@ name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW" provider = "SILICONFLOW"
pri_in = 2 pri_in = 2
pri_out = 8 pri_out = 8
temp = 0.2 #模型的温度新V3建议0.1-0.3
[model.llm_heartflow] #心流建议使用qwen2.5 32b [model.llm_heartflow] #心流建议使用qwen2.5 32b
# name = "Pro/Qwen/Qwen2.5-7B-Instruct" # name = "Pro/Qwen/Qwen2.5-7B-Instruct"