From a4105d06924f7547f00e85eb59ca2136c3b48fe9 Mon Sep 17 00:00:00 2001
From: Ronifue
Date: Tue, 15 Apr 2025 16:24:26 +0800
Subject: [PATCH 001/406] fix: repair kaomoji loss caused by the "better emoji-pack sending system" (so-called)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat/utils.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index b4e2cb3c2..ba4781acc 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -324,11 +324,16 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> List[str]:
+ # 先保护颜文字
+ protected_text, kaomoji_mapping = protect_kaomoji(text)
+ logger.debug(f"保护颜文字后的文本: {protected_text}")
# 提取被 () 或 [] 包裹的内容
pattern = re.compile(r"[\(\[].*?[\)\]]")
- _extracted_contents = pattern.findall(text)
+ # _extracted_contents = pattern.findall(text)
+ _extracted_contents = pattern.findall(protected_text) # 在保护后的文本上查找
# 去除 () 和 [] 及其包裹的内容
- cleaned_text = pattern.sub("", text)
+ # cleaned_text = pattern.sub("", text)
+ cleaned_text = pattern.sub("", protected_text)
logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")
# 对清理后的文本进行进一步处理
@@ -368,6 +373,8 @@ def process_llm_response(text: str) -> List[str]:
return [f"{global_config.BOT_NICKNAME}不知道哦"]
# sentences.extend(extracted_contents)
+ # 在所有句子处理完毕后,对包含占位符的列表进行恢复
+ sentences = recover_kaomoji(sentences, kaomoji_mapping)
return sentences
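
Note: `protect_kaomoji` and `recover_kaomoji` are not shown in this hunk. A minimal sketch of the placeholder round-trip they presumably perform; the detection pattern and placeholder format below are guesses, not the project's actual implementation:

```python
import re
from typing import Dict, List, Tuple

# Crude heuristic (assumption): a parenthesized run containing at least one
# symbol character is treated as a kaomoji; plain asides like (笑) remain
# eligible for the bracket-stripping step in process_llm_response.
KAOMOJI_PATTERN = re.compile(r"\([^()]*[^\w\s][^()]*\)")

def protect_kaomoji(text: str) -> Tuple[str, Dict[str, str]]:
    """Swap each kaomoji for an opaque placeholder (no brackets, so the
    bracket-stripping regex cannot delete it)."""
    mapping: Dict[str, str] = {}

    def _sub(match: re.Match) -> str:
        key = f"\x00KAOMOJI{len(mapping)}\x00"  # placeholder format is invented
        mapping[key] = match.group(0)
        return key

    return KAOMOJI_PATTERN.sub(_sub, text), mapping

def recover_kaomoji(sentences: List[str], mapping: Dict[str, str]) -> List[str]:
    """Restore the original kaomoji after sentence splitting."""
    restored = []
    for sentence in sentences:
        for key, kaomoji in mapping.items():
            sentence = sentence.replace(key, kaomoji)
        restored.append(sentence)
    return restored
```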
From 071366f89ce7ed2e9fb16e4c308ea0b2d87c1a63 Mon Sep 17 00:00:00 2001
From: HexatomicRing <54496918+HexatomicRing@users.noreply.github.com>
Date: Tue, 15 Apr 2025 16:33:39 +0800
Subject: [PATCH 002/406] Consider chat context when using tools
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/do_tool/tool_use.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/src/do_tool/tool_use.py b/src/do_tool/tool_use.py
index b14927be8..2bb8a2c65 100644
--- a/src/do_tool/tool_use.py
+++ b/src/do_tool/tool_use.py
@@ -1,3 +1,4 @@
+from src.plugins.chat.utils import get_recent_group_detailed_plain_text
from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config
from src.plugins.chat.chat_stream import ChatStream
@@ -41,6 +42,12 @@ class ToolUser:
else:
mid_memory_info = ""
+ stream_id = chat_stream.stream_id
+ chat_talking_prompt = ""
+ if stream_id:
+ chat_talking_prompt = get_recent_group_detailed_plain_text(
+ stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+ )
new_messages = list(
db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15)
)
@@ -54,9 +61,10 @@ class ToolUser:
prompt = ""
prompt += mid_memory_info
prompt += "你正在思考如何回复群里的消息。\n"
+        prompt += "之前群里进行了如下讨论:\n"
+ prompt += chat_talking_prompt
prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
- prompt += f"注意你就是{bot_name},{bot_name}指的就是你。"
-
+ prompt += f"注意你就是{bot_name},{bot_name}是你的名字。根据之前的聊天记录补充问题信息,搜索时避开你的名字。\n"
prompt += "你现在需要对群里的聊天内容进行回复,现在选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"
return prompt
From a0e4d5ea385b192b64f755aff02d62fbca076393 Mon Sep 17 00:00:00 2001
From: 墨梓柒 <1787882683@qq.com>
Date: Thu, 10 Apr 2025 22:31:22 +0800
Subject: [PATCH 003/406] Update README to match the Dev branch
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 36 ++++++++++++++++++++++++++----------
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/README.md b/README.md
index 325e3ad22..beea5757f 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
- 
+ 



@@ -37,7 +37,7 @@
-## 新版0.6.x部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
+## 新版0.6.0部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
## 📝 项目简介
@@ -60,6 +60,23 @@
+### 📢 版本信息
+
+- 💭 **智能对话系统**:基于LLM的自然语言交互
+- 🤔 **实时思维系统**:模拟人类思考过程
+- 💝 **情感表达系统**:丰富的表情包和情绪表达
+- 🧠 **持久记忆系统**:基于MongoDB的长期记忆存储
+- 🔄 **动态人格系统**:自适应的性格特征
+
+
+
+
### 📢 版本信息
**最新版本: v0.6.2** ([查看更新日志](changelogs/changelog.md))
@@ -86,7 +103,7 @@
### ⚠️ 重要提示
-- 升级到v0.6.x版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
+- 升级到v0.6.0版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
- 本版本基于MaiCore重构,通过nonebot插件与QQ平台交互
- 项目处于活跃开发阶段,功能和API可能随时调整
@@ -115,22 +132,21 @@
| 模块 | 主要功能 | 特点 |
|------|---------|------|
-| 💬 聊天系统 | • 心流/推理聊天
• 关键词主动发言
• 多模型支持
• 动态prompt构建
• 私聊功能(PFC) | 拟人化交互 |
-| 🧠 心流系统 | • 实时思考生成
• 自动启停机制
• 日程系统联动
• 工具调用能力 | 智能化决策 |
-| 🧠 记忆系统 | • 优化记忆抽取
• 海马体记忆机制
• 聊天记录概括 | 持久化记忆 |
-| 😊 表情系统 | • 情绪匹配发送
• GIF支持
• 自动收集与审查 | 丰富表达 |
+| 💬 聊天系统 | • 思维流/推理聊天
• 关键词主动发言
• 多模型支持
• 动态prompt构建
• 私聊功能(PFC) | 拟人化交互 |
+| 🧠 思维流系统 | • 实时思考生成
• 自动启停机制
• 日程系统联动 | 智能化决策 |
+| 🧠 记忆系统 2.0 | • 优化记忆抽取
• 海马体记忆机制
• 聊天记录概括 | 持久化记忆 |
+| 😊 表情包系统 | • 情绪匹配发送
• GIF支持
• 自动收集与审查 | 丰富表达 |
| 📅 日程系统 | • 动态日程生成
• 自定义想象力
• 思维流联动 | 智能规划 |
-| 👥 关系系统 | • 关系管理优化
• 丰富接口支持
• 个性化交互 | 深度社交 |
+| 👥 关系系统 2.0 | • 关系管理优化
• 丰富接口支持
• 个性化交互 | 深度社交 |
| 📊 统计系统 | • 使用数据统计
• LLM调用记录
• 实时控制台显示 | 数据可视 |
| 🔧 系统功能 | • 优雅关闭机制
• 自动数据保存
• 异常处理完善 | 稳定可靠 |
-| 🛠️ 工具系统 | • 知识获取工具
• 自动注册机制
• 多工具支持 | 扩展功能 |
## 📐 项目架构
```mermaid
graph TD
A[MaiCore] --> B[对话系统]
- A --> C[心流系统]
+ A --> C[思维流系统]
A --> D[记忆系统]
A --> E[情感系统]
B --> F[多模型支持]
From 3a04e8a6ba295f33f8459760681cb36cf044087d Mon Sep 17 00:00:00 2001
From: Ark-Hakobune
Date: Tue, 15 Apr 2025 21:58:20 +0800
Subject: [PATCH 004/406] Remove maim_message directory from tracking
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_llmcheck.py | 164 +++++++++++++++++++++++++++
1 file changed, 164 insertions(+)
create mode 100644 src/plugins/willing/mode_llmcheck.py
diff --git a/src/plugins/willing/mode_llmcheck.py b/src/plugins/willing/mode_llmcheck.py
new file mode 100644
index 000000000..62a8cedba
--- /dev/null
+++ b/src/plugins/willing/mode_llmcheck.py
@@ -0,0 +1,164 @@
+import time
+from loguru import logger
+from ..schedule.schedule_generator import bot_schedule
+from ..models.utils_model import LLM_request
+
+from ..config.config import global_config
+from ..chat.chat_stream import ChatStream
+from .mode_classical import WillingManager
+from ..chat.utils import get_recent_group_detailed_plain_text
+
+
+import re
+from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
+
+# 定义日志配置
+chat_config = LogConfig(
+ # 使用消息发送专用样式
+ console_format=CHAT_STYLE_CONFIG["console_format"],
+ file_format=CHAT_STYLE_CONFIG["file_format"],
+)
+
+# 配置主程序日志格式
+logger = get_module_logger("llm_willing", config=chat_config)
+
+class WillingManager_llm(WillingManager):
+
+ def __init__(self):
+ super().__init__()
+ self.model_v3 = LLM_request(model=global_config.llm_normal, temperature=0.3)
+
+ async def change_reply_willing_received(self, chat_stream: ChatStream, is_mentioned_bot: bool = False, config=None,
+ is_emoji: bool = False, interested_rate: float = 0, sender_id: str = None,
+ **kwargs) -> float:
+ stream_id = chat_stream.stream_id
+ if chat_stream.group_info and config:
+ if chat_stream.group_info.group_id not in config.talk_allowed_groups:
+ reply_probability = 0
+ return reply_probability
+
+ current_date = time.strftime("%Y-%m-%d", time.localtime())
+ current_time = time.strftime("%H:%M:%S", time.localtime())
+ bot_schedule_now_time, bot_schedule_now_activity = bot_schedule.get_current_task()
+
+ chat_in_group = True
+ chat_talking_prompt = ""
+ if stream_id:
+ chat_talking_prompt = get_recent_group_detailed_plain_text(
+ stream_id, limit=5, combine=True
+ )
+ if chat_stream.group_info:
+ if str(config.BOT_QQ) in chat_talking_prompt:
+ pass
+ # logger.info(f"{chat_talking_prompt}")
+ # logger.info(f"bot在群聊中5条内发过言,启动llm计算回复概率")
+ else:
+ return self.default_change_reply_willing_received(
+ chat_stream=chat_stream,
+ is_mentioned_bot=is_mentioned_bot,
+ config=config,
+ is_emoji=is_emoji,
+ interested_rate=interested_rate,
+ sender_id=sender_id,
+ )
+ else:
+ chat_in_group = False
+ chat_talking_prompt = chat_talking_prompt
+ # print(f"\033[1;34m[调试]\033[0m 已从数据库获取群 {group_id} 的消息记录:{chat_talking_prompt}")
+
+ # if is_mentioned_bot:
+ # return 1.0
+ prompt = f"""
+ 假设你正在查看一个群聊,你在这个群聊里的网名叫{global_config.BOT_NICKNAME},你还有很多别名: {"/".join(global_config.BOT_ALIAS_NAMES)},
+ 现在群里聊天的内容是{chat_talking_prompt},
+ 今天是{current_date},现在是{current_time},你现在正在{bot_schedule_now_activity}。
+ 综合群内的氛围和你自己之前的发言,给出你认为**最新的消息**需要你回复的概率,数值在0到1之间。请注意,群聊内容杂乱,很多时候对话连续,但很可能不是在和你说话。
+ 如果最新的消息和你之前的发言在内容上连续,或者提到了你的名字或者称谓,将其视作明确指向你的互动,给出高于0.8的概率。如果现在是睡眠时间,直接概率为0。如果话题内容与你之前不是紧密相关,请不要给出高于0.1的概率。
+ 请注意是判断概率,而不是编写回复内容,
+ 仅输出在0到1区间内的概率值,不要给出你的判断依据。
+ """
+
+ # 非群聊的意愿管理 未来可能可以用对话缓冲区来确定合适的回复时机
+ if not chat_in_group:
+ prompt = f"""
+ 假设你在和网友聊天,网名叫{global_config.BOT_NICKNAME},你还有很多别名: {"/".join(global_config.BOT_ALIAS_NAMES)},
+ 现在你和朋友私聊的内容是{chat_talking_prompt},
+ 今天是{current_date},现在是{current_time},你现在正在{bot_schedule_now_activity}。
+ 综合以上的内容,给出你认为最新的消息是在和你交流的概率,数值在0到1之间。如果现在是个人休息时间,直接概率为0,请注意是决定是否需要发言,而不是编写回复内容,
+ 仅输出在0到1区间内的概率值,不要给出你的判断依据。
+ """
+ content_check, reasoning_check, _ = await self.model_v3.generate_response(prompt)
+ # logger.info(f"{prompt}")
+ logger.info(f"{content_check} {reasoning_check}")
+ probability = self.extract_marked_probability(content_check)
+ if probability <= 0.1:
+ probability = min(0.03, probability)
+ if probability >= 0.8:
+ probability = max(probability, 0.90)
+ # 兴趣系数修正 无关激活效率太高,暂时停用,待新记忆系统上线后调整
+ # probability += (interested_rate * 0.25)
+ # probability = min(1.0, probability)
+ # 当前表情包理解能力较差,少说就少错
+ if is_emoji:
+ probability *= 0.1
+
+ return probability
+
+ @staticmethod
+ def extract_marked_probability(text):
+ """提取带标记的概率值 该方法主要用于测试微调prompt阶段"""
+ text = text.strip()
+ pattern = r'##PROBABILITY_START##(.*?)##PROBABILITY_END##'
+ match = re.search(pattern, text, re.DOTALL)
+ if match:
+ prob_str = match.group(1).strip()
+ # 处理百分比(65% → 0.65)
+ if '%' in prob_str:
+ return float(prob_str.replace('%', '')) / 100
+ # 处理分数(2/3 → 0.666...)
+ elif '/' in prob_str:
+ numerator, denominator = map(float, prob_str.split('/'))
+ return numerator / denominator
+ # 直接处理小数
+ else:
+ return float(prob_str)
+
+ percent_match = re.search(r'(\d{1,3})%', text) # 65%
+ decimal_match = re.search(r'(0\.\d+|1\.0+)', text) # 0.65
+ fraction_match = re.search(r'(\d+)/(\d+)', text) # 2/3
+ try:
+ if percent_match:
+ prob = float(percent_match.group(1)) / 100
+ elif decimal_match:
+ prob = float(decimal_match.group(0))
+ elif fraction_match:
+ numerator, denominator = map(float, fraction_match.groups())
+ prob = numerator / denominator
+ else:
+ return 0 # 无匹配格式
+
+ # 验证范围是否合法
+ if 0 <= prob <= 1:
+ return prob
+ return 0
+ except (ValueError, ZeroDivisionError):
+ return 0
+
+ def default_change_reply_willing_received(self, chat_stream: ChatStream, is_mentioned_bot: bool = False, config=None,
+ is_emoji: bool = False, interested_rate: float = 0, sender_id: str = None,
+ **kwargs) -> float:
+
+ current_willing = self.chat_reply_willing.get(chat_stream.stream_id, 0)
+ interested_rate = interested_rate * config.response_interested_rate_amplifier
+ if interested_rate > 0.4:
+ current_willing += interested_rate - 0.3
+ if is_mentioned_bot and current_willing < 1.0:
+ current_willing += 1
+ elif is_mentioned_bot:
+ current_willing += 0.05
+ if is_emoji:
+ current_willing *= 0.5
+ self.chat_reply_willing[chat_stream.stream_id] = min(current_willing, 3.0)
+ reply_probability = min(max((current_willing - 0.5), 0.01) * config.response_willing_amplifier * 2, 1)
+
+ return reply_probability
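
For reference, the fallback chain in `extract_marked_probability` handles these input shapes. A quick illustrative run, assuming the `WillingManager_llm` class above is imported:

```python
# Marker format is tried first, then percent/decimal/fraction fallbacks, else 0.
cases = [
    "##PROBABILITY_START## 65% ##PROBABILITY_END##",  # marker format -> 0.65
    "0.42",             # bare decimal        -> 0.42
    "2/3",              # fraction            -> 0.6666...
    "no number here",   # nothing recognized  -> 0
]
for case in cases:
    print(WillingManager_llm.extract_marked_probability(case))
```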
From be3c1e167a5e73dbae8c70b7801e4c2945b03484 Mon Sep 17 00:00:00 2001
From: Ark-Hakobune
Date: Mon, 14 Apr 2025 11:39:22 +0800
Subject: [PATCH 005/406] Complete Maicore adaptation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
docker-compose.yml | 28 ++++++++++++++++++++++++++++
src/plugins/willing/mode_llmcheck.py | 15 +++++++--------
2 files changed, 35 insertions(+), 8 deletions(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 000d00c35..807114674 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -25,9 +25,37 @@ services:
# ports:
# - "8000:8000"
volumes:
+<<<<<<< HEAD
- ./docker-config/mmc/.env:/MaiMBot/.env # 持久化env配置文件
- ./docker-config/mmc:/MaiMBot/config # 持久化bot配置文件
- ./data/MaiMBot:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
+=======
+ - ./config_ai/.env:/MaiMBot/.env # 持久化env配置文件
+ - ./config_ai:/MaiMBot/config # 持久化bot配置文件
+ - ./data_ai:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
+ - ./src:/MaiMBot/src
+ restart: always
+ depends_on:
+ - mongodb
+ networks:
+ - maim_bot
+
+ core_reimu:
+ container_name: reimu
+ image: mmc:local
+ # image: infinitycat/maimbot:main
+ environment:
+ - TZ=Asia/Shanghai
+ - EULA_AGREE=35362b6ea30f12891d46ef545122e84a # 同意EULA
+ - PRIVACY_AGREE=2402af06e133d2d10d9c6c643fdc9333 # 同意EULA
+ # ports:
+ # - "8001:8000"
+ volumes:
+ - ./config_reimu/.env:/MaiMBot/.env # 持久化env配置文件
+ - ./config_reimu:/MaiMBot/config # 持久化bot配置文件
+ - ./data_reimu/MaiMBot:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
+ - ./src:/MaiMBot/src
+>>>>>>> 7535f02 (完成Maicore适配)
restart: always
depends_on:
- mongodb
diff --git a/src/plugins/willing/mode_llmcheck.py b/src/plugins/willing/mode_llmcheck.py
index 62a8cedba..6c3e135f9 100644
--- a/src/plugins/willing/mode_llmcheck.py
+++ b/src/plugins/willing/mode_llmcheck.py
@@ -22,7 +22,7 @@ chat_config = LogConfig(
# 配置主程序日志格式
logger = get_module_logger("llm_willing", config=chat_config)
-class WillingManager_llm(WillingManager):
+class WillingManager(WillingManager):
def __init__(self):
super().__init__()
@@ -39,8 +39,6 @@ class WillingManager_llm(WillingManager):
current_date = time.strftime("%Y-%m-%d", time.localtime())
current_time = time.strftime("%H:%M:%S", time.localtime())
- bot_schedule_now_time, bot_schedule_now_activity = bot_schedule.get_current_task()
-
chat_in_group = True
chat_talking_prompt = ""
if stream_id:
@@ -71,7 +69,7 @@ class WillingManager_llm(WillingManager):
prompt = f"""
假设你正在查看一个群聊,你在这个群聊里的网名叫{global_config.BOT_NICKNAME},你还有很多别名: {"/".join(global_config.BOT_ALIAS_NAMES)},
现在群里聊天的内容是{chat_talking_prompt},
- 今天是{current_date},现在是{current_time},你现在正在{bot_schedule_now_activity}。
+ 今天是{current_date},现在是{current_time}。
综合群内的氛围和你自己之前的发言,给出你认为**最新的消息**需要你回复的概率,数值在0到1之间。请注意,群聊内容杂乱,很多时候对话连续,但很可能不是在和你说话。
如果最新的消息和你之前的发言在内容上连续,或者提到了你的名字或者称谓,将其视作明确指向你的互动,给出高于0.8的概率。如果现在是睡眠时间,直接概率为0。如果话题内容与你之前不是紧密相关,请不要给出高于0.1的概率。
请注意是判断概率,而不是编写回复内容,
@@ -83,7 +81,7 @@ class WillingManager_llm(WillingManager):
prompt = f"""
假设你在和网友聊天,网名叫{global_config.BOT_NICKNAME},你还有很多别名: {"/".join(global_config.BOT_ALIAS_NAMES)},
现在你和朋友私聊的内容是{chat_talking_prompt},
- 今天是{current_date},现在是{current_time},你现在正在{bot_schedule_now_activity}。
+ 今天是{current_date},现在是{current_time}。
综合以上的内容,给出你认为最新的消息是在和你交流的概率,数值在0到1之间。如果现在是个人休息时间,直接概率为0,请注意是决定是否需要发言,而不是编写回复内容,
仅输出在0到1区间内的概率值,不要给出你的判断依据。
"""
@@ -91,13 +89,14 @@ class WillingManager_llm(WillingManager):
# logger.info(f"{prompt}")
logger.info(f"{content_check} {reasoning_check}")
probability = self.extract_marked_probability(content_check)
+ # 兴趣系数修正 无关激活效率太高,暂时停用,待新记忆系统上线后调整
+ probability += (interested_rate * 0.25)
+ probability = min(1.0, probability)
if probability <= 0.1:
probability = min(0.03, probability)
if probability >= 0.8:
probability = max(probability, 0.90)
- # 兴趣系数修正 无关激活效率太高,暂时停用,待新记忆系统上线后调整
- # probability += (interested_rate * 0.25)
- # probability = min(1.0, probability)
+
# 当前表情包理解能力较差,少说就少错
if is_emoji:
probability *= 0.1
From 9dc4c4f5ac774e0183dfcf008dc6c453c257a827 Mon Sep 17 00:00:00 2001
From: Ark-Hakobune
Date: Tue, 15 Apr 2025 22:13:18 +0800
Subject: [PATCH 006/406] chore: sync README version with upstream
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
README.md | 36 ++++++++++--------------------------
1 file changed, 10 insertions(+), 26 deletions(-)
diff --git a/README.md b/README.md
index beea5757f..325e3ad22 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
- 
+ 



@@ -37,7 +37,7 @@
-## 新版0.6.0部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
+## 新版0.6.x部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
## 📝 项目简介
@@ -60,23 +60,6 @@
-### 📢 版本信息
-
-- 💭 **智能对话系统**:基于LLM的自然语言交互
-- 🤔 **实时思维系统**:模拟人类思考过程
-- 💝 **情感表达系统**:丰富的表情包和情绪表达
-- 🧠 **持久记忆系统**:基于MongoDB的长期记忆存储
-- 🔄 **动态人格系统**:自适应的性格特征
-
-
-
-
### 📢 版本信息
**最新版本: v0.6.2** ([查看更新日志](changelogs/changelog.md))
@@ -103,7 +86,7 @@
### ⚠️ 重要提示
-- 升级到v0.6.0版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
+- 升级到v0.6.x版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
- 本版本基于MaiCore重构,通过nonebot插件与QQ平台交互
- 项目处于活跃开发阶段,功能和API可能随时调整
@@ -132,21 +115,22 @@
| 模块 | 主要功能 | 特点 |
|------|---------|------|
-| 💬 聊天系统 | • 思维流/推理聊天
• 关键词主动发言
• 多模型支持
• 动态prompt构建
• 私聊功能(PFC) | 拟人化交互 |
-| 🧠 思维流系统 | • 实时思考生成
• 自动启停机制
• 日程系统联动 | 智能化决策 |
-| 🧠 记忆系统 2.0 | • 优化记忆抽取
• 海马体记忆机制
• 聊天记录概括 | 持久化记忆 |
-| 😊 表情包系统 | • 情绪匹配发送
• GIF支持
• 自动收集与审查 | 丰富表达 |
+| 💬 聊天系统 | • 心流/推理聊天
• 关键词主动发言
• 多模型支持
• 动态prompt构建
• 私聊功能(PFC) | 拟人化交互 |
+| 🧠 心流系统 | • 实时思考生成
• 自动启停机制
• 日程系统联动
• 工具调用能力 | 智能化决策 |
+| 🧠 记忆系统 | • 优化记忆抽取
• 海马体记忆机制
• 聊天记录概括 | 持久化记忆 |
+| 😊 表情系统 | • 情绪匹配发送
• GIF支持
• 自动收集与审查 | 丰富表达 |
| 📅 日程系统 | • 动态日程生成
• 自定义想象力
• 思维流联动 | 智能规划 |
-| 👥 关系系统 2.0 | • 关系管理优化
• 丰富接口支持
• 个性化交互 | 深度社交 |
+| 👥 关系系统 | • 关系管理优化
• 丰富接口支持
• 个性化交互 | 深度社交 |
| 📊 统计系统 | • 使用数据统计
• LLM调用记录
• 实时控制台显示 | 数据可视 |
| 🔧 系统功能 | • 优雅关闭机制
• 自动数据保存
• 异常处理完善 | 稳定可靠 |
+| 🛠️ 工具系统 | • 知识获取工具
• 自动注册机制
• 多工具支持 | 扩展功能 |
## 📐 项目架构
```mermaid
graph TD
A[MaiCore] --> B[对话系统]
- A --> C[思维流系统]
+ A --> C[心流系统]
A --> D[记忆系统]
A --> E[情感系统]
B --> F[多模型支持]
From 307f6707191d7c92b149babeedd0256b7df67f35 Mon Sep 17 00:00:00 2001
From: Ark-Hakobune
Date: Tue, 15 Apr 2025 22:15:19 +0800
Subject: [PATCH 007/406] chore: sync docker-compose version with upstream
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
docker-compose.yml | 28 ----------------------------
1 file changed, 28 deletions(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 807114674..000d00c35 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -25,37 +25,9 @@ services:
# ports:
# - "8000:8000"
volumes:
-<<<<<<< HEAD
- ./docker-config/mmc/.env:/MaiMBot/.env # 持久化env配置文件
- ./docker-config/mmc:/MaiMBot/config # 持久化bot配置文件
- ./data/MaiMBot:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
-=======
- - ./config_ai/.env:/MaiMBot/.env # 持久化env配置文件
- - ./config_ai:/MaiMBot/config # 持久化bot配置文件
- - ./data_ai:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
- - ./src:/MaiMBot/src
- restart: always
- depends_on:
- - mongodb
- networks:
- - maim_bot
-
- core_reimu:
- container_name: reimu
- image: mmc:local
- # image: infinitycat/maimbot:main
- environment:
- - TZ=Asia/Shanghai
- - EULA_AGREE=35362b6ea30f12891d46ef545122e84a # 同意EULA
- - PRIVACY_AGREE=2402af06e133d2d10d9c6c643fdc9333 # 同意EULA
- # ports:
- # - "8001:8000"
- volumes:
- - ./config_reimu/.env:/MaiMBot/.env # 持久化env配置文件
- - ./config_reimu:/MaiMBot/config # 持久化bot配置文件
- - ./data_reimu/MaiMBot:/MaiMBot/data # NapCat 和 NoneBot 共享此卷,否则发送图片会有问题
- - ./src:/MaiMBot/src
->>>>>>> 7535f02 (完成Maicore适配)
restart: always
depends_on:
- mongodb
From 195fc7327cb71d142e601189334f63d4bf9153f9 Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Tue, 15 Apr 2025 22:39:33 +0800
Subject: [PATCH 008/406] Refactor the basic willingness calculation in mxp mode
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_classical.py | 5 ----
src/plugins/willing/mode_dynamic.py | 5 ----
src/plugins/willing/mode_mxp.py | 41 +++++++-------------------
src/plugins/willing/willing_manager.py | 20 ++++++-------
4 files changed, 21 insertions(+), 50 deletions(-)
diff --git a/src/plugins/willing/mode_classical.py b/src/plugins/willing/mode_classical.py
index 294539d08..b74666215 100644
--- a/src/plugins/willing/mode_classical.py
+++ b/src/plugins/willing/mode_classical.py
@@ -75,8 +75,3 @@ class ClassicalWillingManager(BaseWillingManager):
async def not_reply_handle(self, message_id):
return await super().not_reply_handle(message_id)
- async def get_variable_parameters(self):
- return await super().get_variable_parameters()
-
- async def set_variable_parameters(self, parameters):
- return await super().set_variable_parameters(parameters)
diff --git a/src/plugins/willing/mode_dynamic.py b/src/plugins/willing/mode_dynamic.py
index 0487a1a98..1a5ebbd15 100644
--- a/src/plugins/willing/mode_dynamic.py
+++ b/src/plugins/willing/mode_dynamic.py
@@ -235,8 +235,3 @@ class DynamicWillingManager(BaseWillingManager):
async def after_generate_reply_handle(self, message_id):
return await super().after_generate_reply_handle(message_id)
- async def get_variable_parameters(self):
- return await super().get_variable_parameters()
-
- async def set_variable_parameters(self, parameters):
- return await super().set_variable_parameters(parameters)
diff --git a/src/plugins/willing/mode_mxp.py b/src/plugins/willing/mode_mxp.py
index b4fc1448c..05b6c7e5c 100644
--- a/src/plugins/willing/mode_mxp.py
+++ b/src/plugins/willing/mode_mxp.py
@@ -37,8 +37,8 @@ class MxpWillingManager(BaseWillingManager):
# 可变参数
self.intention_decay_rate = 0.93 # 意愿衰减率
- self.message_expiration_time = 120 # 消息过期时间(秒)
- self.number_of_message_storage = 10 # 消息存储数量
+ self.number_of_message_storage = 12 # 消息存储数量
+ self.expected_replies_per_min = 3 # 每分钟预期回复数
self.basic_maximum_willing = 0.5 # 基础最大意愿值
self.mention_willing_gain = 0.6 # 提及意愿增益
self.interest_willing_gain = 0.3 # 兴趣意愿增益
@@ -193,7 +193,8 @@ class MxpWillingManager(BaseWillingManager):
# 清理过期消息
current_time = time.time()
message_times = [
- msg_time for msg_time in message_times if current_time - msg_time < self.message_expiration_time
+ msg_time for msg_time in message_times if current_time - msg_time <
+ self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60
]
self.chat_new_message_time[chat_id] = message_times
@@ -202,38 +203,13 @@ class MxpWillingManager(BaseWillingManager):
update_time = 20
elif len(message_times) == self.number_of_message_storage:
time_interval = current_time - message_times[0]
- basic_willing = self.basic_maximum_willing * math.sqrt(
- time_interval / self.message_expiration_time
- )
+ basic_willing = self._basic_willing_coefficient_culculate(time_interval)
self.chat_reply_willing[chat_id] = basic_willing
- update_time = 17 * math.sqrt(time_interval / self.message_expiration_time) + 3
+ update_time = 17 * basic_willing / self.basic_maximum_willing + 3
else:
self.logger.debug(f"聊天流{chat_id}消息时间数量异常,数量:{len(message_times)}")
self.chat_reply_willing[chat_id] = 0
- async def get_variable_parameters(self) -> Dict[str, str]:
- """获取可变参数"""
- return {
- "intention_decay_rate": "意愿衰减率",
- "message_expiration_time": "消息过期时间(秒)",
- "number_of_message_storage": "消息存储数量",
- "basic_maximum_willing": "基础最大意愿值",
- "mention_willing_gain": "提及意愿增益",
- "interest_willing_gain": "兴趣意愿增益",
- "emoji_response_penalty": "表情包回复惩罚",
- "down_frequency_rate": "降低回复频率的群组惩罚系数",
- "single_chat_gain": "单聊增益(不仅是私聊)",
- }
-
- async def set_variable_parameters(self, parameters: Dict[str, any]):
- """设置可变参数"""
- async with self.lock:
- for key, value in parameters.items():
- if hasattr(self, key):
- setattr(self, key, value)
- self.logger.debug(f"参数 {key} 已更新为 {value}")
- else:
- self.logger.debug(f"尝试设置未知参数 {key}")
def _get_relationship_level_num(self, relationship_value) -> int:
"""关系等级计算"""
@@ -253,5 +229,10 @@ class MxpWillingManager(BaseWillingManager):
level_num = 5 if relationship_value > 1000 else 0
return level_num - 2
+ def _basic_willing_coefficient_culculate(self, t: float) -> float:
+ """基础意愿值系数计算"""
+ return math.tan(t * self.expected_replies_per_min * math.pi
+ / 120 / self.number_of_message_storage) / 2
+
async def get_willing(self, chat_id):
return self.temporary_willing
diff --git a/src/plugins/willing/willing_manager.py b/src/plugins/willing/willing_manager.py
index ada995120..28185bff1 100644
--- a/src/plugins/willing/willing_manager.py
+++ b/src/plugins/willing/willing_manager.py
@@ -18,8 +18,8 @@ after_generate_reply_handle 确定要回复后,在生成回复后的处理
not_reply_handle 确定不回复后的处理
get_reply_probability 获取回复概率
bombing_buffer_message_handle 缓冲器炸飞消息后的处理
-get_variable_parameters 获取可变参数组,返回一个字典,key为参数名称,value为参数描述(此方法是为拆分全局设置准备)
-set_variable_parameters 设置可变参数组,你需要传入一个字典,key为参数名称,value为参数值(此方法是为拆分全局设置准备)
+get_variable_parameters 暂不确定
+set_variable_parameters 暂不确定
以下2个方法根据你的实现可以做调整:
get_willing 获取某聊天流意愿
set_willing 设置某聊天流意愿
@@ -152,15 +152,15 @@ class BaseWillingManager(ABC):
async with self.lock:
self.chat_reply_willing[chat_id] = willing
- @abstractmethod
- async def get_variable_parameters(self) -> Dict[str, str]:
- """抽象方法:获取可变参数"""
- pass
+ # @abstractmethod
+ # async def get_variable_parameters(self) -> Dict[str, str]:
+ # """抽象方法:获取可变参数"""
+ # pass
- @abstractmethod
- async def set_variable_parameters(self, parameters: Dict[str, any]):
- """抽象方法:设置可变参数"""
- pass
+ # @abstractmethod
+ # async def set_variable_parameters(self, parameters: Dict[str, any]):
+ # """抽象方法:设置可变参数"""
+ # pass
def init_willing_manager() -> BaseWillingManager:
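
A quick numeric check of the refactor above (a sketch, using the defaults set in `mode_mxp.py`: `number_of_message_storage = 12`, `expected_replies_per_min = 3`, `basic_maximum_willing = 0.5`) shows the two new formulas are consistent: the message-expiry window and the point where the tan curve reaches the configured maximum coincide at 120 seconds.

```python
import math

N, r, w_max = 12, 3, 0.5  # defaults from this patch

def basic_willing(t: float) -> float:
    # mirrors _basic_willing_coefficient_culculate in the diff above
    return math.tan(t * r * math.pi / 120 / N) / 2

expiry = N * w_max / r * 60  # the new expiry filter in _chat_new_message_to_change_basic_willing
print(expiry)                           # 120.0 seconds
print(round(basic_willing(expiry), 3))  # 0.5 -> tan(pi/4)/2, exactly basic_maximum_willing
# Messages older than `expiry` are pruned before the interval is measured,
# so t stays well below the tan singularity at t = 120*N/(2*r) = 240 s.
```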
From 7b261078b5877e091c8c5e396d58bf7b99d220c0 Mon Sep 17 00:00:00 2001
From: Ark-Hakobune
Date: Tue, 15 Apr 2025 22:50:04 +0800
Subject: [PATCH 009/406] Willingness file for 0.6.2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_llmcheck.py | 141 +++++++++++++--------------
1 file changed, 69 insertions(+), 72 deletions(-)
diff --git a/src/plugins/willing/mode_llmcheck.py b/src/plugins/willing/mode_llmcheck.py
index 6c3e135f9..4f472e87f 100644
--- a/src/plugins/willing/mode_llmcheck.py
+++ b/src/plugins/willing/mode_llmcheck.py
@@ -1,68 +1,87 @@
+"""
+llmcheck 模式:
+此模式的一些参数不会在配置文件中显示,要修改请在可变参数下修改
+此模式的特点:
+1.在群聊内的连续对话场景下,使用大语言模型来判断回复概率
+2.非连续对话场景,使用mxp模式的意愿管理器(可另外配置)
+3.默认配置的是model_v3,当前参数适用于deepseek-v3-0324
+
+继承自其他模式,实质上仅重写get_reply_probability方法,未来可能重构成一个插件,可方便地组装到其他意愿模式上。
+目前的使用方式是拓展到其他意愿管理模式
+
+"""
import time
from loguru import logger
-from ..schedule.schedule_generator import bot_schedule
from ..models.utils_model import LLM_request
-
from ..config.config import global_config
from ..chat.chat_stream import ChatStream
-from .mode_classical import WillingManager
from ..chat.utils import get_recent_group_detailed_plain_text
-
-
+from .willing_manager import BaseWillingManager
+from .mode_mxp import MxpWillingManager
import re
-from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
+from functools import wraps
-# 定义日志配置
-chat_config = LogConfig(
- # 使用消息发送专用样式
- console_format=CHAT_STYLE_CONFIG["console_format"],
- file_format=CHAT_STYLE_CONFIG["file_format"],
-)
-# 配置主程序日志格式
-logger = get_module_logger("llm_willing", config=chat_config)
+def is_continuous_chat(self, message_id: str):
+ # 判断是否是连续对话,出于成本考虑,默认限制5条
+ willing_info = self.ongoing_messages[message_id]
+ chat_id = willing_info.chat_id
+ group_info = willing_info.chat_id
+ config = self.global_config
+ length = 5
+ if chat_id:
+ chat_talking_text = get_recent_group_detailed_plain_text(
+ chat_id, limit=length, combine=True
+ )
+ if group_info:
+ if str(config.BOT_QQ) in chat_talking_text:
+ return True
+ else:
+ return False
+ return False
-class WillingManager(WillingManager):
+def llmcheck_decorator(trigger_condition_func):
+ def decorator(func):
+ @wraps(func)
+ def wrapper(self, message_id: str):
+ if trigger_condition_func(self, message_id):
+ # 满足条件,走llm流程
+ return self.get_llmreply_probability(message_id)
+ else:
+ # 不满足条件,走默认流程
+ return func(self, message_id)
+ return wrapper
+ return decorator
+
+
+class LlmcheckWillingManager(MxpWillingManager):
def __init__(self):
super().__init__()
self.model_v3 = LLM_request(model=global_config.llm_normal, temperature=0.3)
- async def change_reply_willing_received(self, chat_stream: ChatStream, is_mentioned_bot: bool = False, config=None,
- is_emoji: bool = False, interested_rate: float = 0, sender_id: str = None,
- **kwargs) -> float:
- stream_id = chat_stream.stream_id
- if chat_stream.group_info and config:
- if chat_stream.group_info.group_id not in config.talk_allowed_groups:
+
+
+ async def get_llmreply_probability(self, message_id: str):
+ message_info = self.ongoing_messages[message_id]
+ chat_id = message_info.chat_id
+ config = self.global_config
+ # 获取信息的长度
+ length = 5
+ if message_info.group_info and config:
+ if message_info.group_info.group_id not in config.talk_allowed_groups:
reply_probability = 0
return reply_probability
current_date = time.strftime("%Y-%m-%d", time.localtime())
current_time = time.strftime("%H:%M:%S", time.localtime())
- chat_in_group = True
chat_talking_prompt = ""
- if stream_id:
+ if chat_id:
chat_talking_prompt = get_recent_group_detailed_plain_text(
- stream_id, limit=5, combine=True
+ chat_id, limit=length, combine=True
)
- if chat_stream.group_info:
- if str(config.BOT_QQ) in chat_talking_prompt:
- pass
- # logger.info(f"{chat_talking_prompt}")
- # logger.info(f"bot在群聊中5条内发过言,启动llm计算回复概率")
- else:
- return self.default_change_reply_willing_received(
- chat_stream=chat_stream,
- is_mentioned_bot=is_mentioned_bot,
- config=config,
- is_emoji=is_emoji,
- interested_rate=interested_rate,
- sender_id=sender_id,
- )
- else:
- chat_in_group = False
- chat_talking_prompt = chat_talking_prompt
- # print(f"\033[1;34m[调试]\033[0m 已从数据库获取群 {group_id} 的消息记录:{chat_talking_prompt}")
+ else:
+ return 0
# if is_mentioned_bot:
# return 1.0
@@ -76,21 +95,12 @@ class WillingManager(WillingManager):
仅输出在0到1区间内的概率值,不要给出你的判断依据。
"""
- # 非群聊的意愿管理 未来可能可以用对话缓冲区来确定合适的回复时机
- if not chat_in_group:
- prompt = f"""
- 假设你在和网友聊天,网名叫{global_config.BOT_NICKNAME},你还有很多别名: {"/".join(global_config.BOT_ALIAS_NAMES)},
- 现在你和朋友私聊的内容是{chat_talking_prompt},
- 今天是{current_date},现在是{current_time}。
- 综合以上的内容,给出你认为最新的消息是在和你交流的概率,数值在0到1之间。如果现在是个人休息时间,直接概率为0,请注意是决定是否需要发言,而不是编写回复内容,
- 仅输出在0到1区间内的概率值,不要给出你的判断依据。
- """
content_check, reasoning_check, _ = await self.model_v3.generate_response(prompt)
# logger.info(f"{prompt}")
logger.info(f"{content_check} {reasoning_check}")
probability = self.extract_marked_probability(content_check)
# 兴趣系数修正 无关激活效率太高,暂时停用,待新记忆系统上线后调整
- probability += (interested_rate * 0.25)
+ probability += (message_info.interested_rate * 0.25)
probability = min(1.0, probability)
if probability <= 0.1:
probability = min(0.03, probability)
@@ -98,8 +108,8 @@ class WillingManager(WillingManager):
probability = max(probability, 0.90)
# 当前表情包理解能力较差,少说就少错
- if is_emoji:
- probability *= 0.1
+ if message_info.is_emoji:
+ probability *= global_config.emoji_response_penalty
return probability
@@ -143,21 +153,8 @@ class WillingManager(WillingManager):
except (ValueError, ZeroDivisionError):
return 0
- def default_change_reply_willing_received(self, chat_stream: ChatStream, is_mentioned_bot: bool = False, config=None,
- is_emoji: bool = False, interested_rate: float = 0, sender_id: str = None,
- **kwargs) -> float:
-
- current_willing = self.chat_reply_willing.get(chat_stream.stream_id, 0)
- interested_rate = interested_rate * config.response_interested_rate_amplifier
- if interested_rate > 0.4:
- current_willing += interested_rate - 0.3
- if is_mentioned_bot and current_willing < 1.0:
- current_willing += 1
- elif is_mentioned_bot:
- current_willing += 0.05
- if is_emoji:
- current_willing *= 0.5
- self.chat_reply_willing[chat_stream.stream_id] = min(current_willing, 3.0)
- reply_probability = min(max((current_willing - 0.5), 0.01) * config.response_willing_amplifier * 2, 1)
-
- return reply_probability
+ @llmcheck_decorator(is_continuous_chat)
+ def get_reply_probability(self, message_id):
+ return super().get_reply_probability(
+ message_id
+ )
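
A self-contained demo of the dispatch pattern above (names invented; in the real class the predicate is `is_continuous_chat` and the LLM path is `get_llmreply_probability`). Note the wrapper stays synchronous and simply returns whichever coroutine the caller should await:

```python
import asyncio
from functools import wraps

def llm_gate(predicate):
    def decorator(func):
        @wraps(func)
        def wrapper(self, message_id):
            if predicate(self, message_id):
                return self.llm_probability(message_id)  # LLM-checked path
            return func(self, message_id)                # inherited default path
        return wrapper
    return decorator

class DemoManager:
    def __init__(self, continuous_ids):
        self.continuous_ids = continuous_ids

    async def llm_probability(self, message_id):
        return 0.9  # stand-in for the model call

    @llm_gate(lambda self, mid: mid in self.continuous_ids)
    async def get_reply_probability(self, message_id):
        return 0.1  # stand-in for the mxp default

async def main():
    manager = DemoManager({"a"})
    print(await manager.get_reply_probability("a"))  # 0.9 -> LLM path
    print(await manager.get_reply_probability("b"))  # 0.1 -> default path

asyncio.run(main())
```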
From 37c58e8b7a02de9ad7ece253901e5cc523b56986 Mon Sep 17 00:00:00 2001
From: Ark-Hakobune
Date: Tue, 15 Apr 2025 23:02:43 +0800
Subject: [PATCH 010/406] Fix a bug affecting private chats
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_llmcheck.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/plugins/willing/mode_llmcheck.py b/src/plugins/willing/mode_llmcheck.py
index 4f472e87f..d4e867a37 100644
--- a/src/plugins/willing/mode_llmcheck.py
+++ b/src/plugins/willing/mode_llmcheck.py
@@ -26,7 +26,7 @@ def is_continuous_chat(self, message_id: str):
# 判断是否是连续对话,出于成本考虑,默认限制5条
willing_info = self.ongoing_messages[message_id]
chat_id = willing_info.chat_id
- group_info = willing_info.chat_id
+ group_info = willing_info.group_info
config = self.global_config
length = 5
if chat_id:
From 21179884cfd2396ba49a8e762839b41d0652c76a Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Tue, 15 Apr 2025 23:55:15 +0800
Subject: [PATCH 011/406] Initial version of the fatigue system
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_mxp.py | 51 +++++++++++++++++++++++++++++----
1 file changed, 46 insertions(+), 5 deletions(-)
diff --git a/src/plugins/willing/mode_mxp.py b/src/plugins/willing/mode_mxp.py
index 05b6c7e5c..3fb1d7db2 100644
--- a/src/plugins/willing/mode_mxp.py
+++ b/src/plugins/willing/mode_mxp.py
@@ -10,6 +10,7 @@ Mxp 模式:梦溪畔独家赞助
4.限制同时思考的消息数量,防止喷射
5.拥有单聊增益,无论在群里还是私聊,只要bot一直和你聊,就会增加意愿值
6.意愿分为衰减意愿+临时意愿
+7.疲劳机制
如果你发现本模式出现了bug
上上策是询问智慧的小草神()
@@ -34,26 +35,47 @@ class MxpWillingManager(BaseWillingManager):
self.chat_new_message_time: Dict[str, list[float]] = {} # 聊天流ID: 消息时间
self.last_response_person: Dict[str, tuple[str, int]] = {} # 上次回复的用户信息
self.temporary_willing: float = 0 # 临时意愿值
+ self.chat_bot_message_time: Dict[str, list[float]] = {} # 聊天流ID: bot已回复消息时间
+ self.chat_fatigue_punishment_list: Dict[str, list[tuple[float, float]]] = {} # 聊天流疲劳惩罚列, 聊天流ID: 惩罚时间列(开始时间,持续时间)
+ self.chat_fatigue_willing_attenuation: Dict[str, float] = {} # 聊天流疲劳意愿衰减值
# 可变参数
self.intention_decay_rate = 0.93 # 意愿衰减率
+
self.number_of_message_storage = 12 # 消息存储数量
self.expected_replies_per_min = 3 # 每分钟预期回复数
self.basic_maximum_willing = 0.5 # 基础最大意愿值
+
self.mention_willing_gain = 0.6 # 提及意愿增益
self.interest_willing_gain = 0.3 # 兴趣意愿增益
self.emoji_response_penalty = self.global_config.emoji_response_penalty # 表情包回复惩罚
self.down_frequency_rate = self.global_config.down_frequency_rate # 降低回复频率的群组惩罚系数
self.single_chat_gain = 0.12 # 单聊增益
+ self.fatigue_messages_triggered_num = self.expected_replies_per_min # 疲劳消息触发数量(int)
+ self.fatigue_coefficient = 1.0 # 疲劳系数
+
async def async_task_starter(self) -> None:
"""异步任务启动器"""
asyncio.create_task(self._return_to_basic_willing())
asyncio.create_task(self._chat_new_message_to_change_basic_willing())
+ asyncio.create_task(self.fatigue_attenuation())
async def before_generate_reply_handle(self, message_id: str):
"""回复前处理"""
- pass
+ current_time = time.time()
+ async with self.lock:
+ w_info = self.ongoing_messages[message_id]
+ if w_info.chat_id not in self.chat_bot_message_time:
+ self.chat_bot_message_time[w_info.chat_id] = []
+ self.chat_bot_message_time[w_info.chat_id] = \
+ [t for t in self.chat_bot_message_time[w_info.chat_id] if current_time - t < 60]
+ self.chat_bot_message_time[w_info.chat_id].append(current_time)
+ if len(self.chat_bot_message_time[w_info.chat_id]) == int(self.fatigue_messages_triggered_num):
+ time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
+ if w_info.chat_id not in self.chat_fatigue_punishment_list:
+ self.chat_fatigue_punishment_list[w_info.chat_id] = []
+                self.chat_fatigue_punishment_list[w_info.chat_id].append((current_time, time_interval * 2))
async def after_generate_reply_handle(self, message_id: str):
"""回复后处理"""
@@ -122,6 +144,8 @@ class MxpWillingManager(BaseWillingManager):
elif len(chat_ongoing_messages) >= 4:
current_willing = 0
+ current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
+
probability = self._willing_to_probability(current_willing)
if w_info.is_emoji:
@@ -179,8 +203,10 @@ class MxpWillingManager(BaseWillingManager):
willing = max(0, willing)
if willing < 2:
probability = math.atan(willing * 2) / math.pi * 2
- else:
+        elif willing < 2.5:
probability = math.atan(willing * 4) / math.pi * 2
+ else:
+ probability = 1
return probability
async def _chat_new_message_to_change_basic_willing(self):
@@ -203,7 +229,7 @@ class MxpWillingManager(BaseWillingManager):
update_time = 20
elif len(message_times) == self.number_of_message_storage:
time_interval = current_time - message_times[0]
- basic_willing = self._basic_willing_coefficient_culculate(time_interval)
+ basic_willing = self._basic_willing_culculate(time_interval)
self.chat_reply_willing[chat_id] = basic_willing
update_time = 17 * basic_willing / self.basic_maximum_willing + 3
else:
@@ -229,10 +255,25 @@ class MxpWillingManager(BaseWillingManager):
level_num = 5 if relationship_value > 1000 else 0
return level_num - 2
- def _basic_willing_coefficient_culculate(self, t: float) -> float:
- """基础意愿值系数计算"""
+ def _basic_willing_culculate(self, t: float) -> float:
+ """基础意愿值计算"""
return math.tan(t * self.expected_replies_per_min * math.pi
/ 120 / self.number_of_message_storage) / 2
+
+ async def fatigue_attenuation(self):
+ """疲劳衰减"""
+ while True:
+ current_time = time.time()
+ await asyncio.sleep(1)
+ async with self.lock:
+ for chat_id, fatigue_list in self.chat_fatigue_punishment_list.items():
+ fatigue_list = [z for z in fatigue_list if current_time - z[0] < z[1]]
+ self.chat_fatigue_willing_attenuation[chat_id] = 0
+ for start_time, duration in fatigue_list:
+ self.chat_fatigue_willing_attenuation[chat_id] += \
+ (self.chat_reply_willing[chat_id] * 2 / math.pi * math.asin(
+ 2 * (current_time - start_time) / duration - 1
+ ) - self.chat_reply_willing[chat_id]) * self.fatigue_coefficient
async def get_willing(self, chat_id):
return self.temporary_willing
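
The asin-shaped penalty added in `fatigue_attenuation` starts at its most negative the moment a punishment window opens and decays smoothly to zero as the window elapses. A sketch of one term of the sum, mirroring the expression in the diff above:

```python
import math

def fatigue_term(basic_willing, start, duration, now, coeff=1.0):
    # one element of chat_fatigue_willing_attenuation for an active window
    return (basic_willing * 2 / math.pi
            * math.asin(2 * (now - start) / duration - 1)
            - basic_willing) * coeff

w = 0.5  # a typical basic willingness value
print(fatigue_term(w, 0, 60, 0))             # -1.0: full -2*w penalty at window start
print(fatigue_term(w, 0, 60, 30))            # -0.5: halfway through the window
print(round(fatigue_term(w, 0, 60, 60), 6))  # 0.0: penalty fully decayed
```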
From 10c89c0171d656247a1bc39b2c74eee9de2a5060 Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Wed, 16 Apr 2025 00:08:22 +0800
Subject: [PATCH 012/406] Add debugging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_mxp.py | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/src/plugins/willing/mode_mxp.py b/src/plugins/willing/mode_mxp.py
index 3fb1d7db2..e0e233aac 100644
--- a/src/plugins/willing/mode_mxp.py
+++ b/src/plugins/willing/mode_mxp.py
@@ -55,6 +55,8 @@ class MxpWillingManager(BaseWillingManager):
self.fatigue_messages_triggered_num = self.expected_replies_per_min # 疲劳消息触发数量(int)
self.fatigue_coefficient = 1.0 # 疲劳系数
+ self.is_debug = False # 是否开启调试模式
+
async def async_task_starter(self) -> None:
"""异步任务启动器"""
asyncio.create_task(self._return_to_basic_willing())
@@ -114,24 +116,38 @@ class MxpWillingManager(BaseWillingManager):
async with self.lock:
w_info = self.ongoing_messages[message_id]
current_willing = self.chat_person_reply_willing[w_info.chat_id][w_info.person_id]
+ if self.is_debug:
+ self.logger.debug(f"基础意愿值:{current_willing}")
if w_info.is_mentioned_bot:
current_willing += self.mention_willing_gain / (int(current_willing) + 1)
+ if self.is_debug:
+ self.logger.debug(f"提及增益:{self.mention_willing_gain / (int(current_willing) + 1)}")
if w_info.interested_rate > 0:
current_willing += math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
+ if self.is_debug:
+ self.logger.debug(f"兴趣增益:{math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain}")
self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] = current_willing
rel_value = await w_info.person_info_manager.get_value(w_info.person_id, "relationship_value")
rel_level = self._get_relationship_level_num(rel_value)
current_willing += rel_level * 0.1
+ if self.is_debug:
+ self.logger.debug(f"关系增益:{rel_level * 0.1}")
if (
w_info.chat_id in self.last_response_person
and self.last_response_person[w_info.chat_id][0] == w_info.person_id
):
current_willing += self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)
+ if self.is_debug:
+ self.logger.debug(f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)}")
+
+ current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
+ if self.is_debug:
+ self.logger.debug(f"疲劳衰减:{self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)}")
chat_ongoing_messages = [msg for msg in self.ongoing_messages.values() if msg.chat_id == w_info.chat_id]
chat_person_ogoing_messages = [msg for msg in chat_ongoing_messages if msg.person_id == w_info.person_id]
@@ -143,8 +159,10 @@ class MxpWillingManager(BaseWillingManager):
current_willing -= 1.5
elif len(chat_ongoing_messages) >= 4:
current_willing = 0
-
- current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
+ else:
+ if self.is_debug:
+ self.logger.debug("无进行中消息惩罚")
+
probability = self._willing_to_probability(current_willing)
From 9fc74cb066f935bdaf1e9de9c3a1ef963933909e Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Wed, 16 Apr 2025 01:13:18 +0800
Subject: [PATCH 013/406] feat: add unified naming that auto-replaces unique identifiers in prompts, improving prompt quality
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
requirements.txt | Bin 584 -> 574 bytes
src/heart_flow/observation.py | 3 +-
src/heart_flow/sub_heartflow.py | 71 +++------
src/plugins/chat/message.py | 28 ++--
src/plugins/chat/message_buffer.py | 4 +-
src/plugins/chat/utils.py | 138 +++++++++++++++++
.../think_flow_chat/think_flow_chat.py | 44 +++++-
.../think_flow_chat/think_flow_generator.py | 72 ++-------
.../think_flow_prompt_builder.py | 8 +-
src/plugins/config/config.py | 6 +-
src/plugins/person_info/person_info.py | 140 ++++++++++++++++++
.../person_info/relationship_manager.py | 57 +++++++
src/plugins/remote/remote.py | 1 +
13 files changed, 443 insertions(+), 129 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 0fcb31f83c499ae63c79092dcb349ca3f4112a35..45fb7e6e52e77aea95193b9fc7b9d5e9530410e6 100644
GIT binary patch
delta 11
ScmX@XvX5nh9MfbCCItW(4+A^^
delta 17
YcmdnTa)M=p9209ULo!3bWJxAf0516icK`qY
diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py
index df78bd2db..cc225be8f 100644
--- a/src/heart_flow/observation.py
+++ b/src/heart_flow/observation.py
@@ -58,7 +58,8 @@ class ChattingObservation(Observation):
for msg in mid_memory_by_id["messages"]:
msg_str += f"{msg['detailed_plain_text']}"
time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
- mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
+ # mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
+ mid_memory_str += f"{msg_str}\n"
except Exception as e:
logger.error(f"获取mid_memory_id失败: {e}")
traceback.print_exc()
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index b9da0f7ee..c7ff4524f 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -3,8 +3,9 @@ import asyncio
from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLM_request
from src.plugins.config.config import global_config
-import re
import time
+from src.plugins.chat.message import UserInfo
+from src.plugins.chat.utils import parse_text_timestamps
# from src.plugins.schedule.schedule_generator import bot_schedule
# from src.plugins.memory_system.Hippocampus import HippocampusManager
@@ -37,11 +38,11 @@ def init_prompt():
prompt += "{prompt_personality}\n"
prompt += "刚刚你的想法是{current_thinking_info}。可以适当转换话题\n"
prompt += "-----------------------------------\n"
- prompt += "现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
+ prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n"
prompt += "你现在{mood_info}\n"
prompt += "你注意到{sender_name}刚刚说:{message_txt}\n"
prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白"
- prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话\n"
+ prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话。如果你要回复,最好只回复一个人的一个话题\n"
prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{bot_name},{bot_name}指的就是你。"
Prompt(prompt, "sub_heartflow_prompt_before")
@@ -49,7 +50,7 @@ def init_prompt():
# prompt += f"你现在正在做的事情是:{schedule_info}\n"
prompt += "{extra_info}\n"
prompt += "{prompt_personality}\n"
- prompt += "现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
+ prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n"
prompt += "刚刚你的想法是{current_thinking_info}。"
prompt += "你现在看到了网友们发的新消息:{message_new_info}\n"
prompt += "你刚刚回复了群友们:{reply_info}"
@@ -154,7 +155,7 @@ class SubHeartflow:
await observation.observe()
async def do_thinking_before_reply(
- self, message_txt: str, sender_name: str, chat_stream: ChatStream, extra_info: str, obs_id: int = None
+ self, message_txt: str, sender_info: UserInfo, chat_stream: ChatStream, extra_info: str, obs_id: int = None
):
current_thinking_info = self.current_mind
mood_info = self.current_state.mood
@@ -207,8 +208,10 @@ class SubHeartflow:
# f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
# )
relation_prompt_all = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format(
- relation_prompt, sender_name
+ relation_prompt, sender_info.user_nickname
)
+
+ sender_name_sign = f"<{chat_stream.platform}:{sender_info.user_id}:{sender_info.user_nickname}:{sender_info.user_cardname}>"
# prompt = ""
# # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
@@ -226,18 +229,24 @@ class SubHeartflow:
# prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
# prompt += f"记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{self.bot_name},{self.bot_name}指的就是你。"
+ time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
extra_info_prompt,
# prompt_schedule,
relation_prompt_all,
prompt_personality,
current_thinking_info,
+ time_now,
chat_observe_info,
mood_info,
- sender_name,
+ sender_name_sign,
message_txt,
self.bot_name,
)
+
+ prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+ prompt = parse_text_timestamps(prompt, mode="lite")
try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
@@ -285,16 +294,22 @@ class SubHeartflow:
message_new_info = chat_talking_prompt
reply_info = reply_content
+
+ time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after")).format(
extra_info_prompt,
prompt_personality,
+ time_now,
chat_observe_info,
current_thinking_info,
message_new_info,
reply_info,
mood_info,
)
+
+ prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+ prompt = parse_text_timestamps(prompt, mode="lite")
try:
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
@@ -308,48 +323,6 @@ class SubHeartflow:
self.last_reply_time = time.time()
- async def judge_willing(self):
- # 开始构建prompt
- prompt_personality = "你"
- # person
- individuality = Individuality.get_instance()
-
- personality_core = individuality.personality.personality_core
- prompt_personality += personality_core
-
- personality_sides = individuality.personality.personality_sides
- random.shuffle(personality_sides)
- prompt_personality += f",{personality_sides[0]}"
-
- identity_detail = individuality.identity.identity_detail
- random.shuffle(identity_detail)
- prompt_personality += f",{identity_detail[0]}"
-
- # print("麦麦闹情绪了1")
- current_thinking_info = self.current_mind
- mood_info = self.current_state.mood
- # print("麦麦闹情绪了2")
- prompt = ""
- prompt += f"{prompt_personality}\n"
- prompt += "现在你正在上网,和qq群里的网友们聊天"
- prompt += f"你现在的想法是{current_thinking_info}。"
- prompt += f"你现在{mood_info}。"
- prompt += "现在请你思考,你想不想发言或者回复,请你输出一个数字,1-10,1表示非常不想,10表示非常想。"
- prompt += "请你用<>包裹你的回复意愿,输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"
- try:
- response, reasoning_content = await self.llm_model.generate_response_async(prompt)
- # 解析willing值
- willing_match = re.search(r"<(\d+)>", response)
- except Exception as e:
- logger.error(f"意愿判断获取失败: {e}")
- willing_match = None
- if willing_match:
- self.current_state.willing = int(willing_match.group(1))
- else:
- self.current_state.willing = 0
-
- return self.current_state.willing
-
def update_current_mind(self, response):
self.past_mind.append(self.current_mind)
self.current_mind = response
diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py
index 5dc688c03..9f55b5741 100644
--- a/src/plugins/chat/message.py
+++ b/src/plugins/chat/message.py
@@ -142,14 +142,18 @@ class MessageRecv(Message):
def _generate_detailed_text(self) -> str:
"""生成详细文本,包含时间和用户信息"""
- time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+ # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+        msg_time = self.message_info.time  # avoid shadowing the time module
user_info = self.message_info.user_info
+ # name = (
+ # f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
+ # if user_info.user_cardname != None
+ # else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ # )
name = (
- f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
- if user_info.user_cardname != None
- else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
)
- return f"[{time_str}] {name}: {self.processed_plain_text}\n"
+        return f"[{msg_time}] {name}: {self.processed_plain_text}\n"
@dataclass
@@ -239,14 +243,18 @@ class MessageProcessBase(Message):
def _generate_detailed_text(self) -> str:
"""生成详细文本,包含时间和用户信息"""
- time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+ # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+        msg_time = self.message_info.time  # avoid shadowing the time module
user_info = self.message_info.user_info
+ # name = (
+ # f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
+ # if user_info.user_cardname != None
+ # else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ # )
name = (
- f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
- if user_info.user_cardname != None
- else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+ f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
)
- return f"[{time_str}] {name}: {self.processed_plain_text}\n"
+        return f"[{msg_time}] {name}: {self.processed_plain_text}\n"
@dataclass
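
The `<platform:user_id:nickname:cardname>` sign written by `_generate_detailed_text` above is later rewritten into a per-person name by `relationship_manager.convert_all_person_sign_to_person_name`, whose implementation is not part of this hunk. A hypothetical reimplementation of the idea, with an invented `lookup` callable standing in for the real person-info store:

```python
import re

SIGN_PATTERN = re.compile(r"<([^:<>]+):([^:<>]+):([^:<>]*):([^:<>]*)>")

async def convert_all_person_sign_to_person_name(prompt: str, lookup) -> str:
    """Replace every sign token with the bot's chosen name for that person,
    falling back to the embedded nickname. `lookup` is an assumed async
    callable (platform, user_id) -> name-or-None; the real code resolves
    names through person_info/relationship_manager instead."""
    for match in list(SIGN_PATTERN.finditer(prompt)):
        platform, user_id, nickname, _cardname = match.groups()
        name = await lookup(platform, user_id) or nickname
        prompt = prompt.replace(match.group(0), name)
    return prompt
```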
diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py
index f62e015b4..21e490433 100644
--- a/src/plugins/chat/message_buffer.py
+++ b/src/plugins/chat/message_buffer.py
@@ -153,11 +153,11 @@ class MessageBuffer:
# 更新当前消息的processed_plain_text
if combined_text and combined_text[0] != message.processed_plain_text and is_update:
if type == "text":
- message.processed_plain_text = "".join(combined_text)
+ message.processed_plain_text = ",".join(combined_text)
logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容到当前消息")
elif type == "emoji":
combined_text.pop()
- message.processed_plain_text = "".join(combined_text)
+ message.processed_plain_text = ",".join(combined_text)
message.is_emoji = False
logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容,覆盖当前emoji消息")
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index b4e2cb3c2..3b7d2fc3c 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -629,3 +629,141 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
except Exception as e:
logger.error(f"计算消息数量时出错: {str(e)}")
return 0, 0
+
+
+def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
+ """将时间戳转换为人类可读的时间格式
+
+ Args:
+ timestamp: 时间戳
+ mode: 转换模式,"normal"为标准格式,"relative"为相对时间格式
+
+ Returns:
+ str: 格式化后的时间字符串
+ """
+ if mode == "normal":
+ return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
+ elif mode == "relative":
+ now = time.time()
+ diff = now - timestamp
+
+ if diff < 20:
+ return "刚刚:"
+ elif diff < 60:
+ return f"{int(diff)}秒前:"
+ elif diff < 1800:
+ return f"{int(diff / 60)}分钟前:"
+ elif diff < 3600:
+ return f"{int(diff / 60)}分钟前:\n"
+ elif diff < 86400:
+ return f"{int(diff / 3600)}小时前:\n"
+ elif diff < 604800:
+ return f"{int(diff / 86400)}天前:\n"
+ else:
+ return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":"
+
+def parse_text_timestamps(text: str, mode: str = "normal") -> str:
+ """解析文本中的时间戳并转换为可读时间格式
+
+ Args:
+ text: 包含时间戳的文本,时间戳应以[]包裹
+        mode: 转换模式,"normal"或"lite";lite模式下被选中的时间戳以"relative"格式转换
+
+ Returns:
+ str: 替换后的文本
+
+ 转换规则:
+ - normal模式: 将文本中所有时间戳转换为可读格式
+ - lite模式:
+ - 第一个和最后一个时间戳必须转换
+      - 以15秒为间隔划分时间段,每段最多转换一个时间戳
+ - 不转换的时间戳替换为空字符串
+ """
+ # 匹配[数字]或[数字.数字]格式的时间戳
+ pattern = r'\[(\d+(?:\.\d+)?)\]'
+
+ # 找出所有匹配的时间戳
+ matches = list(re.finditer(pattern, text))
+
+ if not matches:
+ return text
+
+ # normal模式: 直接转换所有时间戳
+ if mode == "normal":
+ result_text = text
+ for match in matches:
+ timestamp = float(match.group(1))
+ readable_time = translate_timestamp_to_human_readable(timestamp, "normal")
+ # 由于替换会改变文本长度,需要使用正则替换而非直接替换
+ pattern_instance = re.escape(match.group(0))
+ result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
+ return result_text
+ else:
+        # lite模式: 按15秒间隔划分并选择性转换
+ result_text = text
+
+ # 提取所有时间戳及其位置
+ timestamps = [(float(m.group(1)), m) for m in matches]
+ timestamps.sort(key=lambda x: x[0]) # 按时间戳升序排序
+
+ if not timestamps:
+ return text
+
+ # 获取第一个和最后一个时间戳
+ first_timestamp, first_match = timestamps[0]
+ last_timestamp, last_match = timestamps[-1]
+
+        # 将时间范围划分成15秒间隔的时间段
+ time_segments = {}
+
+ # 对所有时间戳按15秒间隔分组
+ for ts, match in timestamps:
+ segment_key = int(ts // 15) # 将时间戳除以15取整,作为时间段的键
+ if segment_key not in time_segments:
+ time_segments[segment_key] = []
+ time_segments[segment_key].append((ts, match))
+
+ # 记录需要转换的时间戳
+ to_convert = []
+
+ # 从每个时间段中选择一个时间戳进行转换
+ for segment, segment_timestamps in time_segments.items():
+ # 选择这个时间段中的第一个时间戳
+ to_convert.append(segment_timestamps[0])
+
+ # 确保第一个和最后一个时间戳在转换列表中
+ first_in_list = False
+ last_in_list = False
+
+ for ts, match in to_convert:
+ if ts == first_timestamp:
+ first_in_list = True
+ if ts == last_timestamp:
+ last_in_list = True
+
+ if not first_in_list:
+ to_convert.append((first_timestamp, first_match))
+ if not last_in_list:
+ to_convert.append((last_timestamp, last_match))
+
+ # 创建需要转换的时间戳集合,用于快速查找
+ to_convert_set = {match.group(0) for _, match in to_convert}
+
+ # 首先替换所有不需要转换的时间戳为空字符串
+ for ts, match in timestamps:
+ if match.group(0) not in to_convert_set:
+ pattern_instance = re.escape(match.group(0))
+ result_text = re.sub(pattern_instance, "", result_text, count=1)
+
+ # 按照时间戳原始顺序排序,避免替换时位置错误
+ to_convert.sort(key=lambda x: x[1].start())
+
+ # 执行替换
+ # 由于替换会改变文本长度,从后向前替换
+ to_convert.reverse()
+ for ts, match in to_convert:
+ readable_time = translate_timestamp_to_human_readable(ts, "relative")
+ pattern_instance = re.escape(match.group(0))
+ result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
+
+ return result_text
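
A hypothetical invocation of the helper above (timestamps and sign tokens invented). In lite mode the first and last stamps always survive, while interior stamps falling in the same 15-second segment are dropped:

```python
text = ("[1713300000.0] <qq:111:Alice:None>: hi\n"
        "[1713300004.0] <qq:222:Bob:None>: hello\n"
        "[1713300008.0] <qq:111:Alice:None>: anyone?")

print(parse_text_timestamps(text, mode="normal"))  # every stamp -> full date
print(parse_text_timestamps(text, mode="lite"))    # first/last stamps kept, middle one removed
```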
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
index 2e3a74693..1e8e844eb 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
@@ -235,6 +235,7 @@ class ThinkFlowChat:
do_reply = False
if random() < reply_probability:
try:
+
do_reply = True
# 回复前处理
@@ -258,7 +259,7 @@ class ThinkFlowChat:
await heartflow.get_subheartflow(chat.stream_id).do_observe()
except Exception as e:
logger.error(f"心流观察失败: {e}")
- traceback.print_exc()
+ logger.error(traceback.format_exc())
info_catcher.catch_after_observe(timing_results["观察"])
@@ -329,13 +330,17 @@ class ThinkFlowChat:
chat.stream_id
).do_thinking_before_reply(
message_txt=message.processed_plain_text,
- sender_name=message.message_info.user_info.user_nickname,
+ sender_info=message.message_info.user_info,
chat_stream=chat,
obs_id=get_mid_memory_id,
extra_info=tool_result_info,
)
except Exception as e:
logger.error(f"心流思考前脑内状态失败: {e}")
+ logger.error(traceback.format_exc())
+ # 确保变量被定义,即使在错误情况下
+ current_mind = ""
+ past_mind = ""
info_catcher.catch_afer_shf_step(timing_results["思考前脑内状态"], past_mind, current_mind)
@@ -373,6 +378,7 @@ class ThinkFlowChat:
except Exception as e:
logger.error(f"心流处理表情包失败: {e}")
+ # 思考后脑内状态更新
try:
with Timer("思考后脑内状态更新", timing_results):
stream_id = message.chat_stream.stream_id
@@ -387,9 +393,43 @@ class ThinkFlowChat:
)
except Exception as e:
logger.error(f"心流思考后脑内状态更新失败: {e}")
+ logger.error(traceback.format_exc())
# 回复后处理
await willing_manager.after_generate_reply_handle(message.message_info.message_id)
+
+ # 处理认识关系
+ try:
+ is_known = await relationship_manager.is_known_some_one(
+ message.message_info.platform,
+ message.message_info.user_info.user_id
+ )
+ if not is_known:
+ logger.info(f"首次认识用户: {message.message_info.user_info.user_nickname}")
+ await relationship_manager.first_knowing_some_one(
+ message.message_info.platform,
+ message.message_info.user_info.user_id,
+ message.message_info.user_info.user_nickname,
+ message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
+ ""
+ )
+ else:
+ logger.debug(f"已认识用户: {message.message_info.user_info.user_nickname}")
+ if not await relationship_manager.is_qved_name(
+ message.message_info.platform,
+ message.message_info.user_info.user_id
+ ):
+ logger.info(f"更新已认识但未取名的用户: {message.message_info.user_info.user_nickname}")
+ await relationship_manager.first_knowing_some_one(
+ message.message_info.platform,
+ message.message_info.user_info.user_id,
+ message.message_info.user_info.user_nickname,
+ message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
+ ""
+ )
+ except Exception as e:
+ logger.error(f"处理认识关系失败: {e}")
+ logger.error(traceback.format_exc())
except Exception as e:
logger.error(f"心流处理消息失败: {e}")
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
index 325ecd5c6..6f6c8bf26 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
@@ -100,15 +100,17 @@ class ResponseGenerator:
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
- if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
- sender_name = (
- f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
- f"{message.chat_stream.user_info.user_cardname}"
- )
- elif message.chat_stream.user_info.user_nickname:
- sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
- else:
- sender_name = f"用户({message.chat_stream.user_info.user_id})"
+ # if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
+ # sender_name = (
+ # f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
+ # f"{message.chat_stream.user_info.user_cardname}"
+ # )
+ # elif message.chat_stream.user_info.user_nickname:
+ # sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
+ # else:
+ # sender_name = f"用户({message.chat_stream.user_info.user_id})"
+
+ sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"
# 构建prompt
with Timer() as t_build_prompt:
@@ -119,14 +121,7 @@ class ResponseGenerator:
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
- elif mode == "simple":
- prompt = await prompt_builder._build_prompt_simple(
- message.chat_stream,
- message_txt=message.processed_plain_text,
- sender_name=sender_name,
- stream_id=message.chat_stream.stream_id,
- )
- logger.info(f"构建{mode}prompt时间: {t_build_prompt.human_readable}")
+ logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
@@ -141,49 +136,6 @@ class ResponseGenerator:
return content
- async def _check_response_with_model(
- self, message: MessageRecv, content: str, model: LLM_request, thinking_id: str
- ) -> str:
- _info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
-
- sender_name = ""
- if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
- sender_name = (
- f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
- f"{message.chat_stream.user_info.user_cardname}"
- )
- elif message.chat_stream.user_info.user_nickname:
- sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
- else:
- sender_name = f"用户({message.chat_stream.user_info.user_id})"
-
- # 构建prompt
- with Timer() as t_build_prompt_check:
- prompt = await prompt_builder._build_prompt_check_response(
- message.chat_stream,
- message_txt=message.processed_plain_text,
- sender_name=sender_name,
- stream_id=message.chat_stream.stream_id,
- content=content,
- )
- logger.info(f"构建check_prompt: {prompt}")
- logger.info(f"构建check_prompt时间: {t_build_prompt_check.human_readable}")
-
- try:
- checked_content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
-
- # info_catcher.catch_after_llm_generated(
- # prompt=prompt,
- # response=content,
- # reasoning_content=reasoning_content,
- # model_name=self.current_model_name)
-
- except Exception:
- logger.exception("检查回复时出错")
- return None
-
- return checked_content
-
async def _get_emotion_tags(self, content: str, processed_plain_text: str):
"""提取情感标签,结合立场和情绪"""
try:
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
index ed7ca72f3..29863ba72 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
@@ -8,7 +8,8 @@ from src.common.logger import get_module_logger
from ....individuality.individuality import Individuality
from src.heart_flow.heartflow import heartflow
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
-
+from src.plugins.person_info.relationship_manager import relationship_manager
+from src.plugins.chat.utils import parse_text_timestamps
logger = get_module_logger("prompt")
@@ -160,7 +161,10 @@ class PromptBuilder:
prompt_ger=prompt_ger,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
)
-
+
+ prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+ prompt = parse_text_timestamps(prompt, mode="lite")
+
return prompt
async def _build_prompt_simple(
diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py
index d0a209d35..8238078c2 100644
--- a/src/plugins/config/config.py
+++ b/src/plugins/config/config.py
@@ -26,9 +26,9 @@ config_config = LogConfig(
logger = get_module_logger("config", config=config_config)
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
-is_test = False
-mai_version_main = "0.6.2"
-mai_version_fix = ""
+is_test = True
+mai_version_main = "0.6.3"
+mai_version_fix = "snapshot-1"
if mai_version_fix:
if is_test:
diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py
index 1eb1d28dd..068c37d07 100644
--- a/src/plugins/person_info/person_info.py
+++ b/src/plugins/person_info/person_info.py
@@ -6,6 +6,9 @@ from typing import Any, Callable, Dict
import datetime
import asyncio
import numpy as np
+from src.plugins.models.utils_model import LLM_request
+from src.plugins.config.config import global_config
+from src.individuality.individuality import Individuality
import matplotlib
@@ -13,6 +16,8 @@ matplotlib.use("Agg")
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
+import json
+import re
"""
@@ -32,6 +37,8 @@ logger = get_module_logger("person_info")
person_info_default = {
"person_id": None,
+ "person_name": None,
+ "name_reason": None,
"platform": None,
"user_id": None,
"nickname": None,
@@ -48,16 +55,46 @@ person_info_default = {
class PersonInfoManager:
def __init__(self):
+ self.person_name_list = {}
+ self.qv_name_llm = LLM_request(
+ model=global_config.llm_normal,
+ max_tokens=256,
+ request_type="qv_name",
+ )
if "person_info" not in db.list_collection_names():
db.create_collection("person_info")
db.person_info.create_index("person_id", unique=True)
+
+ # 初始化时读取所有person_name
+ cursor = db.person_info.find(
+ {"person_name": {"$exists": True}},
+ {"person_id": 1, "person_name": 1, "_id": 0}
+ )
+ for doc in cursor:
+ if doc.get("person_name"):
+ self.person_name_list[doc["person_id"]] = doc["person_name"]
+ logger.debug(f"已加载 {len(self.person_name_list)} 个用户名称")
def get_person_id(self, platform: str, user_id: int):
"""获取唯一id"""
+ #如果platform中存在-,就截取-后面的部分
+ if "-" in platform:
+ platform = platform.split("-")[1]
+
components = [platform, str(user_id)]
key = "_".join(components)
return hashlib.md5(key.encode()).hexdigest()
+ def is_person_known(self, platform: str, user_id: int):
+ """判断是否认识某人"""
+ person_id = self.get_person_id(platform, user_id)
+ document = db.person_info.find_one({"person_id": person_id})
+ if document:
+ return True
+ else:
+ return False
+
+
async def create_person_info(self, person_id: str, data: dict = None):
"""创建一个项"""
if not person_id:
@@ -88,6 +125,109 @@ class PersonInfoManager:
Data[field_name] = value
logger.debug(f"更新时{person_id}不存在,已新建")
await self.create_person_info(person_id, Data)
+
+ async def has_one_field(self, person_id: str, field_name: str):
+ """判断是否存在某一个字段"""
+        document = db.person_info.find_one({"person_id": person_id, field_name: {"$exists": True}})
+ if document:
+ return True
+ else:
+ return False
+
+ def _extract_json_from_text(self, text: str) -> dict:
+ """从文本中提取JSON数据的高容错方法"""
+ try:
+
+ # 尝试直接解析
+ return json.loads(text)
+ except json.JSONDecodeError:
+ try:
+ # 尝试找到JSON格式的部分
+ json_pattern = r'\{[^{}]*\}'
+ matches = re.findall(json_pattern, text)
+ if matches:
+ return json.loads(matches[0])
+
+ # 如果上面都失败了,尝试提取键值对
+ nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
+ reason_pattern = r'"reason"[:\s]+"([^"]+)"'
+
+ nickname_match = re.search(nickname_pattern, text)
+ reason_match = re.search(reason_pattern, text)
+
+ if nickname_match:
+ return {
+ "nickname": nickname_match.group(1),
+ "reason": reason_match.group(1) if reason_match else "未提供理由"
+ }
+ except Exception as e:
+ logger.error(f"JSON提取失败: {str(e)}")
+
+ # 如果所有方法都失败了,返回空结果
+ return {"nickname": "", "reason": ""}
+
+ async def qv_person_name(self, person_id: str, user_nickname: str, user_cardname: str, user_avatar: str):
+ """给某个用户取名"""
+ if not person_id:
+ logger.debug("取名失败:person_id不能为空")
+ return
+
+ old_name = await self.get_value(person_id, "person_name")
+ old_reason = await self.get_value(person_id, "name_reason")
+
+ max_retries = 5 # 最大重试次数
+ current_try = 0
+ existing_names = ""
+ while current_try < max_retries:
+ individuality = Individuality.get_instance()
+ prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
+ bot_name = individuality.personality.bot_nickname
+
+ qv_name_prompt = f"你是{bot_name},你{prompt_personality}"
+ qv_name_prompt += f"现在你想给一个用户取一个昵称,用户是的qq昵称是{user_nickname},"
+ qv_name_prompt += f"用户的qq群昵称名是{user_cardname},"
+ if user_avatar:
+ qv_name_prompt += f"用户的qq头像是{user_avatar},"
+ if old_name:
+ qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason},"
+
+ qv_name_prompt += "\n请根据以上用户信息,想想你叫他什么比较好,请最好使用用户的qq昵称,可以稍作修改"
+ if existing_names:
+ qv_name_prompt += f"\n请注意,以下名称已被使用,不要使用以下昵称:{existing_names}。\n"
+ qv_name_prompt += "请用json给出你的想法,并给出理由,示例如下:"
+ qv_name_prompt += '''{
+ "nickname": "昵称",
+ "reason": "理由"
+ }'''
+ logger.debug(f"取名提示词:{qv_name_prompt}")
+ response = await self.qv_name_llm.generate_response(qv_name_prompt)
+ logger.debug(f"取名回复:{response}")
+ result = self._extract_json_from_text(response[0])
+
+ if not result["nickname"]:
+ logger.error("生成的昵称为空,重试中...")
+ current_try += 1
+ continue
+
+ # 检查生成的昵称是否已存在
+ if result["nickname"] not in self.person_name_list.values():
+ # 更新数据库和内存中的列表
+ await self.update_one_field(person_id, "person_name", result["nickname"])
+ # await self.update_one_field(person_id, "nickname", user_nickname)
+ # await self.update_one_field(person_id, "avatar", user_avatar)
+ await self.update_one_field(person_id, "name_reason", result["reason"])
+
+ self.person_name_list[person_id] = result["nickname"]
+ logger.debug(f"用户 {person_id} 的名称已更新为 {result['nickname']},原因:{result['reason']}")
+ return result
+ else:
+ existing_names += f"{result['nickname']}、"
+
+ logger.debug(f"生成的昵称 {result['nickname']} 已存在,重试中...")
+ current_try += 1
+
+ logger.error(f"在{max_retries}次尝试后仍未能生成唯一昵称")
+ return None
async def del_one_document(self, person_id: str):
"""删除指定 person_id 的文档"""
diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py
index 23ae7f6c8..673e0b07f 100644
--- a/src/plugins/person_info/relationship_manager.py
+++ b/src/plugins/person_info/relationship_manager.py
@@ -4,6 +4,8 @@ import math
from bson.decimal128 import Decimal128
from .person_info import person_info_manager
import time
+import re
+import traceback
relationship_config = LogConfig(
# 使用关系专用样式
@@ -74,6 +76,61 @@ class RelationshipManager:
return mood_value * coefficient
else:
return mood_value / coefficient
+
+ async def is_known_some_one(self, platform , user_id):
+ """判断是否认识某人"""
+ is_known = person_info_manager.is_person_known(platform, user_id)
+ return is_known
+
+ async def is_qved_name(self, platform , user_id):
+ """判断是否认识某人"""
+ person_id = person_info_manager.get_person_id(platform, user_id)
+ is_qved = await person_info_manager.has_one_field(person_id, "person_name")
+ old_name = await person_info_manager.get_value(person_id, "person_name")
+ print(f"old_name: {old_name}")
+ print(f"is_qved: {is_qved}")
+ if is_qved and old_name != None:
+ return True
+ else:
+ return False
+
+ async def first_knowing_some_one(self, platform , user_id, user_nickname, user_cardname, user_avatar):
+ """判断是否认识某人"""
+ person_id = person_info_manager.get_person_id(platform,user_id)
+ await person_info_manager.update_one_field(person_id, "nickname", user_nickname)
+ # await person_info_manager.update_one_field(person_id, "user_cardname", user_cardname)
+ # await person_info_manager.update_one_field(person_id, "user_avatar", user_avatar)
+ await person_info_manager.qv_person_name(person_id, user_nickname, user_cardname, user_avatar)
+
+ async def convert_all_person_sign_to_person_name(self,input_text:str):
+ """将所有人的格式转换为person_name"""
+ try:
+ # 使用正则表达式匹配格式
+ all_person = person_info_manager.person_name_list
+
+ pattern = r'<([^:]+):(\d+):([^:]+):([^>]+)>'
+ matches = re.findall(pattern, input_text)
+
+ # 遍历匹配结果,将替换为person_name
+ result_text = input_text
+ for platform, user_id, nickname, cardname in matches:
+ person_id = person_info_manager.get_person_id(platform, user_id)
+ # 默认使用昵称作为人名
+ person_name = nickname.strip() if nickname.strip() else cardname.strip()
+
+ if person_id in all_person:
+ if all_person[person_id] != None:
+ person_name = all_person[person_id]
+
+ print(f"将<{platform}:{user_id}:{nickname}:{cardname}>替换为{person_name}")
+
+
+ result_text = result_text.replace(f"<{platform}:{user_id}:{nickname}:{cardname}>", person_name)
+
+ return result_text
+ except Exception as e:
+ logger.error(traceback.format_exc())
+ return input_text
async def calculate_update_relationship_value(self, chat_stream: ChatStream, label: str, stance: str) -> tuple:
"""计算并变更关系值
diff --git a/src/plugins/remote/remote.py b/src/plugins/remote/remote.py
index a2084435f..bc7437057 100644
--- a/src/plugins/remote/remote.py
+++ b/src/plugins/remote/remote.py
@@ -126,6 +126,7 @@ def main():
"""主函数,启动心跳线程"""
# 配置
SERVER_URL = "http://hyybuth.xyz:10058"
+ # SERVER_URL = "http://localhost:10058"
HEARTBEAT_INTERVAL = 300 # 5分钟(秒)
# 创建并启动心跳线程
From a20ca1d0f2bfd5d6aca2ad94b880f74e04e34c3c Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Wed, 16 Apr 2025 01:43:36 +0800
Subject: [PATCH 014/406] =?UTF-8?q?=E4=BF=AE=E6=94=B9bug?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_mxp.py | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/src/plugins/willing/mode_mxp.py b/src/plugins/willing/mode_mxp.py
index e0e233aac..f2df94288 100644
--- a/src/plugins/willing/mode_mxp.py
+++ b/src/plugins/willing/mode_mxp.py
@@ -61,7 +61,7 @@ class MxpWillingManager(BaseWillingManager):
"""异步任务启动器"""
asyncio.create_task(self._return_to_basic_willing())
asyncio.create_task(self._chat_new_message_to_change_basic_willing())
- asyncio.create_task(self.fatigue_attenuation())
+ asyncio.create_task(self._fatigue_attenuation())
async def before_generate_reply_handle(self, message_id: str):
"""回复前处理"""
@@ -77,7 +77,7 @@ class MxpWillingManager(BaseWillingManager):
time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
if w_info.chat_id not in self.chat_fatigue_punishment_list:
self.chat_fatigue_punishment_list[w_info.chat_id] = []
- self.chat_fatigue_punishment_list[w_info.chat_id].append(current_time, time_interval * 2)
+ self.chat_fatigue_punishment_list[w_info.chat_id].append([current_time, time_interval * 2])
async def after_generate_reply_handle(self, message_id: str):
"""回复后处理"""
@@ -120,9 +120,10 @@ class MxpWillingManager(BaseWillingManager):
self.logger.debug(f"基础意愿值:{current_willing}")
if w_info.is_mentioned_bot:
- current_willing += self.mention_willing_gain / (int(current_willing) + 1)
+ current_willing_ = self.mention_willing_gain / (int(current_willing) + 1)
+ current_willing += current_willing_
if self.is_debug:
- self.logger.debug(f"提及增益:{self.mention_willing_gain / (int(current_willing) + 1)}")
+ self.logger.debug(f"提及增益:{current_willing_}")
if w_info.interested_rate > 0:
current_willing += math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
@@ -153,15 +154,20 @@ class MxpWillingManager(BaseWillingManager):
chat_person_ogoing_messages = [msg for msg in chat_ongoing_messages if msg.person_id == w_info.person_id]
if len(chat_person_ogoing_messages) >= 2:
current_willing = 0
+ if self.is_debug:
+ self.logger.debug("进行中消息惩罚:归0")
elif len(chat_ongoing_messages) == 2:
current_willing -= 0.5
+ if self.is_debug:
+ self.logger.debug("进行中消息惩罚:-0.5")
elif len(chat_ongoing_messages) == 3:
current_willing -= 1.5
+ if self.is_debug:
+ self.logger.debug("进行中消息惩罚:-1.5")
elif len(chat_ongoing_messages) >= 4:
current_willing = 0
- else:
if self.is_debug:
- self.logger.debug("无进行中消息惩罚")
+ self.logger.debug("进行中消息惩罚:归0")
probability = self._willing_to_probability(current_willing)
@@ -229,8 +235,8 @@ class MxpWillingManager(BaseWillingManager):
async def _chat_new_message_to_change_basic_willing(self):
"""聊天流新消息改变基础意愿"""
+ update_time = 20
while True:
- update_time = 20
await asyncio.sleep(update_time)
async with self.lock:
for chat_id, message_times in self.chat_new_message_time.items():
@@ -253,6 +259,8 @@ class MxpWillingManager(BaseWillingManager):
else:
self.logger.debug(f"聊天流{chat_id}消息时间数量异常,数量:{len(message_times)}")
self.chat_reply_willing[chat_id] = 0
+ if self.is_debug:
+ self.logger.debug(f"聊天流意愿值更新:{self.chat_reply_willing}")
def _get_relationship_level_num(self, relationship_value) -> int:
@@ -278,11 +286,11 @@ class MxpWillingManager(BaseWillingManager):
return math.tan(t * self.expected_replies_per_min * math.pi
/ 120 / self.number_of_message_storage) / 2
- async def fatigue_attenuation(self):
+ async def _fatigue_attenuation(self):
"""疲劳衰减"""
while True:
- current_time = time.time()
await asyncio.sleep(1)
+ current_time = time.time()
async with self.lock:
for chat_id, fatigue_list in self.chat_fatigue_punishment_list.items():
fatigue_list = [z for z in fatigue_list if current_time - z[0] < z[1]]
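
Two of the gains touched in this patch have a deliberate shape: the mention gain decays as current willing rises, and the interest gain saturates through arctan. A quick numeric feel (the gain constants are assumed values, not the project defaults):

import math

mention_willing_gain = 1.0
interest_willing_gain = 1.0

for willing in (0.0, 1.0, 2.0):
    print(f"mention gain at willing={willing}: {mention_willing_gain / (int(willing) + 1):.2f}")
# 1.00, 0.50, 0.33: being mentioned adds less when willing is already high

for rate in (0.5, 2.0, 10.0):
    print(f"interest gain at rate={rate}: {math.atan(rate / 2) / math.pi * 2 * interest_willing_gain:.2f}")
# 0.16, 0.50, 0.87: bounded by 1.0 however high the interest rate climbs
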
From 0bba90b48cb011ef50f8d043ef22de78d5c17239 Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Wed, 16 Apr 2025 10:31:27 +0800
Subject: [PATCH 015/406] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E5=88=9D=E5=A7=8B?=
=?UTF-8?q?=E8=A1=B0=E5=87=8F?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_mxp.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/src/plugins/willing/mode_mxp.py b/src/plugins/willing/mode_mxp.py
index f2df94288..f063f6f24 100644
--- a/src/plugins/willing/mode_mxp.py
+++ b/src/plugins/willing/mode_mxp.py
@@ -75,8 +75,6 @@ class MxpWillingManager(BaseWillingManager):
self.chat_bot_message_time[w_info.chat_id].append(current_time)
if len(self.chat_bot_message_time[w_info.chat_id]) == int(self.fatigue_messages_triggered_num):
time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
- if w_info.chat_id not in self.chat_fatigue_punishment_list:
- self.chat_fatigue_punishment_list[w_info.chat_id] = []
self.chat_fatigue_punishment_list[w_info.chat_id].append([current_time, time_interval * 2])
async def after_generate_reply_handle(self, message_id: str):
@@ -99,7 +97,7 @@ class MxpWillingManager(BaseWillingManager):
async with self.lock:
w_info = self.ongoing_messages[message_id]
if w_info.is_mentioned_bot:
- self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += 0.2
+ self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.mention_willing_gain / 2.5
if (
w_info.chat_id in self.last_response_person
and self.last_response_person[w_info.chat_id][0] == w_info.person_id
@@ -216,12 +214,17 @@ class MxpWillingManager(BaseWillingManager):
self.ongoing_messages[message.message_info.message_id].person_id, self.chat_reply_willing[chat.stream_id]
)
+ current_time = time.time()
if chat.stream_id not in self.chat_new_message_time:
self.chat_new_message_time[chat.stream_id] = []
- self.chat_new_message_time[chat.stream_id].append(time.time())
+ self.chat_new_message_time[chat.stream_id].append(current_time)
if len(self.chat_new_message_time[chat.stream_id]) > self.number_of_message_storage:
self.chat_new_message_time[chat.stream_id].pop(0)
+ if chat.stream_id not in self.chat_fatigue_punishment_list:
+ self.chat_fatigue_punishment_list[chat.stream_id] = [current_time,
+ self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60]
+
def _willing_to_probability(self, willing: float) -> float:
"""意愿值转化为概率"""
willing = max(0, willing)
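
The new initialization gives a brand-new chat stream a fatigue window of number_of_message_storage * basic_maximum_willing / expected_replies_per_min * 60 seconds. Rough scale under assumed config values (note the entry is written here as a flat [time, duration] pair; the next patch in the series rewraps it as a list of tuples so _fatigue_attenuation can iterate it):

number_of_message_storage = 10
basic_maximum_willing = 0.6
expected_replies_per_min = 2

window_s = number_of_message_storage * basic_maximum_willing / expected_replies_per_min * 60
print(window_s)  # 180.0, i.e. three minutes of initial damping
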
From def7e7a000b12af903e915b9d3adf0e078261c32 Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Wed, 16 Apr 2025 11:29:43 +0800
Subject: [PATCH 016/406] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=B8=8A=E6=AC=A1?=
=?UTF-8?q?=E6=8F=90=E4=BA=A4=E4=BA=A7=E7=94=9F=E7=9A=84bug?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_mxp.py | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/src/plugins/willing/mode_mxp.py b/src/plugins/willing/mode_mxp.py
index f063f6f24..82b5850ae 100644
--- a/src/plugins/willing/mode_mxp.py
+++ b/src/plugins/willing/mode_mxp.py
@@ -85,9 +85,9 @@ class MxpWillingManager(BaseWillingManager):
rel_level = self._get_relationship_level_num(rel_value)
self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += rel_level * 0.05
- now_chat_new_person = self.last_response_person.get(w_info.chat_id, ["", 0])
+ now_chat_new_person = self.last_response_person.get(w_info.chat_id, [w_info.person_id, 0])
if now_chat_new_person[0] == w_info.person_id:
- if now_chat_new_person[1] < 2:
+ if now_chat_new_person[1] < 3:
now_chat_new_person[1] += 1
else:
self.last_response_person[w_info.chat_id] = [w_info.person_id, 0]
@@ -101,9 +101,10 @@ class MxpWillingManager(BaseWillingManager):
if (
w_info.chat_id in self.last_response_person
and self.last_response_person[w_info.chat_id][0] == w_info.person_id
+ and self.last_response_person[w_info.chat_id][1]
):
self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.single_chat_gain * (
- 2 * self.last_response_person[w_info.chat_id][1] + 1
+ 2 * self.last_response_person[w_info.chat_id][1] - 1
)
now_chat_new_person = self.last_response_person.get(w_info.chat_id, ["", 0])
if now_chat_new_person[0] != w_info.person_id:
@@ -133,16 +134,17 @@ class MxpWillingManager(BaseWillingManager):
rel_value = await w_info.person_info_manager.get_value(w_info.person_id, "relationship_value")
rel_level = self._get_relationship_level_num(rel_value)
current_willing += rel_level * 0.1
- if self.is_debug:
+ if self.is_debug and rel_level != 0:
self.logger.debug(f"关系增益:{rel_level * 0.1}")
if (
w_info.chat_id in self.last_response_person
and self.last_response_person[w_info.chat_id][0] == w_info.person_id
+ and self.last_response_person[w_info.chat_id][1]
):
current_willing += self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)
if self.is_debug:
- self.logger.debug(f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)}")
+ self.logger.debug(f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] - 1)}")
current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
if self.is_debug:
@@ -222,8 +224,12 @@ class MxpWillingManager(BaseWillingManager):
self.chat_new_message_time[chat.stream_id].pop(0)
if chat.stream_id not in self.chat_fatigue_punishment_list:
- self.chat_fatigue_punishment_list[chat.stream_id] = [current_time,
- self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60]
+ self.chat_fatigue_punishment_list[chat.stream_id] = [
+ (current_time, self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60)
+ ]
+ self.chat_fatigue_willing_attenuation[chat.stream_id] = - 2 * self.basic_maximum_willing
+
+
def _willing_to_probability(self, willing: float) -> float:
"""意愿值转化为概率"""
From cce0c65ff567bf41e0016a8a88c1d8106536ca65 Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Wed, 16 Apr 2025 11:38:39 +0800
Subject: [PATCH 017/406] =?UTF-8?q?=E6=BC=8F=E4=BA=86=E4=B8=80=E5=A4=84?=
=?UTF-8?q?=E5=8F=82=E6=95=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/willing/mode_mxp.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/plugins/willing/mode_mxp.py b/src/plugins/willing/mode_mxp.py
index 82b5850ae..8ed3b60e4 100644
--- a/src/plugins/willing/mode_mxp.py
+++ b/src/plugins/willing/mode_mxp.py
@@ -227,7 +227,7 @@ class MxpWillingManager(BaseWillingManager):
self.chat_fatigue_punishment_list[chat.stream_id] = [
(current_time, self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60)
]
- self.chat_fatigue_willing_attenuation[chat.stream_id] = - 2 * self.basic_maximum_willing
+ self.chat_fatigue_willing_attenuation[chat.stream_id] = - 2 * self.basic_maximum_willing * self.fatigue_coefficient
From 1aad7f4f6e141ba001bdaa691ee3f4854dd6affc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Wed, 16 Apr 2025 15:04:00 +0900
Subject: [PATCH 018/406] =?UTF-8?q?refactor:=20=E7=A7=BB=E9=99=A4MongoDBMe?=
=?UTF-8?q?ssageStorage=E7=B1=BB=E4=B8=AD=E7=9A=84self.db=E5=BC=95?=
=?UTF-8?q?=E7=94=A8=EF=BC=8C=E7=9B=B4=E6=8E=A5=E4=BD=BF=E7=94=A8db?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/PFC/message_storage.py | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/src/plugins/PFC/message_storage.py b/src/plugins/PFC/message_storage.py
index 55bccb14e..b57f5d2b5 100644
--- a/src/plugins/PFC/message_storage.py
+++ b/src/plugins/PFC/message_storage.py
@@ -50,21 +50,18 @@ class MessageStorage(ABC):
class MongoDBMessageStorage(MessageStorage):
"""MongoDB消息存储实现"""
- def __init__(self):
- self.db = db
-
async def get_messages_after(self, chat_id: str, message_time: float) -> List[Dict[str, Any]]:
query = {"chat_id": chat_id}
# print(f"storage_check_message: {message_time}")
query["time"] = {"$gt": message_time}
- return list(self.db.messages.find(query).sort("time", 1))
+ return list(db.messages.find(query).sort("time", 1))
async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
query = {"chat_id": chat_id, "time": {"$lt": time_point}}
- messages = list(self.db.messages.find(query).sort("time", -1).limit(limit))
+ messages = list(db.messages.find(query).sort("time", -1).limit(limit))
# 将消息按时间正序排列
messages.reverse()
@@ -73,7 +70,7 @@ class MongoDBMessageStorage(MessageStorage):
async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
query = {"chat_id": chat_id, "time": {"$gt": after_time}}
- return self.db.messages.find_one(query) is not None
+ return db.messages.find_one(query) is not None
# # 创建一个内存消息存储实现,用于测试
From 4501e19dc8d4edb06d361f597557e882a44c6d16 Mon Sep 17 00:00:00 2001
From: meng_xi_pan
Date: Wed, 16 Apr 2025 14:06:16 +0800
Subject: [PATCH 019/406] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=9B=A0=E4=B8=BA?=
=?UTF-8?q?=E6=B6=88=E6=81=AF=E7=B1=BB=E5=9E=8B=E7=BB=93=E6=9E=84=E5=8F=98?=
=?UTF-8?q?=E5=8A=A8=E8=80=8C=E5=AF=BC=E8=87=B4buffer=E5=BC=82=E5=B8=B8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat/message_buffer.py | 24 +++++++++++++++----
.../reasoning_chat/reasoning_chat.py | 14 ++++++++---
.../think_flow_chat/think_flow_chat.py | 14 ++++++++---
3 files changed, 41 insertions(+), 11 deletions(-)
diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py
index 21e490433..c2cfaa826 100644
--- a/src/plugins/chat/message_buffer.py
+++ b/src/plugins/chat/message_buffer.py
@@ -3,7 +3,7 @@ from src.common.logger import get_module_logger
import asyncio
from dataclasses import dataclass, field
from .message import MessageRecv
-from ..message.message_base import BaseMessageInfo, GroupInfo
+from ..message.message_base import BaseMessageInfo, GroupInfo, Seg
import hashlib
from typing import Dict
from collections import OrderedDict
@@ -130,22 +130,36 @@ class MessageBuffer:
keep_msgs = OrderedDict()
combined_text = []
found = False
- type = "text"
+ type = "seglist"
is_update = True
for msg_id, msg in self.buffer_pool[person_id_].items():
if msg_id == message.message_info.message_id:
found = True
- type = msg.message.message_segment.type
+ if msg.message.message_segment.type != "seglist":
+ type = msg.message.message_segment.type
+ else:
+ if (isinstance(msg.message.message_segment.data, list)
+ and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
+ and len(msg.message.message_segment.data) == 1):
+ type = msg.message.message_segment.data[0].type
combined_text.append(msg.message.processed_plain_text)
continue
if found:
keep_msgs[msg_id] = msg
elif msg.result == "F":
# 收集F消息的文本内容
+ F_type = "seglist"
+ if msg.message.message_segment.type != "seglist":
+ F_type = msg.message.message_segment.type
+ else:
+ if (isinstance(msg.message.message_segment.data, list)
+ and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
+ and len(msg.message.message_segment.data) == 1):
+ F_type = msg.message.message_segment.data[0].type
if hasattr(msg.message, "processed_plain_text") and msg.message.processed_plain_text:
- if msg.message.message_segment.type == "text":
+ if F_type == "text":
combined_text.append(msg.message.processed_plain_text)
- elif msg.message.message_segment.type != "text":
+ elif F_type != "text":
is_update = False
elif msg.result == "U":
logger.debug(f"异常未处理信息id: {msg.message.message_info.message_id}")
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index 2ce218a6f..acc381f80 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -192,11 +192,19 @@ class ReasoningChat:
if not buffer_result:
await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
willing_manager.delete(message.message_info.message_id)
- if message.message_segment.type == "text":
+ F_type = "seglist"
+ if message.message_segment.type != "seglist":
+                F_type = message.message_segment.type
+ else:
+ if (isinstance(message.message_segment.data, list)
+ and all(isinstance(x, Seg) for x in message.message_segment.data)
+ and len(message.message_segment.data) == 1):
+ F_type = message.message_segment.data[0].type
+ if F_type == "text":
logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}")
- elif message.message_segment.type == "image":
+ elif F_type == "image":
logger.info("触发缓冲,已炸飞表情包/图片")
- elif message.message_segment.type == "seglist":
+ elif F_type == "seglist":
logger.info("触发缓冲,已炸飞消息列")
return
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
index 1e8e844eb..74d88dd4d 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
@@ -204,11 +204,19 @@ class ThinkFlowChat:
if not buffer_result:
await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
willing_manager.delete(message.message_info.message_id)
- if message.message_segment.type == "text":
+ F_type = "seglist"
+ if message.message_segment.type != "seglist":
+                F_type = message.message_segment.type
+ else:
+ if (isinstance(message.message_segment.data, list)
+ and all(isinstance(x, Seg) for x in message.message_segment.data)
+ and len(message.message_segment.data) == 1):
+ F_type = message.message_segment.data[0].type
+ if F_type == "text":
logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}")
- elif message.message_segment.type == "image":
+ elif F_type == "image":
logger.info("触发缓冲,已炸飞表情包/图片")
- elif message.message_segment.type == "seglist":
+ elif F_type == "seglist":
logger.info("触发缓冲,已炸飞消息列")
return
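
The same single-element-seglist unwrap now appears in three places (the buffer and both chat modules); a shared helper would express it once. Sketch with a stubbed Seg (the real class lives in src/plugins/message/message_base.py):

from dataclasses import dataclass
from typing import List, Union

@dataclass
class Seg:
    type: str
    data: Union[str, List["Seg"]]

def effective_type(segment: Seg) -> str:
    # A seglist wrapping exactly one Seg behaves as that inner type.
    if segment.type != "seglist":
        return segment.type
    if isinstance(segment.data, list) and len(segment.data) == 1 and isinstance(segment.data[0], Seg):
        return segment.data[0].type
    return "seglist"

print(effective_type(Seg("text", "hi")))                                     # text
print(effective_type(Seg("seglist", [Seg("image", "...")])))                 # image
print(effective_type(Seg("seglist", [Seg("text", "a"), Seg("image", "b")]))) # seglist
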
From 5421e625394edb28692d4c317c568d2e4639c2ad Mon Sep 17 00:00:00 2001
From: tcmofashi
Date: Wed, 16 Apr 2025 14:12:24 +0800
Subject: [PATCH 020/406] =?UTF-8?q?feat:=20=E6=94=AF=E6=8C=81=E5=AF=B9?=
=?UTF-8?q?=E6=B6=88=E6=81=AF=E6=B7=BB=E5=8A=A0is=5Fmentioned=E9=A1=B9?=
=?UTF-8?q?=E7=9B=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat/utils.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index b07c33c39..5c0c4df8d 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -46,6 +46,16 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
is_at = False
is_mentioned = False
+ if "is_mentioned" in message.message_info.additional_config.keys():
+ try:
+ reply_probability = float(message.message_info.additional_config.get("is_mentioned"))
+ is_mentioned = True
+ return is_mentioned, reply_probability
+ except Exception as e:
+ logger.warning(
+ f"消息中包含不合理的设置 is_mentioned: {message.message_info.additional_config.get('is_mentioned')}"
+ )
+
# 判断是否被@
if re.search(f"@[\s\S]*?(id:{global_config.BOT_QQ})", message.processed_plain_text):
is_at = True
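
This lets an upstream adapter force a reply probability by writing a float into additional_config. A parsing sketch (the key name follows the patch; the dict literal is illustrative):

additional_config = {"is_mentioned": "0.8"}

value = additional_config.get("is_mentioned")
if value is not None:
    try:
        reply_probability = float(value)
        print("mentioned, reply probability =", reply_probability)  # 0.8
    except (TypeError, ValueError):
        print("unusable is_mentioned value:", value)

A later cleanup patch in this series switches the membership test to the same .get(...) is not None form.
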
From a8d48fc6cfad652daaf82f967b9ee328f0d22c82 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Wed, 16 Apr 2025 16:16:37 +0900
Subject: [PATCH 021/406] =?UTF-8?q?style:=20=E6=A0=BC=E5=BC=8F=E5=8C=96?=
=?UTF-8?q?=E4=BB=A3=E7=A0=81=EF=BC=8C=E4=BF=AE=E5=A4=8D=E4=B8=8D=E4=B8=80?=
=?UTF-8?q?=E8=87=B4=E7=9A=84=E7=A9=BA=E6=A0=BC=E5=92=8C=E6=B3=A8=E9=87=8A?=
=?UTF-8?q?=EF=BC=8C=E6=9B=B4=E6=96=B0ruff=20action?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/ruff.yml | 5 +-
src/heart_flow/observation.py | 2 +-
src/heart_flow/sub_heartflow.py | 14 +++--
src/plugins/chat/message.py | 8 +--
src/plugins/chat/utils.py | 61 ++++++++++---------
.../think_flow_chat/think_flow_chat.py | 19 +++---
.../think_flow_chat/think_flow_generator.py | 2 +-
.../think_flow_prompt_builder.py | 5 +-
src/plugins/person_info/person_info.py | 51 +++++++---------
.../person_info/relationship_manager.py | 33 +++++-----
10 files changed, 98 insertions(+), 102 deletions(-)
diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
index 9c8cba5dc..b3056fa6a 100644
--- a/.github/workflows/ruff.yml
+++ b/.github/workflows/ruff.yml
@@ -12,7 +12,10 @@ jobs:
with:
fetch-depth: 0
ref: ${{ github.head_ref || github.ref_name }}
- - uses: astral-sh/ruff-action@v3
+ - name: Install the latest version of ruff
+ uses: astral-sh/ruff-action@v3
+ with:
+ version: "latest"
- run: ruff check --fix
- run: ruff format
- name: Commit changes
diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py
index cc225be8f..481e4beb7 100644
--- a/src/heart_flow/observation.py
+++ b/src/heart_flow/observation.py
@@ -57,7 +57,7 @@ class ChattingObservation(Observation):
msg_str = ""
for msg in mid_memory_by_id["messages"]:
msg_str += f"{msg['detailed_plain_text']}"
- time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
+ # time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
# mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
mid_memory_str += f"{msg_str}\n"
except Exception as e:
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index c7ff4524f..cb53c2240 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -210,8 +210,10 @@ class SubHeartflow:
relation_prompt_all = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format(
relation_prompt, sender_info.user_nickname
)
-
- sender_name_sign = f"<{chat_stream.platform}:{sender_info.user_id}:{sender_info.user_nickname}:{sender_info.user_cardname}>"
+
+ sender_name_sign = (
+ f"<{chat_stream.platform}:{sender_info.user_id}:{sender_info.user_nickname}:{sender_info.user_cardname}>"
+ )
# prompt = ""
# # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
@@ -230,7 +232,7 @@ class SubHeartflow:
# prompt += f"记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{self.bot_name},{self.bot_name}指的就是你。"
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
-
+
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
extra_info_prompt,
# prompt_schedule,
@@ -244,7 +246,7 @@ class SubHeartflow:
message_txt,
self.bot_name,
)
-
+
prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
prompt = parse_text_timestamps(prompt, mode="lite")
@@ -294,7 +296,7 @@ class SubHeartflow:
message_new_info = chat_talking_prompt
reply_info = reply_content
-
+
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after")).format(
@@ -307,7 +309,7 @@ class SubHeartflow:
reply_info,
mood_info,
)
-
+
prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
prompt = parse_text_timestamps(prompt, mode="lite")
diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py
index 9f55b5741..6279e8f25 100644
--- a/src/plugins/chat/message.py
+++ b/src/plugins/chat/message.py
@@ -150,9 +150,7 @@ class MessageRecv(Message):
# if user_info.user_cardname != None
# else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
# )
- name = (
- f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
- )
+ name = f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
return f"[{time}] {name}: {self.processed_plain_text}\n"
@@ -251,9 +249,7 @@ class MessageProcessBase(Message):
# if user_info.user_cardname != None
# else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
# )
- name = (
- f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
- )
+ name = f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
return f"[{time}] {name}: {self.processed_plain_text}\n"
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 0172289ff..5f2d0105c 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -643,11 +643,11 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
"""将时间戳转换为人类可读的时间格式
-
+
Args:
timestamp: 时间戳
mode: 转换模式,"normal"为标准格式,"relative"为相对时间格式
-
+
Returns:
str: 格式化后的时间字符串
"""
@@ -656,7 +656,7 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
elif mode == "relative":
now = time.time()
diff = now - timestamp
-
+
if diff < 20:
return "刚刚:"
elif diff < 60:
@@ -671,33 +671,34 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
return f"{int(diff / 86400)}天前:\n"
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":"
-
+
+
def parse_text_timestamps(text: str, mode: str = "normal") -> str:
"""解析文本中的时间戳并转换为可读时间格式
-
+
Args:
text: 包含时间戳的文本,时间戳应以[]包裹
         mode: 转换模式,"normal"或"lite";lite模式下以"relative"格式调用translate_timestamp_to_human_readable
-
+
Returns:
str: 替换后的文本
-
+
转换规则:
- normal模式: 将文本中所有时间戳转换为可读格式
- - lite模式:
+ - lite模式:
- 第一个和最后一个时间戳必须转换
         - 以15秒为间隔划分时间段,每段最多转换一个时间戳
- 不转换的时间戳替换为空字符串
"""
# 匹配[数字]或[数字.数字]格式的时间戳
- pattern = r'\[(\d+(?:\.\d+)?)\]'
-
+ pattern = r"\[(\d+(?:\.\d+)?)\]"
+
# 找出所有匹配的时间戳
matches = list(re.finditer(pattern, text))
-
+
if not matches:
return text
-
+
# normal模式: 直接转换所有时间戳
if mode == "normal":
result_text = text
@@ -711,63 +712,63 @@ def parse_text_timestamps(text: str, mode: str = "normal") -> str:
else:
         # lite模式: 按15秒间隔划分并选择性转换
result_text = text
-
+
# 提取所有时间戳及其位置
timestamps = [(float(m.group(1)), m) for m in matches]
timestamps.sort(key=lambda x: x[0]) # 按时间戳升序排序
-
+
if not timestamps:
return text
-
+
# 获取第一个和最后一个时间戳
first_timestamp, first_match = timestamps[0]
last_timestamp, last_match = timestamps[-1]
-
+
         # 将时间范围划分成15秒间隔的时间段
time_segments = {}
-
+
# 对所有时间戳按15秒间隔分组
for ts, match in timestamps:
segment_key = int(ts // 15) # 将时间戳除以15取整,作为时间段的键
if segment_key not in time_segments:
time_segments[segment_key] = []
time_segments[segment_key].append((ts, match))
-
+
# 记录需要转换的时间戳
to_convert = []
-
+
# 从每个时间段中选择一个时间戳进行转换
- for segment, segment_timestamps in time_segments.items():
+ for _, segment_timestamps in time_segments.items():
# 选择这个时间段中的第一个时间戳
to_convert.append(segment_timestamps[0])
-
+
# 确保第一个和最后一个时间戳在转换列表中
first_in_list = False
last_in_list = False
-
- for ts, match in to_convert:
+
+ for ts, _ in to_convert:
if ts == first_timestamp:
first_in_list = True
if ts == last_timestamp:
last_in_list = True
-
+
if not first_in_list:
to_convert.append((first_timestamp, first_match))
if not last_in_list:
to_convert.append((last_timestamp, last_match))
-
+
# 创建需要转换的时间戳集合,用于快速查找
to_convert_set = {match.group(0) for _, match in to_convert}
-
+
# 首先替换所有不需要转换的时间戳为空字符串
- for ts, match in timestamps:
+ for _, match in timestamps:
if match.group(0) not in to_convert_set:
pattern_instance = re.escape(match.group(0))
result_text = re.sub(pattern_instance, "", result_text, count=1)
-
+
# 按照时间戳原始顺序排序,避免替换时位置错误
to_convert.sort(key=lambda x: x[1].start())
-
+
# 执行替换
# 由于替换会改变文本长度,从后向前替换
to_convert.reverse()
@@ -775,5 +776,5 @@ def parse_text_timestamps(text: str, mode: str = "normal") -> str:
readable_time = translate_timestamp_to_human_readable(ts, "relative")
pattern_instance = re.escape(match.group(0))
result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
-
+
return result_text
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
index 1e8e844eb..6a896167a 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
@@ -235,7 +235,6 @@ class ThinkFlowChat:
do_reply = False
if random() < reply_probability:
try:
-
do_reply = True
# 回复前处理
@@ -397,12 +396,11 @@ class ThinkFlowChat:
# 回复后处理
await willing_manager.after_generate_reply_handle(message.message_info.message_id)
-
+
# 处理认识关系
try:
is_known = await relationship_manager.is_known_some_one(
- message.message_info.platform,
- message.message_info.user_info.user_id
+ message.message_info.platform, message.message_info.user_info.user_id
)
if not is_known:
logger.info(f"首次认识用户: {message.message_info.user_info.user_nickname}")
@@ -410,22 +408,23 @@ class ThinkFlowChat:
message.message_info.platform,
message.message_info.user_info.user_id,
message.message_info.user_info.user_nickname,
- message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
- ""
+ message.message_info.user_info.user_cardname
+ or message.message_info.user_info.user_nickname,
+ "",
)
else:
logger.debug(f"已认识用户: {message.message_info.user_info.user_nickname}")
if not await relationship_manager.is_qved_name(
- message.message_info.platform,
- message.message_info.user_info.user_id
+ message.message_info.platform, message.message_info.user_info.user_id
):
logger.info(f"更新已认识但未取名的用户: {message.message_info.user_info.user_nickname}")
await relationship_manager.first_knowing_some_one(
message.message_info.platform,
message.message_info.user_info.user_id,
message.message_info.user_info.user_nickname,
- message.message_info.user_info.user_cardname or message.message_info.user_info.user_nickname,
- ""
+ message.message_info.user_info.user_cardname
+ or message.message_info.user_info.user_nickname,
+ "",
)
except Exception as e:
logger.error(f"处理认识关系失败: {e}")
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
index 6f6c8bf26..029ed160c 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
@@ -109,7 +109,7 @@ class ResponseGenerator:
# sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
# else:
# sender_name = f"用户({message.chat_stream.user_info.user_id})"
-
+
sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"
# 构建prompt
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
index 29863ba72..eeee9ccb5 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_prompt_builder.py
@@ -10,6 +10,7 @@ from src.heart_flow.heartflow import heartflow
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.chat.utils import parse_text_timestamps
+
logger = get_module_logger("prompt")
@@ -161,10 +162,10 @@ class PromptBuilder:
prompt_ger=prompt_ger,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
)
-
+
prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
prompt = parse_text_timestamps(prompt, mode="lite")
-
+
return prompt
async def _build_prompt_simple(
diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py
index 068c37d07..7a8a0d1d5 100644
--- a/src/plugins/person_info/person_info.py
+++ b/src/plugins/person_info/person_info.py
@@ -64,12 +64,9 @@ class PersonInfoManager:
if "person_info" not in db.list_collection_names():
db.create_collection("person_info")
db.person_info.create_index("person_id", unique=True)
-
+
# 初始化时读取所有person_name
- cursor = db.person_info.find(
- {"person_name": {"$exists": True}},
- {"person_id": 1, "person_name": 1, "_id": 0}
- )
+ cursor = db.person_info.find({"person_name": {"$exists": True}}, {"person_id": 1, "person_name": 1, "_id": 0})
for doc in cursor:
if doc.get("person_name"):
self.person_name_list[doc["person_id"]] = doc["person_name"]
@@ -77,10 +74,10 @@ class PersonInfoManager:
def get_person_id(self, platform: str, user_id: int):
"""获取唯一id"""
- #如果platform中存在-,就截取-后面的部分
+ # 如果platform中存在-,就截取-后面的部分
if "-" in platform:
platform = platform.split("-")[1]
-
+
components = [platform, str(user_id)]
key = "_".join(components)
return hashlib.md5(key.encode()).hexdigest()
@@ -93,8 +90,7 @@ class PersonInfoManager:
return True
else:
return False
-
-
+
async def create_person_info(self, person_id: str, data: dict = None):
"""创建一个项"""
if not person_id:
@@ -125,7 +121,7 @@ class PersonInfoManager:
Data[field_name] = value
logger.debug(f"更新时{person_id}不存在,已新建")
await self.create_person_info(person_id, Data)
-
+
async def has_one_field(self, person_id: str, field_name: str):
"""判断是否存在某一个字段"""
         document = db.person_info.find_one({"person_id": person_id, field_name: {"$exists": True}})
@@ -133,36 +129,35 @@ class PersonInfoManager:
return True
else:
return False
-
+
def _extract_json_from_text(self, text: str) -> dict:
"""从文本中提取JSON数据的高容错方法"""
try:
-
# 尝试直接解析
return json.loads(text)
except json.JSONDecodeError:
try:
# 尝试找到JSON格式的部分
- json_pattern = r'\{[^{}]*\}'
+ json_pattern = r"\{[^{}]*\}"
matches = re.findall(json_pattern, text)
if matches:
return json.loads(matches[0])
-
+
# 如果上面都失败了,尝试提取键值对
nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
reason_pattern = r'"reason"[:\s]+"([^"]+)"'
-
+
nickname_match = re.search(nickname_pattern, text)
reason_match = re.search(reason_pattern, text)
-
+
if nickname_match:
return {
"nickname": nickname_match.group(1),
- "reason": reason_match.group(1) if reason_match else "未提供理由"
+ "reason": reason_match.group(1) if reason_match else "未提供理由",
}
except Exception as e:
logger.error(f"JSON提取失败: {str(e)}")
-
+
# 如果所有方法都失败了,返回空结果
return {"nickname": "", "reason": ""}
@@ -171,10 +166,10 @@ class PersonInfoManager:
if not person_id:
logger.debug("取名失败:person_id不能为空")
return
-
+
old_name = await self.get_value(person_id, "person_name")
old_reason = await self.get_value(person_id, "name_reason")
-
+
max_retries = 5 # 最大重试次数
current_try = 0
existing_names = ""
@@ -182,7 +177,7 @@ class PersonInfoManager:
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
bot_name = individuality.personality.bot_nickname
-
+
qv_name_prompt = f"你是{bot_name},你{prompt_personality}"
qv_name_prompt += f"现在你想给一个用户取一个昵称,用户是的qq昵称是{user_nickname},"
qv_name_prompt += f"用户的qq群昵称名是{user_cardname},"
@@ -195,20 +190,20 @@ class PersonInfoManager:
if existing_names:
qv_name_prompt += f"\n请注意,以下名称已被使用,不要使用以下昵称:{existing_names}。\n"
qv_name_prompt += "请用json给出你的想法,并给出理由,示例如下:"
- qv_name_prompt += '''{
+ qv_name_prompt += """{
"nickname": "昵称",
"reason": "理由"
- }'''
+ }"""
logger.debug(f"取名提示词:{qv_name_prompt}")
response = await self.qv_name_llm.generate_response(qv_name_prompt)
logger.debug(f"取名回复:{response}")
result = self._extract_json_from_text(response[0])
-
+
if not result["nickname"]:
logger.error("生成的昵称为空,重试中...")
current_try += 1
continue
-
+
# 检查生成的昵称是否已存在
if result["nickname"] not in self.person_name_list.values():
# 更新数据库和内存中的列表
@@ -216,16 +211,16 @@ class PersonInfoManager:
# await self.update_one_field(person_id, "nickname", user_nickname)
# await self.update_one_field(person_id, "avatar", user_avatar)
await self.update_one_field(person_id, "name_reason", result["reason"])
-
+
self.person_name_list[person_id] = result["nickname"]
logger.debug(f"用户 {person_id} 的名称已更新为 {result['nickname']},原因:{result['reason']}")
return result
else:
existing_names += f"{result['nickname']}、"
-
+
logger.debug(f"生成的昵称 {result['nickname']} 已存在,重试中...")
current_try += 1
-
+
logger.error(f"在{max_retries}次尝试后仍未能生成唯一昵称")
return None
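
_extract_json_from_text degrades through three attempts: whole-string json.loads, then the first {...} blob, then bare key regexes. A standalone reproduction of that ladder:

import json
import re

def extract_json(text: str) -> dict:
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        blob = re.search(r"\{[^{}]*\}", text)
        if blob:
            try:
                return json.loads(blob.group(0))
            except json.JSONDecodeError:
                pass
        nick = re.search(r'"nickname"[:\s]+"([^"]+)"', text)
        reason = re.search(r'"reason"[:\s]+"([^"]+)"', text)
        if nick:
            return {"nickname": nick.group(1), "reason": reason.group(1) if reason else "未提供理由"}
    return {"nickname": "", "reason": ""}

print(extract_json('好的!{"nickname": "小明", "reason": "好记"} 希望你喜欢'))
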
diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py
index 673e0b07f..696a6391b 100644
--- a/src/plugins/person_info/relationship_manager.py
+++ b/src/plugins/person_info/relationship_manager.py
@@ -76,13 +76,13 @@ class RelationshipManager:
return mood_value * coefficient
else:
return mood_value / coefficient
-
- async def is_known_some_one(self, platform , user_id):
+
+ async def is_known_some_one(self, platform, user_id):
"""判断是否认识某人"""
is_known = person_info_manager.is_person_known(platform, user_id)
return is_known
- async def is_qved_name(self, platform , user_id):
+ async def is_qved_name(self, platform, user_id):
"""判断是否认识某人"""
person_id = person_info_manager.get_person_id(platform, user_id)
is_qved = await person_info_manager.has_one_field(person_id, "person_name")
@@ -93,42 +93,41 @@ class RelationshipManager:
return True
else:
return False
-
- async def first_knowing_some_one(self, platform , user_id, user_nickname, user_cardname, user_avatar):
+
+ async def first_knowing_some_one(self, platform, user_id, user_nickname, user_cardname, user_avatar):
"""判断是否认识某人"""
- person_id = person_info_manager.get_person_id(platform,user_id)
+ person_id = person_info_manager.get_person_id(platform, user_id)
await person_info_manager.update_one_field(person_id, "nickname", user_nickname)
# await person_info_manager.update_one_field(person_id, "user_cardname", user_cardname)
# await person_info_manager.update_one_field(person_id, "user_avatar", user_avatar)
await person_info_manager.qv_person_name(person_id, user_nickname, user_cardname, user_avatar)
-
- async def convert_all_person_sign_to_person_name(self,input_text:str):
+
+ async def convert_all_person_sign_to_person_name(self, input_text: str):
"""将所有人的格式转换为person_name"""
try:
# 使用正则表达式匹配格式
all_person = person_info_manager.person_name_list
-
- pattern = r'<([^:]+):(\d+):([^:]+):([^>]+)>'
+
+ pattern = r"<([^:]+):(\d+):([^:]+):([^>]+)>"
matches = re.findall(pattern, input_text)
-
+
# 遍历匹配结果,将替换为person_name
result_text = input_text
for platform, user_id, nickname, cardname in matches:
person_id = person_info_manager.get_person_id(platform, user_id)
# 默认使用昵称作为人名
person_name = nickname.strip() if nickname.strip() else cardname.strip()
-
+
if person_id in all_person:
if all_person[person_id] != None:
person_name = all_person[person_id]
-
+
print(f"将<{platform}:{user_id}:{nickname}:{cardname}>替换为{person_name}")
-
-
+
result_text = result_text.replace(f"<{platform}:{user_id}:{nickname}:{cardname}>", person_name)
-
+
return result_text
- except Exception as e:
+ except Exception:
logger.error(traceback.format_exc())
return input_text
From 328d759ace42e31404d1bd83e12bb9d06ed96702 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Wed, 16 Apr 2025 16:26:15 +0900
Subject: [PATCH 022/406] =?UTF-8?q?fix:=20=E4=BF=AE=E5=A4=8D=E6=AD=A3?=
=?UTF-8?q?=E5=88=99=E8=A1=A8=E8=BE=BE=E5=BC=8F=E4=B8=AD=E7=9A=84=E6=8B=AC?=
=?UTF-8?q?=E5=8F=B7=E5=8C=B9=E9=85=8D=EF=BC=8C=E4=BC=98=E5=8C=96=E7=BB=9F?=
=?UTF-8?q?=E8=AE=A1=E8=BE=93=E5=87=BA=E6=A0=BC=E5=BC=8F?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.gitignore | 1 -
src/plugins/chat/utils.py | 19 ++++++++++---------
src/plugins/utils/prompt_builder.py | 4 ++--
src/plugins/utils/statistic.py | 16 +++-------------
4 files changed, 15 insertions(+), 25 deletions(-)
diff --git a/.gitignore b/.gitignore
index 3e9b98685..37813f433 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,7 +29,6 @@ config/bot_config_dev.toml
config/bot_config.toml
config/bot_config.toml.bak
src/plugins/remote/client_uuid.json
-run_none.bat
(测试版)麦麦生成人格.bat
(临时版)麦麦开始学习.bat
src/plugins/utils/statistic.py
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 5f2d0105c..b223e87da 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -46,12 +46,13 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
is_at = False
is_mentioned = False
- if "is_mentioned" in message.message_info.additional_config.keys():
+ if message.message_info.additional_config.get("is_mentioned") is not None:
try:
reply_probability = float(message.message_info.additional_config.get("is_mentioned"))
is_mentioned = True
return is_mentioned, reply_probability
except Exception as e:
+ logger.warning(e)
logger.warning(
f"消息中包含不合理的设置 is_mentioned: {message.message_info.additional_config.get('is_mentioned')}"
)
@@ -71,7 +72,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
is_mentioned = True
# 判断内容中是否被提及
- message_content = re.sub(r"\@[\s\S]*?((\d+))", "", message.processed_plain_text)
+ message_content = re.sub(r"@[\s\S]*?((\d+))", "", message.processed_plain_text)
message_content = re.sub(r"回复[\s\S]*?\((\d+)\)的消息,说: ", "", message_content)
for keyword in keywords:
if keyword in message_content:
@@ -335,7 +336,7 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> List[str]:
# 提取被 () 或 [] 包裹的内容
- pattern = re.compile(r"[\(\[].*?[\)\]]")
+ pattern = re.compile(r"[(\[].*?[\)\]]")
_extracted_contents = pattern.findall(text)
# 去除 () 和 [] 及其包裹的内容
cleaned_text = pattern.sub("", text)
@@ -496,16 +497,16 @@ def protect_kaomoji(sentence):
"""
kaomoji_pattern = re.compile(
r"("
- r"[\(\[(【]" # 左括号
+ r"[(\[(【]" # 左括号
r"[^()\[\]()【】]*?" # 非括号字符(惰性匹配)
- r"[^\u4e00-\u9fa5a-zA-Z0-9\s]" # 非中文、非英文、非数字、非空格字符(必须包含至少一个)
+ r"[^一-龥a-zA-Z0-9\s]" # 非中文、非英文、非数字、非空格字符(必须包含至少一个)
r"[^()\[\]()【】]*?" # 非括号字符(惰性匹配)
- r"[\)\])】]" # 右括号
+ r"[\)\])】" # 右括号
+ r"]"
r")"
r"|"
- r"("
- r"[▼▽・ᴥω・﹏^><≧≦ ̄`´∀ヮДд︿﹀へ。゚╥╯╰︶︹•⁄]{2,15}"
- r")"
+ r"([▼▽・ᴥω・﹏^><≧≦ ̄`´∀ヮДд︿﹀へ。゚╥╯╰︶︹•⁄]{2,15"
+ r"}"
)
kaomoji_matches = kaomoji_pattern.findall(sentence)
diff --git a/src/plugins/utils/prompt_builder.py b/src/plugins/utils/prompt_builder.py
index f3de24e4f..578d9677f 100644
--- a/src/plugins/utils/prompt_builder.py
+++ b/src/plugins/utils/prompt_builder.py
@@ -119,7 +119,7 @@ class Prompt(str):
# 解析模板
template_args = []
- result = re.findall(r"\{(.*?)\}", processed_fstr)
+ result = re.findall(r"\{(.*?)}", processed_fstr)
for expr in result:
if expr and expr not in template_args:
template_args.append(expr)
@@ -164,7 +164,7 @@ class Prompt(str):
processed_template = cls._process_escaped_braces(template)
template_args = []
- result = re.findall(r"\{(.*?)\}", processed_template)
+ result = re.findall(r"\{(.*?)}", processed_template)
for expr in result:
if expr and expr not in template_args:
template_args.append(expr)
diff --git a/src/plugins/utils/statistic.py b/src/plugins/utils/statistic.py
index 10133f2b7..af7138744 100644
--- a/src/plugins/utils/statistic.py
+++ b/src/plugins/utils/statistic.py
@@ -175,13 +175,8 @@ class LLMStatistics:
def _format_stats_section(self, stats: Dict[str, Any], title: str) -> str:
"""格式化统计部分的输出"""
- output = []
+ output = ["\n" + "-" * 84, f"{title}", "-" * 84, f"总请求数: {stats['total_requests']}"]
- output.append("\n" + "-" * 84)
- output.append(f"{title}")
- output.append("-" * 84)
-
- output.append(f"总请求数: {stats['total_requests']}")
if stats["total_requests"] > 0:
output.append(f"总Token数: {stats['total_tokens']}")
output.append(f"总花费: {stats['total_cost']:.4f}¥")
@@ -238,11 +233,7 @@ class LLMStatistics:
def _format_stats_section_lite(self, stats: Dict[str, Any], title: str) -> str:
"""格式化统计部分的输出"""
- output = []
-
- output.append("\n" + "-" * 84)
- output.append(f"{title}")
- output.append("-" * 84)
+ output = ["\n" + "-" * 84, f"{title}", "-" * 84]
# output.append(f"总请求数: {stats['total_requests']}")
if stats["total_requests"] > 0:
@@ -303,8 +294,7 @@ class LLMStatistics:
"""将统计结果保存到文件"""
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
- output = []
- output.append(f"LLM请求统计报告 (生成时间: {current_time})")
+ output = [f"LLM请求统计报告 (生成时间: {current_time})"]
# 添加各个时间段的统计
sections = [
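Two notes on this patch. The split string literals in the kaomoji hunk above are not truncations: Python concatenates adjacent string literals at compile time, so r"...{2,15" r"}" produces exactly the same pattern as the unsplit form. A minimal demonstration, with the character class shortened for illustration:

    # Adjacent literals concatenate before re.compile ever sees them.
    a = r"[▼▽]{2,15}"
    b = r"[▼▽]{2,15" r"}"
    assert a == b

The statistic.py hunks are the same kind of behavior-preserving change: runs of consecutive output.append(...) calls are collapsed into a single list literal, leaving the rendered report identical.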
From a0b1b1f8d8c2e43f30f59e192a2c43127b837b3d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Wed, 16 Apr 2025 16:34:33 +0900
Subject: [PATCH 023/406] =?UTF-8?q?fix:=20=E4=BF=AE=E6=94=B9is=5Fmentioned?=
=?UTF-8?q?=5Fbot=5Fin=5Fmessage=E5=87=BD=E6=95=B0=EF=BC=8C=E8=BF=94?=
=?UTF-8?q?=E5=9B=9E=E7=B1=BB=E5=9E=8B=E6=94=B9=E4=B8=BA=E5=85=83=E7=BB=84?=
=?UTF-8?q?=E5=B9=B6=E4=BF=AE=E6=AD=A3=E5=9B=9E=E5=A4=8D=E6=A6=82=E7=8E=87?=
=?UTF-8?q?=E4=B8=BA=E6=B5=AE=E7=82=B9=E6=95=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat/utils.py | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index b223e87da..81c19af80 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -38,15 +38,18 @@ def db_message_to_str(message_dict: Dict) -> str:
return result
-def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
+def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
"""检查消息是否提到了机器人"""
keywords = [global_config.BOT_NICKNAME]
nicknames = global_config.BOT_ALIAS_NAMES
- reply_probability = 0
+ reply_probability = 0.0
is_at = False
is_mentioned = False
- if message.message_info.additional_config.get("is_mentioned") is not None:
+ if (
+ message.message_info.additional_config is not None
+ and message.message_info.additional_config.get("is_mentioned") is not None
+ ):
try:
reply_probability = float(message.message_info.additional_config.get("is_mentioned"))
is_mentioned = True
@@ -63,7 +66,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
is_mentioned = True
if is_at and global_config.at_bot_inevitable_reply:
- reply_probability = 1
+ reply_probability = 1.0
logger.info("被@,回复概率设置为100%")
else:
if not is_mentioned:
@@ -81,7 +84,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
if nickname in message_content:
is_mentioned = True
if is_mentioned and global_config.mentioned_bot_inevitable_reply:
- reply_probability = 1
+ reply_probability = 1.0
logger.info("被提及,回复概率设置为100%")
return is_mentioned, reply_probability
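A self-contained sketch of the guarded float coercion this patch introduces; additional_config below is a plain dict standing in for the real MessageRecv field, and the payload value is hypothetical. Note that the tuple[bool, float] annotation uses builtin generics, which require Python 3.9+ (or `from __future__ import annotations`):

    additional_config = {"is_mentioned": "0.35"}  # hypothetical payload

    reply_probability = 0.0
    is_mentioned = False
    if additional_config is not None and additional_config.get("is_mentioned") is not None:
        try:
            reply_probability = float(additional_config.get("is_mentioned"))
            is_mentioned = True
        except Exception:
            pass  # malformed value: keep the defaults, as the hunk's except branch does

    print(is_mentioned, reply_probability)  # -> True 0.35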
From dc2cf843e50cc5c1aba438c8b3fafa6ff8f2b726 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=98=A5=E6=B2=B3=E6=99=B4?=
Date: Wed, 16 Apr 2025 17:37:28 +0900
Subject: [PATCH 024/406] =?UTF-8?q?=E6=94=B9=E5=90=84=E7=A7=8D=E5=B0=8F?=
=?UTF-8?q?=E9=97=AE=E9=A2=98?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/do_tool/tool_use.py | 9 ++++---
src/heart_flow/heartflow.py | 4 +--
src/heart_flow/observation.py | 4 +--
src/heart_flow/sub_heartflow.py | 4 +--
src/individuality/offline_llm.py | 2 +-
src/individuality/per_bf_gen.py | 10 +++----
src/main.py | 2 +-
src/plugins/PFC/action_planner.py | 4 +--
src/plugins/PFC/message_sender.py | 2 +-
src/plugins/PFC/observation_info.py | 11 +++++---
src/plugins/PFC/pfc.py | 6 ++---
src/plugins/PFC/pfc_KnowledgeFetcher.py | 4 +--
src/plugins/PFC/reply_checker.py | 4 +--
src/plugins/PFC/reply_generator.py | 4 +--
src/plugins/chat/__init__.py | 2 +-
src/plugins/chat/bot.py | 4 +--
src/plugins/chat/emoji_manager.py | 6 ++---
src/plugins/chat/message_buffer.py | 8 +++---
.../{message_sender.py => messagesender.py} | 6 ++---
src/plugins/chat/utils.py | 6 ++---
src/plugins/chat/utils_image.py | 4 +--
.../reasoning_chat/reasoning_chat.py | 2 +-
.../reasoning_chat/reasoning_generator.py | 10 +++----
.../think_flow_chat/think_flow_chat.py | 2 +-
.../think_flow_chat/think_flow_generator.py | 8 +++---
src/plugins/config/config.py | 3 +--
src/plugins/memory_system/Hippocampus.py | 26 +++++++++----------
.../memory_system/manually_alter_memory.py | 22 ++++++++--------
src/plugins/memory_system/offline_llm.py | 2 +-
src/plugins/message/api.py | 3 ++-
src/plugins/models/utils_model.py | 2 +-
src/plugins/person_info/person_info.py | 13 +++++-----
src/plugins/personality_s/renqingziji.py | 4 +--
src/plugins/remote/remote.py | 8 +++---
src/plugins/schedule/schedule_generator.py | 6 ++---
.../topic_identify/topic_identifier.py | 4 +--
36 files changed, 114 insertions(+), 107 deletions(-)
rename src/plugins/chat/{message_sender.py => messagesender.py} (99%)
diff --git a/src/do_tool/tool_use.py b/src/do_tool/tool_use.py
index b14927be8..af7c1b314 100644
--- a/src/do_tool/tool_use.py
+++ b/src/do_tool/tool_use.py
@@ -1,4 +1,4 @@
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.plugins.chat.chat_stream import ChatStream
from src.common.database import db
@@ -18,7 +18,7 @@ logger = get_module_logger("tool_use", config=tool_use_config)
class ToolUser:
def __init__(self):
- self.llm_model_tool = LLM_request(
+ self.llm_model_tool = LLMRequest(
model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
)
@@ -107,7 +107,7 @@ class ToolUser:
return None
async def use_tool(
- self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
+ self, message_txt: str, sender_name: str, chat_stream: ChatStream, sub_heartflow: SubHeartflow = None
):
"""使用工具辅助思考,判断是否需要额外信息
@@ -115,13 +115,14 @@ class ToolUser:
message_txt: 用户消息文本
sender_name: 发送者名称
chat_stream: 聊天流对象
+ sub_heartflow: 子心流对象(可选)
Returns:
dict: 工具使用结果,包含结构化的信息
"""
try:
# 构建提示词
- prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream, subheartflow)
+ prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream, sub_heartflow)
# 定义可用工具
tools = self._define_tools()
diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py
index d6116d0d5..9f0d202ea 100644
--- a/src/heart_flow/heartflow.py
+++ b/src/heart_flow/heartflow.py
@@ -1,7 +1,7 @@
from .sub_heartflow import SubHeartflow
from .observation import ChattingObservation
from src.plugins.moods.moods import MoodManager
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.plugins.schedule.schedule_generator import bot_schedule
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
@@ -60,7 +60,7 @@ class Heartflow:
self.current_mind = "你什么也没想"
self.past_mind = []
self.current_state: CurrentState = CurrentState()
- self.llm_model = LLM_request(
+ self.llm_model = LLMRequest(
model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
)
diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py
index 481e4beb7..c1e321c17 100644
--- a/src/heart_flow/observation.py
+++ b/src/heart_flow/observation.py
@@ -1,7 +1,7 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.common.database import db
from src.common.logger import get_module_logger
@@ -40,7 +40,7 @@ class ChattingObservation(Observation):
self.updating_old = False
- self.llm_summary = LLM_request(
+ self.llm_summary = LLMRequest(
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index cb53c2240..e1ccb193e 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -1,7 +1,7 @@
from .observation import Observation, ChattingObservation
import asyncio
from src.plugins.moods.moods import MoodManager
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
import time
from src.plugins.chat.message import UserInfo
@@ -79,7 +79,7 @@ class SubHeartflow:
self.current_mind = ""
self.past_mind = []
self.current_state: CurrentState = CurrentState()
- self.llm_model = LLM_request(
+ self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"],
max_tokens=600,
diff --git a/src/individuality/offline_llm.py b/src/individuality/offline_llm.py
index 7a698fc1d..2b5b6dc25 100644
--- a/src/individuality/offline_llm.py
+++ b/src/individuality/offline_llm.py
@@ -10,7 +10,7 @@ from src.common.logger import get_module_logger
logger = get_module_logger("offline_llm")
-class LLM_request_off:
+class LLMRequestOff:
def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
self.model_name = model_name
self.params = kwargs
diff --git a/src/individuality/per_bf_gen.py b/src/individuality/per_bf_gen.py
index d898ea5e3..7e630bdd9 100644
--- a/src/individuality/per_bf_gen.py
+++ b/src/individuality/per_bf_gen.py
@@ -19,7 +19,7 @@ with open(config_path, "r", encoding="utf-8") as f:
# 现在可以导入src模块
from src.individuality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa E402
from src.individuality.questionnaire import FACTOR_DESCRIPTIONS # noqa E402
-from src.individuality.offline_llm import LLM_request_off # noqa E402
+from src.individuality.offline_llm import LLMRequestOff # noqa E402
# 加载环境变量
env_path = os.path.join(root_path, ".env")
@@ -65,7 +65,7 @@ def adapt_scene(scene: str) -> str:
现在,请你给出改编后的场景描述
"""
- llm = LLM_request_off(model_name=config["model"]["llm_normal"]["name"])
+ llm = LLMRequestOff(model_name=config["model"]["llm_normal"]["name"])
adapted_scene, _ = llm.generate_response(prompt)
# 检查返回的场景是否为空或错误信息
@@ -79,7 +79,7 @@ def adapt_scene(scene: str) -> str:
return scene
-class PersonalityEvaluator_direct:
+class PersonalityEvaluatorDirect:
def __init__(self):
self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
self.scenarios = []
@@ -110,7 +110,7 @@ class PersonalityEvaluator_direct:
{"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key}
)
- self.llm = LLM_request_off()
+ self.llm = LLMRequestOff()
def evaluate_response(self, scenario: str, response: str, dimensions: List[str]) -> Dict[str, float]:
"""
@@ -269,7 +269,7 @@ class PersonalityEvaluator_direct:
def main():
- evaluator = PersonalityEvaluator_direct()
+ evaluator = PersonalityEvaluatorDirect()
result = evaluator.run_evaluation()
# 准备简化的结果数据
diff --git a/src/main.py b/src/main.py
index d8f667153..11e01f7b6 100644
--- a/src/main.py
+++ b/src/main.py
@@ -9,7 +9,7 @@ from .plugins.willing.willing_manager import willing_manager
from .plugins.chat.chat_stream import chat_manager
from .heart_flow.heartflow import heartflow
from .plugins.memory_system.Hippocampus import HippocampusManager
-from .plugins.chat.message_sender import message_manager
+from .plugins.chat.messagesender import message_manager
from .plugins.storage.storage import MessageStorage
from .plugins.config.config import global_config
from .plugins.chat.bot import chat_bot
diff --git a/src/plugins/PFC/action_planner.py b/src/plugins/PFC/action_planner.py
index cc904662d..0b1b633ab 100644
--- a/src/plugins/PFC/action_planner.py
+++ b/src/plugins/PFC/action_planner.py
@@ -1,6 +1,6 @@
from typing import Tuple
from src.common.logger import get_module_logger
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from .chat_observer import ChatObserver
from .pfc_utils import get_items_from_json
@@ -23,7 +23,7 @@ class ActionPlanner:
"""行动规划器"""
def __init__(self, stream_id: str):
- self.llm = LLM_request(
+ self.llm = LLMRequest(
model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=1000,
diff --git a/src/plugins/PFC/message_sender.py b/src/plugins/PFC/message_sender.py
index bc4499ed9..6a5eb9709 100644
--- a/src/plugins/PFC/message_sender.py
+++ b/src/plugins/PFC/message_sender.py
@@ -4,7 +4,7 @@ from ..chat.chat_stream import ChatStream
from ..chat.message import Message
from ..message.message_base import Seg
from src.plugins.chat.message import MessageSending, MessageSet
-from src.plugins.chat.message_sender import message_manager
+from src.plugins.chat.messagesender import message_manager
logger = get_module_logger("message_sender")
diff --git a/src/plugins/PFC/observation_info.py b/src/plugins/PFC/observation_info.py
index 08ff3c046..97d9a6146 100644
--- a/src/plugins/PFC/observation_info.py
+++ b/src/plugins/PFC/observation_info.py
@@ -120,6 +120,10 @@ class ObservationInfo:
# #spec
# meta_plan_trigger: bool = False
+ def __init__(self):
+ self.last_message_id = None
+ self.chat_observer = None
+
def __post_init__(self):
"""初始化后创建handler"""
self.chat_observer = None
@@ -129,7 +133,7 @@ class ObservationInfo:
"""绑定到指定的chat_observer
Args:
- stream_id: 聊天流ID
+ chat_observer: 要绑定的ChatObserver实例
"""
self.chat_observer = chat_observer
self.chat_observer.notification_manager.register_handler(
@@ -171,7 +175,8 @@ class ObservationInfo:
self.last_bot_speak_time = message["time"]
else:
self.last_user_speak_time = message["time"]
- self.active_users.add(user_info.user_id)
+ if user_info.user_id is not None:
+ self.active_users.add(str(user_info.user_id))
self.new_messages_count += 1
self.unprocessed_messages.append(message)
@@ -227,7 +232,7 @@ class ObservationInfo:
"""清空未处理消息列表"""
# 将未处理消息添加到历史记录中
for message in self.unprocessed_messages:
- self.chat_history.append(message)
+ self.chat_history.append(message) # TODO NEED FIX TYPE???
# 清空未处理消息列表
self.has_unread_messages = False
self.unprocessed_messages.clear()
diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py
index a53258888..a3b4bb065 100644
--- a/src/plugins/PFC/pfc.py
+++ b/src/plugins/PFC/pfc.py
@@ -8,7 +8,7 @@ from src.common.logger import get_module_logger
from ..chat.chat_stream import ChatStream
from ..message.message_base import UserInfo, Seg
from ..chat.message import Message
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from src.plugins.chat.message import MessageSending
from ..message.api import global_api
@@ -30,7 +30,7 @@ class GoalAnalyzer:
"""对话目标分析器"""
def __init__(self, stream_id: str):
- self.llm = LLM_request(
+ self.llm = LLMRequest(
model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
)
@@ -350,7 +350,7 @@ class DirectMessageSender:
# logger.info(f"发送消息到{end_point}")
# logger.info(message_json)
try:
- await global_api.send_message_REST(end_point, message_json)
+ await global_api.send_message_rest(end_point, message_json)
except Exception as e:
logger.error(f"REST方式发送失败,出现错误: {str(e)}")
logger.info("尝试使用ws发送")
diff --git a/src/plugins/PFC/pfc_KnowledgeFetcher.py b/src/plugins/PFC/pfc_KnowledgeFetcher.py
index 9c5c55076..5458f61e3 100644
--- a/src/plugins/PFC/pfc_KnowledgeFetcher.py
+++ b/src/plugins/PFC/pfc_KnowledgeFetcher.py
@@ -1,7 +1,7 @@
from typing import List, Tuple
from src.common.logger import get_module_logger
from src.plugins.memory_system.Hippocampus import HippocampusManager
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ..chat.message import Message
@@ -12,7 +12,7 @@ class KnowledgeFetcher:
"""知识调取器"""
def __init__(self):
- self.llm = LLM_request(
+ self.llm = LLMRequest(
model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=1000,
diff --git a/src/plugins/PFC/reply_checker.py b/src/plugins/PFC/reply_checker.py
index 6889f7ca8..f9753d44e 100644
--- a/src/plugins/PFC/reply_checker.py
+++ b/src/plugins/PFC/reply_checker.py
@@ -2,7 +2,7 @@ import json
import datetime
from typing import Tuple
from src.common.logger import get_module_logger
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from .chat_observer import ChatObserver
from ..message.message_base import UserInfo
@@ -14,7 +14,7 @@ class ReplyChecker:
"""回复检查器"""
def __init__(self, stream_id: str):
- self.llm = LLM_request(
+ self.llm = LLMRequest(
model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="reply_check"
)
self.name = global_config.BOT_NICKNAME
diff --git a/src/plugins/PFC/reply_generator.py b/src/plugins/PFC/reply_generator.py
index 85a067d23..8a5d77fe2 100644
--- a/src/plugins/PFC/reply_generator.py
+++ b/src/plugins/PFC/reply_generator.py
@@ -1,6 +1,6 @@
from typing import Tuple
from src.common.logger import get_module_logger
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from .chat_observer import ChatObserver
from .reply_checker import ReplyChecker
@@ -15,7 +15,7 @@ class ReplyGenerator:
"""回复生成器"""
def __init__(self, stream_id: str):
- self.llm = LLM_request(
+ self.llm = LLMRequest(
model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=300,
diff --git a/src/plugins/chat/__init__.py b/src/plugins/chat/__init__.py
index 8d9aa1f8e..a68caaf1c 100644
--- a/src/plugins/chat/__init__.py
+++ b/src/plugins/chat/__init__.py
@@ -1,7 +1,7 @@
from .emoji_manager import emoji_manager
from ..person_info.relationship_manager import relationship_manager
from .chat_stream import chat_manager
-from .message_sender import message_manager
+from .messagesender import message_manager
from ..storage.storage import MessageStorage
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 3dc732274..bb83ade24 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -42,7 +42,7 @@ class ChatBot:
self._started = True
- async def _create_PFC_chat(self, message: MessageRecv):
+ async def _create_pfc_chat(self, message: MessageRecv):
try:
chat_id = str(message.chat_stream.stream_id)
@@ -112,7 +112,7 @@ class ChatBot:
)
message.update_chat_stream(chat)
await self.only_process_chat.process_message(message)
- await self._create_PFC_chat(message)
+ await self._create_pfc_chat(message)
else:
if groupinfo.group_id in global_config.talk_allowed_groups:
# logger.debug(f"开始群聊模式{str(message_data)[:50]}...")
diff --git a/src/plugins/chat/emoji_manager.py b/src/plugins/chat/emoji_manager.py
index de3a5a54d..7a860b855 100644
--- a/src/plugins/chat/emoji_manager.py
+++ b/src/plugins/chat/emoji_manager.py
@@ -13,7 +13,7 @@ from ...common.database import db
from ..config.config import global_config
from ..chat.utils import get_embedding
from ..chat.utils_image import ImageManager, image_path_to_base64
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from src.common.logger import get_module_logger
logger = get_module_logger("emoji")
@@ -34,8 +34,8 @@ class EmojiManager:
def __init__(self):
self._scan_task = None
- self.vlm = LLM_request(model=global_config.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
- self.llm_emotion_judge = LLM_request(
+ self.vlm = LLMRequest(model=global_config.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
+ self.llm_emotion_judge = LLMRequest(
model=global_config.llm_emotion_judge, max_tokens=600, temperature=0.8, request_type="emoji"
) # 更高的温度,更少的token(后续可以根据情绪来调整温度)
diff --git a/src/plugins/chat/message_buffer.py b/src/plugins/chat/message_buffer.py
index 21e490433..e175b442f 100644
--- a/src/plugins/chat/message_buffer.py
+++ b/src/plugins/chat/message_buffer.py
@@ -59,20 +59,20 @@ class MessageBuffer:
logger.debug(f"被新消息覆盖信息id: {cache_msg.message.message_info.message_id}")
# 查找最近的处理成功消息(T)
- recent_F_count = 0
+ recent_f_count = 0
for msg_id in reversed(self.buffer_pool[person_id_]):
msg = self.buffer_pool[person_id_][msg_id]
if msg.result == "T":
break
elif msg.result == "F":
- recent_F_count += 1
+ recent_f_count += 1
# 判断条件:最近T之后有超过3-5条F
- if recent_F_count >= random.randint(3, 5):
+ if recent_f_count >= random.randint(3, 5):
new_msg = CacheMessages(message=message, result="T")
new_msg.cache_determination.set()
self.buffer_pool[person_id_][message.message_info.message_id] = new_msg
- logger.debug(f"快速处理消息(已堆积{recent_F_count}条F): {message.message_info.message_id}")
+ logger.debug(f"快速处理消息(已堆积{recent_f_count}条F): {message.message_info.message_id}")
return
# 添加新消息
diff --git a/src/plugins/chat/message_sender.py b/src/plugins/chat/messagesender.py
similarity index 99%
rename from src/plugins/chat/message_sender.py
rename to src/plugins/chat/messagesender.py
index c223bbe4d..7dfee271e 100644
--- a/src/plugins/chat/message_sender.py
+++ b/src/plugins/chat/messagesender.py
@@ -23,7 +23,7 @@ sender_config = LogConfig(
logger = get_module_logger("msg_sender", config=sender_config)
-class Message_Sender:
+class MessageSender:
"""发送器"""
def __init__(self):
@@ -83,7 +83,7 @@ class Message_Sender:
# logger.info(f"发送消息到{end_point}")
# logger.info(message_json)
try:
- await global_api.send_message_REST(end_point, message_json)
+ await global_api.send_message_rest(end_point, message_json)
except Exception as e:
logger.error(f"REST方式发送失败,出现错误: {str(e)}")
logger.info("尝试使用ws发送")
@@ -286,4 +286,4 @@ class MessageManager:
# 创建全局消息管理器实例
message_manager = MessageManager()
# 创建全局发送器实例
-message_sender = Message_Sender()
+message_sender = MessageSender()
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 81c19af80..492f46f31 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -8,7 +8,7 @@ import jieba
import numpy as np
from src.common.logger import get_module_logger
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..utils.typo_generator import ChineseTypoGenerator
from ..config.config import global_config
from .message import MessageRecv, Message
@@ -91,7 +91,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
async def get_embedding(text, request_type="embedding"):
"""获取文本的embedding向量"""
- llm = LLM_request(model=global_config.embedding, request_type=request_type)
+ llm = LLMRequest(model=global_config.embedding, request_type=request_type)
# return llm.get_embedding_sync(text)
try:
embedding = await llm.get_embedding(text)
@@ -105,7 +105,7 @@ async def get_recent_group_messages(chat_id: str, limit: int = 12) -> list:
"""从数据库获取群组最近的消息记录
Args:
- group_id: 群组ID
+ chat_id: 群组ID
limit: 获取消息数量,默认12条
Returns:
diff --git a/src/plugins/chat/utils_image.py b/src/plugins/chat/utils_image.py
index ed78dc17e..4fe0c5fcc 100644
--- a/src/plugins/chat/utils_image.py
+++ b/src/plugins/chat/utils_image.py
@@ -9,7 +9,7 @@ import io
from ...common.database import db
from ..config.config import global_config
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from src.common.logger import get_module_logger
@@ -32,7 +32,7 @@ class ImageManager:
self._ensure_description_collection()
self._ensure_image_dir()
self._initialized = True
- self._llm = LLM_request(model=global_config.vlm, temperature=0.4, max_tokens=300, request_type="image")
+ self._llm = LLMRequest(model=global_config.vlm, temperature=0.4, max_tokens=300, request_type="image")
def _ensure_image_dir(self):
"""确保图像存储目录存在"""
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index 2ce218a6f..82e041cb1 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -8,7 +8,7 @@ from ...config.config import global_config
from ...chat.emoji_manager import emoji_manager
from .reasoning_generator import ResponseGenerator
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
-from ...chat.message_sender import message_manager
+from ...chat.messagesender import message_manager
from ...storage.storage import MessageStorage
from ...chat.utils import is_mentioned_bot_in_message
from ...chat.utils_image import image_path_to_base64
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
index 46602b5d7..6c8221d12 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
@@ -1,7 +1,7 @@
from typing import List, Optional, Tuple, Union
import random
-from ...models.utils_model import LLM_request
+from ...models.utils_model import LLMRequest
from ...config.config import global_config
from ...chat.message import MessageThinking
from .reasoning_prompt_builder import prompt_builder
@@ -22,20 +22,20 @@ logger = get_module_logger("llm_generator", config=llm_config)
class ResponseGenerator:
def __init__(self):
- self.model_reasoning = LLM_request(
+ self.model_reasoning = LLMRequest(
model=global_config.llm_reasoning,
temperature=0.7,
max_tokens=3000,
request_type="response_reasoning",
)
- self.model_normal = LLM_request(
+ self.model_normal = LLMRequest(
model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=256,
request_type="response_reasoning",
)
- self.model_sum = LLM_request(
+ self.model_sum = LLMRequest(
model=global_config.llm_summary_by_topic, temperature=0.7, max_tokens=3000, request_type="relation"
)
self.current_model_type = "r1" # 默认使用 R1
@@ -68,7 +68,7 @@ class ResponseGenerator:
logger.info(f"{self.current_model_type}思考,失败")
return None
- async def _generate_response_with_model(self, message: MessageThinking, model: LLM_request, thinking_id: str):
+ async def _generate_response_with_model(self, message: MessageThinking, model: LLMRequest, thinking_id: str):
sender_name = ""
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
index 6a896167a..611eb5966 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_chat.py
@@ -8,7 +8,7 @@ from ...config.config import global_config
from ...chat.emoji_manager import emoji_manager
from .think_flow_generator import ResponseGenerator
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
-from ...chat.message_sender import message_manager
+from ...chat.messagesender import message_manager
from ...storage.storage import MessageStorage
from ...chat.utils import is_mentioned_bot_in_message, get_recent_group_detailed_plain_text
from ...chat.utils_image import image_path_to_base64
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
index 029ed160c..129feaab2 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
@@ -2,7 +2,7 @@ from typing import List, Optional
import random
-from ...models.utils_model import LLM_request
+from ...models.utils_model import LLMRequest
from ...config.config import global_config
from ...chat.message import MessageRecv
from .think_flow_prompt_builder import prompt_builder
@@ -25,14 +25,14 @@ logger = get_module_logger("llm_generator", config=llm_config)
class ResponseGenerator:
def __init__(self):
- self.model_normal = LLM_request(
+ self.model_normal = LLMRequest(
model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=256,
request_type="response_heartflow",
)
- self.model_sum = LLM_request(
+ self.model_sum = LLMRequest(
model=global_config.llm_summary_by_topic, temperature=0.6, max_tokens=2000, request_type="relation"
)
self.current_model_type = "r1" # 默认使用 R1
@@ -94,7 +94,7 @@ class ResponseGenerator:
return None
async def _generate_response_with_model(
- self, message: MessageRecv, model: LLM_request, thinking_id: str, mode: str = "normal"
+ self, message: MessageRecv, model: LLMRequest, thinking_id: str, mode: str = "normal"
) -> str:
sender_name = ""
diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py
index 8238078c2..ebde77734 100644
--- a/src/plugins/config/config.py
+++ b/src/plugins/config/config.py
@@ -62,8 +62,7 @@ def update_config():
shutil.copy2(template_path, old_config_path)
logger.info(f"已创建新配置文件,请填写后重新运行: {old_config_path}")
# 如果是新创建的配置文件,直接返回
- quit()
- return
+ return quit()
# 读取旧配置文件和模板文件
with open(old_config_path, "r", encoding="utf-8") as f:
diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py
index c2c090d58..2378011e2 100644
--- a/src/plugins/memory_system/Hippocampus.py
+++ b/src/plugins/memory_system/Hippocampus.py
@@ -9,7 +9,7 @@ import networkx as nx
import numpy as np
from collections import Counter
from ...common.database import db
-from ...plugins.models.utils_model import LLM_request
+from ...plugins.models.utils_model import LLMRequest
from src.common.logger import get_module_logger, LogConfig, MEMORY_STYLE_CONFIG
from src.plugins.memory_system.sample_distribution import MemoryBuildScheduler # 分布生成器
from .memory_config import MemoryConfig
@@ -91,7 +91,7 @@ memory_config = LogConfig(
logger = get_module_logger("memory_system", config=memory_config)
-class Memory_graph:
+class MemoryGraph:
def __init__(self):
self.G = nx.Graph() # 使用 networkx 的图结构
@@ -229,7 +229,7 @@ class Memory_graph:
# 海马体
class Hippocampus:
def __init__(self):
- self.memory_graph = Memory_graph()
+ self.memory_graph = MemoryGraph()
self.llm_topic_judge = None
self.llm_summary_by_topic = None
self.entorhinal_cortex = None
@@ -243,8 +243,8 @@ class Hippocampus:
self.parahippocampal_gyrus = ParahippocampalGyrus(self)
# 从数据库加载记忆图
self.entorhinal_cortex.sync_memory_from_db()
- self.llm_topic_judge = LLM_request(self.config.llm_topic_judge, request_type="memory")
- self.llm_summary_by_topic = LLM_request(self.config.llm_summary_by_topic, request_type="memory")
+ self.llm_topic_judge = LLMRequest(self.config.llm_topic_judge, request_type="memory")
+ self.llm_summary_by_topic = LLMRequest(self.config.llm_summary_by_topic, request_type="memory")
def get_all_node_names(self) -> list:
"""获取记忆图中所有节点的名字列表"""
@@ -346,7 +346,8 @@ class Hippocampus:
Args:
text (str): 输入文本
- num (int, optional): 需要返回的记忆数量。默认为5。
+ max_memory_num (int, optional): 记忆数量限制。默认为3。
+ max_memory_length (int, optional): 记忆长度限制。默认为2。
max_depth (int, optional): 记忆检索深度。默认为2。
fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
@@ -540,7 +541,6 @@ class Hippocampus:
Args:
text (str): 输入文本
- num (int, optional): 需要返回的记忆数量。默认为5。
max_depth (int, optional): 记忆检索深度。默认为2。
fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
@@ -937,7 +937,7 @@ class EntorhinalCortex:
# 海马体
class Hippocampus:
def __init__(self):
- self.memory_graph = Memory_graph()
+ self.memory_graph = MemoryGraph()
self.llm_topic_judge = None
self.llm_summary_by_topic = None
self.entorhinal_cortex = None
@@ -951,8 +951,8 @@ class Hippocampus:
self.parahippocampal_gyrus = ParahippocampalGyrus(self)
# 从数据库加载记忆图
self.entorhinal_cortex.sync_memory_from_db()
- self.llm_topic_judge = LLM_request(self.config.llm_topic_judge, request_type="memory")
- self.llm_summary_by_topic = LLM_request(self.config.llm_summary_by_topic, request_type="memory")
+ self.llm_topic_judge = LLMRequest(self.config.llm_topic_judge, request_type="memory")
+ self.llm_summary_by_topic = LLMRequest(self.config.llm_summary_by_topic, request_type="memory")
def get_all_node_names(self) -> list:
"""获取记忆图中所有节点的名字列表"""
@@ -1054,8 +1054,9 @@ class Hippocampus:
Args:
text (str): 输入文本
- num (int, optional): 需要返回的记忆数量。默认为5。
- max_depth (int, optional): 记忆检索深度。默认为2。
+ max_memory_num (int, optional): 返回的记忆条目数量上限。默认为3,表示最多返回3条与输入文本相关度最高的记忆。
+ max_memory_length (int, optional): 每个主题最多返回的记忆条目数量。默认为2,表示每个主题最多返回2条相似度最高的记忆。
+ max_depth (int, optional): 记忆检索深度。默认为3。值越大,检索范围越广,可以获取更多间接相关的记忆,但速度会变慢。
fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
如果为False,使用LLM提取关键词,速度较慢但更准确。
@@ -1248,7 +1249,6 @@ class Hippocampus:
Args:
text (str): 输入文本
- num (int, optional): 需要返回的记忆数量。默认为5。
max_depth (int, optional): 记忆检索深度。默认为2。
fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
diff --git a/src/plugins/memory_system/manually_alter_memory.py b/src/plugins/memory_system/manually_alter_memory.py
index ce1883e57..818742113 100644
--- a/src/plugins/memory_system/manually_alter_memory.py
+++ b/src/plugins/memory_system/manually_alter_memory.py
@@ -177,7 +177,7 @@ def remove_mem_edge(hippocampus: Hippocampus):
# 修改节点信息
def alter_mem_node(hippocampus: Hippocampus):
- batchEnviroment = dict()
+ batch_environment = dict()
while True:
concept = input("请输入节点概念名(输入'终止'以结束):\n")
if concept.lower() == "终止":
@@ -229,7 +229,7 @@ def alter_mem_node(hippocampus: Hippocampus):
break
try:
- user_exec(command, node_environment, batchEnviroment)
+ user_exec(command, node_environment, batch_environment)
except Exception as e:
console.print(e)
console.print(
@@ -239,7 +239,7 @@ def alter_mem_node(hippocampus: Hippocampus):
# 修改边信息
def alter_mem_edge(hippocampus: Hippocampus):
- batchEnviroment = dict()
+ batch_enviroment = dict()
while True:
source = input("请输入 **第一个节点** 名称(输入'终止'以结束):\n")
if source.lower() == "终止":
@@ -262,21 +262,21 @@ def alter_mem_edge(hippocampus: Hippocampus):
console.print("[yellow]你将获得一个执行任意代码的环境[/yellow]")
console.print("[red]你已经被警告过了。[/red]\n")
- edgeEnviroment = {"source": "<节点名>", "target": "<节点名>", "strength": "<强度值,装在一个list里>"}
+ edge_environment = {"source": "<节点名>", "target": "<节点名>", "strength": "<强度值,装在一个list里>"}
console.print(
"[green]环境变量中会有env与batchEnv两个dict, env在切换节点时会清空, batchEnv在操作终止时才会清空[/green]"
)
console.print(
- f"[green] env 会被初始化为[/green]\n{edgeEnviroment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
+ f"[green] env 会被初始化为[/green]\n{edge_environment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
)
console.print(
"[yellow]为便于书写临时脚本,请手动在输入代码通过Ctrl+C等方式触发KeyboardInterrupt来结束代码执行[/yellow]"
)
# 拷贝数据以防操作炸了
- edgeEnviroment["strength"] = [edge["strength"]]
- edgeEnviroment["source"] = source
- edgeEnviroment["target"] = target
+ edge_environment["strength"] = [edge["strength"]]
+ edge_environment["source"] = source
+ edge_environment["target"] = target
while True:
@@ -288,8 +288,8 @@ def alter_mem_edge(hippocampus: Hippocampus):
except KeyboardInterrupt:
# 稍微防一下小天才
try:
- if isinstance(edgeEnviroment["strength"][0], int):
- edge["strength"] = edgeEnviroment["strength"][0]
+ if isinstance(edge_environment["strength"][0], int):
+ edge["strength"] = edge_environment["strength"][0]
else:
raise Exception
@@ -301,7 +301,7 @@ def alter_mem_edge(hippocampus: Hippocampus):
break
try:
- user_exec(command, edgeEnviroment, batchEnviroment)
+ user_exec(command, edge_environment, batch_enviroment)
except Exception as e:
console.print(e)
console.print(
diff --git a/src/plugins/memory_system/offline_llm.py b/src/plugins/memory_system/offline_llm.py
index 9c3fa81d9..fc50b17bc 100644
--- a/src/plugins/memory_system/offline_llm.py
+++ b/src/plugins/memory_system/offline_llm.py
@@ -10,7 +10,7 @@ from src.common.logger import get_module_logger
logger = get_module_logger("offline_llm")
-class LLM_request_off:
+class LLMRequestOff:
def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
self.model_name = model_name
self.params = kwargs
diff --git a/src/plugins/message/api.py b/src/plugins/message/api.py
index 0c3e3a5a1..e01289e95 100644
--- a/src/plugins/message/api.py
+++ b/src/plugins/message/api.py
@@ -233,7 +233,8 @@ class MessageServer(BaseMessageHandler):
async def send_message(self, message: MessageBase):
await self.broadcast_to_platform(message.message_info.platform, message.to_dict())
- async def send_message_REST(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
+ @staticmethod
+ async def send_message_rest(url: str, data: Dict[str, Any]) -> Dict[str, Any]:
"""发送消息到指定端点"""
async with aiohttp.ClientSession() as session:
try:
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index 604e74155..f9d563e49 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -16,7 +16,7 @@ from ..config.config import global_config
logger = get_module_logger("model_utils")
-class LLM_request:
+class LLMRequest:
# 定义需要转换的模型列表,作为类变量避免重复
MODELS_NEEDING_TRANSFORMATION = [
"o3-mini",
diff --git a/src/plugins/person_info/person_info.py b/src/plugins/person_info/person_info.py
index 7a8a0d1d5..2b15adc62 100644
--- a/src/plugins/person_info/person_info.py
+++ b/src/plugins/person_info/person_info.py
@@ -6,7 +6,7 @@ from typing import Any, Callable, Dict
import datetime
import asyncio
import numpy as np
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.individuality.individuality import Individuality
@@ -56,7 +56,7 @@ person_info_default = {
class PersonInfoManager:
def __init__(self):
self.person_name_list = {}
- self.qv_name_llm = LLM_request(
+ self.qv_name_llm = LLMRequest(
model=global_config.llm_normal,
max_tokens=256,
request_type="qv_name",
@@ -107,7 +107,7 @@ class PersonInfoManager:
db.person_info.insert_one(_person_info_default)
- async def update_one_field(self, person_id: str, field_name: str, value, Data: dict = None):
+ async def update_one_field(self, person_id: str, field_name: str, value, data: dict = None):
"""更新某一个字段,会补全"""
if field_name not in person_info_default.keys():
logger.debug(f"更新'{field_name}'失败,未定义的字段")
@@ -118,11 +118,12 @@ class PersonInfoManager:
if document:
db.person_info.update_one({"person_id": person_id}, {"$set": {field_name: value}})
else:
- Data[field_name] = value
+ data[field_name] = value
logger.debug(f"更新时{person_id}不存在,已新建")
- await self.create_person_info(person_id, Data)
+ await self.create_person_info(person_id, data)
- async def has_one_field(self, person_id: str, field_name: str):
+ @staticmethod
+ async def has_one_field(person_id: str, field_name: str):
"""判断是否存在某一个字段"""
document = db.person_info.find_one({"person_id": person_id}, {field_name: 1})
if document:
diff --git a/src/plugins/personality_s/renqingziji.py b/src/plugins/personality_s/renqingziji.py
index 04cbec099..ce4c268b8 100644
--- a/src/plugins/personality_s/renqingziji.py
+++ b/src/plugins/personality_s/renqingziji.py
@@ -38,7 +38,7 @@ else:
print("将使用默认配置")
-class PersonalityEvaluator_direct:
+class PersonalityEvaluatorDirect:
def __init__(self):
self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
self.scenarios = []
@@ -135,7 +135,7 @@ def main():
print("\n准备好了吗?按回车键开始...")
input()
- evaluator = PersonalityEvaluator_direct()
+ evaluator = PersonalityEvaluatorDirect()
final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
dimension_counts = {trait: 0 for trait in final_scores.keys()}
diff --git a/src/plugins/remote/remote.py b/src/plugins/remote/remote.py
index bc7437057..58be19dde 100644
--- a/src/plugins/remote/remote.py
+++ b/src/plugins/remote/remote.py
@@ -125,12 +125,12 @@ def main():
if global_config.remote_enable:
"""主函数,启动心跳线程"""
# 配置
- SERVER_URL = "http://hyybuth.xyz:10058"
- # SERVER_URL = "http://localhost:10058"
- HEARTBEAT_INTERVAL = 300 # 5分钟(秒)
+ server_url = "http://hyybuth.xyz:10058"
+ # server_url = "http://localhost:10058"
+ heartbeat_interval = 300 # 5分钟(秒)
# 创建并启动心跳线程
- heartbeat_thread = HeartbeatThread(SERVER_URL, HEARTBEAT_INTERVAL)
+ heartbeat_thread = HeartbeatThread(server_url, heartbeat_interval)
heartbeat_thread.start()
return heartbeat_thread # 返回线程对象,便于外部控制
diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py
index f75065cf8..a67de28fc 100644
--- a/src/plugins/schedule/schedule_generator.py
+++ b/src/plugins/schedule/schedule_generator.py
@@ -11,7 +11,7 @@ sys.path.append(root_path)
from src.common.database import db # noqa: E402
from src.common.logger import get_module_logger, SCHEDULE_STYLE_CONFIG, LogConfig # noqa: E402
-from src.plugins.models.utils_model import LLM_request # noqa: E402
+from src.plugins.models.utils_model import LLMRequest # noqa: E402
from src.plugins.config.config import global_config # noqa: E402
TIME_ZONE = tz.gettz(global_config.TIME_ZONE) # 设置时区
@@ -30,13 +30,13 @@ class ScheduleGenerator:
def __init__(self):
# 使用离线LLM模型
- self.llm_scheduler_all = LLM_request(
+ self.llm_scheduler_all = LLMRequest(
model=global_config.llm_reasoning,
temperature=global_config.SCHEDULE_TEMPERATURE + 0.3,
max_tokens=7000,
request_type="schedule",
)
- self.llm_scheduler_doing = LLM_request(
+ self.llm_scheduler_doing = LLMRequest(
model=global_config.llm_normal,
temperature=global_config.SCHEDULE_TEMPERATURE,
max_tokens=2048,
diff --git a/src/plugins/topic_identify/topic_identifier.py b/src/plugins/topic_identify/topic_identifier.py
index 743e45870..9a1797ecd 100644
--- a/src/plugins/topic_identify/topic_identifier.py
+++ b/src/plugins/topic_identify/topic_identifier.py
@@ -1,7 +1,7 @@
from typing import List, Optional
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from src.common.logger import get_module_logger, LogConfig, TOPIC_STYLE_CONFIG
@@ -17,7 +17,7 @@ logger = get_module_logger("topic_identifier", config=topic_config)
class TopicIdentifier:
def __init__(self):
- self.llm_topic_judge = LLM_request(model=global_config.llm_topic_judge, request_type="topic")
+ self.llm_topic_judge = LLMRequest(model=global_config.llm_topic_judge, request_type="topic")
async def identify_topic_llm(self, text: str) -> Optional[List[str]]:
"""识别消息主题,返回主题列表"""
From 1a165c067a717546f01a7616bbdd2e747e6948e1 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Wed, 16 Apr 2025 08:37:51 +0000
Subject: [PATCH 025/406] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?=
=?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/PFC/observation_info.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/plugins/PFC/observation_info.py b/src/plugins/PFC/observation_info.py
index 97d9a6146..f92f12306 100644
--- a/src/plugins/PFC/observation_info.py
+++ b/src/plugins/PFC/observation_info.py
@@ -232,7 +232,7 @@ class ObservationInfo:
"""清空未处理消息列表"""
# 将未处理消息添加到历史记录中
for message in self.unprocessed_messages:
- self.chat_history.append(message) # TODO NEED FIX TYPE???
+ self.chat_history.append(message) # TODO NEED FIX TYPE???
# 清空未处理消息列表
self.has_unread_messages = False
self.unprocessed_messages.clear()
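The one-character change above is the auto-formatter enforcing PEP 8's rule (checked by pycodestyle as E261) that an inline comment be separated from the code by at least two spaces:

    x = 1 # E261: needs at least two spaces before the "#"
    x = 1  # compliant inline comment, as the bot rewrote it above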
From ae2713f7b3ea71b81431b02c9ab22f484b2c1fa8 Mon Sep 17 00:00:00 2001
From: tcmofashi
Date: Wed, 16 Apr 2025 19:07:21 +0800
Subject: [PATCH 026/406] =?UTF-8?q?fix:=20=E5=A2=9E=E5=8A=A0=E9=94=99?=
=?UTF-8?q?=E8=AF=AF=E6=8F=90=E7=A4=BA?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat_module/reasoning_chat/reasoning_chat.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index 82e041cb1..5b15266ea 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -1,6 +1,6 @@
import time
from random import random
-
+import traceback
from typing import List
from ...memory_system.Hippocampus import HippocampusManager
from ...moods.moods import MoodManager
@@ -242,7 +242,7 @@ class ReasoningChat:
info_catcher.catch_after_generate_response(timing_results["生成回复"])
except Exception as e:
- logger.error(f"回复生成出现错误:str{e}")
+ logger.error(f"回复生成出现错误:{str(e)} {traceback.format_exc()}")
response_set = None
if not response_set:
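A runnable sketch of the logging change above: the original f-string had str{e} (which printed the literal text "str" followed by the message) and dropped the stack trace, while the new form keeps both the exception text and the full traceback. The stand-in function is hypothetical:

    import logging
    import traceback

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger("reasoning_chat")

    def generate_reply():  # hypothetical stand-in for the response generator
        raise RuntimeError("boom")

    try:
        response_set = generate_reply()
    except Exception as e:
        # Keep the message *and* the stack trace, as the patch now does
        logger.error(f"回复生成出现错误:{str(e)} {traceback.format_exc()}")
        response_set = None

The stdlib alternative is logger.exception(...), which logs at ERROR level and appends the traceback in one call.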
From 13dc4562f0f41823bed28017f25cdd6d7af271b3 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Wed, 16 Apr 2025 20:22:19 +0800
Subject: [PATCH 027/406] =?UTF-8?q?feat:=E4=BF=AE=E5=A4=8D=E4=B8=AD?=
=?UTF-8?q?=E6=96=87=E6=8B=AC=E5=8F=B7=E5=8C=B9=E9=85=8D=EF=BC=8C=E5=8A=A0?=
=?UTF-8?q?=E5=85=A5=E5=B0=8F=E5=8A=A9=E6=89=8B?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
config_helper/config_helper.py | 477 +++++++++++++++++++++++++++++++++
config_helper/config_notice.md | 10 +
src/plugins/chat/utils.py | 6 +-
3 files changed, 492 insertions(+), 1 deletion(-)
create mode 100644 config_helper/config_helper.py
create mode 100644 config_helper/config_notice.md
diff --git a/config_helper/config_helper.py b/config_helper/config_helper.py
new file mode 100644
index 000000000..b27aaead4
--- /dev/null
+++ b/config_helper/config_helper.py
@@ -0,0 +1,477 @@
+import os
+import tomli
+from packaging.specifiers import SpecifierSet
+from packaging.version import Version
+import sys
+
+import asyncio
+import os
+import time
+from typing import Tuple, Union, AsyncGenerator, Generator
+
+import aiohttp
+import requests
+import json
+
+
+class EnvInfo:
+ def __init__(self, env_path):
+ self.env_path = env_path
+ self.env_content_txt = None
+ self.env_content = {}
+ self.error_message = None
+
+
+ def check_env(self):
+ # 检查根目录是否存在.env文件
+ if not os.path.exists(self.env_path):
+ self.error_message = "根目录没有.env文件,请自己创建或者运行一次MaiBot\n你可以直接复制template/template.env文件到根目录并重命名为.env"
+ return "not_found"
+
+ #加载整个.env文件
+ with open(self.env_path, "r", encoding="utf-8") as f:
+ self.env_content_txt = f.read()
+
+ #逐行读取所有配置项
+ for line in self.env_content_txt.splitlines():
+ if line.strip() == "":
+ continue
+ key, value = line.split("=", 1)
+ self.env_content[key.strip()] = value.strip()
+
+ # 检查.env文件的SILICONFLOW_KEY和SILICONFLOW_BASE_URL是否为空
+ if "SILICONFLOW_KEY" not in self.env_content or "SILICONFLOW_BASE_URL" not in self.env_content:
+ if "DEEP_SEEK_BASE_URL" not in self.env_content or "DEEP_SEEK_KEY" not in self.env_content:
+ self.error_message = "没有设置可用的API和密钥,请检查.env文件,起码配置一个API来让帮助程序工作"
+ return "not_set"
+ else:
+ self.error_message = "你只设置了deepseek官方API,可能无法运行MaiBot,请检查.env文件"
+ return "only_ds"
+
+ return "success"
+
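check_env above splits every non-blank line on "=", so a comment or a malformed line in .env would raise ValueError before the key checks run. A more defensive variant of the same loop, offered only as a sketch and not part of the patch (the sample text is hypothetical):

    env_text = "SILICONFLOW_KEY=sk-xxx\n# a comment\nBROKEN LINE"  # hypothetical
    env_content = {}
    for line in env_text.splitlines():
        line = line.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue  # skip blanks, comments, and lines without "="
        key, value = line.split("=", 1)
        env_content[key.strip()] = value.strip()
    print(env_content)  # -> {'SILICONFLOW_KEY': 'sk-xxx'}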
+class LLM_request_off:
+ def __init__(self, model_name="deepseek-ai/DeepSeek-V3", env_info: EnvInfo = None, **kwargs):
+ self.model_name = model_name
+ self.params = kwargs
+ if model_name == "deepseek-ai/DeepSeek-V3" or model_name == "Pro/deepseek-ai/DeepSeek-V3":
+ self.api_key = env_info.env_content["SILICONFLOW_KEY"]
+ self.base_url = env_info.env_content["SILICONFLOW_BASE_URL"]
+ elif model_name == "deepseek-chat":
+ self.api_key = env_info.env_content["DEEP_SEEK_KEY"]
+ self.base_url = env_info.env_content["DEEP_SEEK_BASE_URL"]
+ # logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 base_url
+
+ def generate_stream(self, prompt: str) -> Generator[str, None, None]:
+ """流式生成模型响应"""
+ headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
+
+ # 构建请求体,启用流式输出
+ data = {
+ "model": self.model_name,
+ "messages": [{"role": "user", "content": prompt}],
+ "temperature": 0.4,
+ "stream": True,
+ **self.params,
+ }
+
+ # 发送请求到完整的 chat/completions 端点
+ api_url = f"{self.base_url.rstrip('/')}/chat/completions"
+ print(f"Stream Request URL: {api_url}")
+
+ max_retries = 3
+ base_wait_time = 15
+
+ for retry in range(max_retries):
+ try:
+ response = requests.post(api_url, headers=headers, json=data, stream=True)
+
+ if response.status_code == 429:
+ wait_time = base_wait_time * (2**retry)
+ print(f"遇到请求限制(429),等待{wait_time}秒后重试...")
+ time.sleep(wait_time)
+ continue
+
+ response.raise_for_status()
+
+ # 处理流式响应
+ for line in response.iter_lines():
+ if line:
+ line = line.decode('utf-8')
+ if line.startswith('data: ') and not line.startswith('data: [DONE]'):
+ json_str = line[6:] # 去掉 "data: " 前缀
+ try:
+ chunk_data = json.loads(json_str)
+ if (
+ "choices" in chunk_data
+ and len(chunk_data["choices"]) > 0
+ and "delta" in chunk_data["choices"][0]
+ and "content" in chunk_data["choices"][0]["delta"]
+ ):
+ content = chunk_data["choices"][0]["delta"]["content"]
+ yield content
+ except json.JSONDecodeError:
+ print(f"无法解析JSON: {json_str}")
+ return
+
+ except Exception as e:
+ if retry < max_retries - 1:
+ wait_time = base_wait_time * (2**retry)
+ print(f"[流式回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
+ time.sleep(wait_time)
+ else:
+ print(f"流式请求失败: {str(e)}")
+ yield f"流式请求失败: {str(e)}"
+ return
+
+ print("达到最大重试次数,流式请求仍然失败")
+ yield "达到最大重试次数,流式请求仍然失败"
+
+class ConfigInfo:
+ def __init__(self, config_path):
+ self.config_path = config_path
+ self.config_content = ""
+ self.config_content_txt = None
+ self.template_content = None
+ self.version = None
+ self.error_message = None
+
+ def check_bot_config(self):
+ """
+ 检查config/bot_config.toml文件中是否有缺失项目
+ """
+
+ if not os.path.exists(self.config_path):
+ self.error_message = f"错误:找不到配置文件 {self.config_path}"
+ return "not found"
+
+ # 读取配置文件,先去掉注释,再解析TOML
+ try:
+ # 首先读取原始文件内容
+ with open(self.config_path, "r", encoding="utf-8", errors="replace") as f:
+ file_content = f.read()
+
+ # 去掉注释并保留有效内容
+ cleaned_lines = []
+ for line in file_content.splitlines():
+ # 去掉行内注释
+ if "#" in line:
+ line = line.split("#")[0].rstrip()
+
+ # 如果行不是空的且不是以#开头的注释行,则添加到cleaned_lines
+ if line.strip() and not line.strip().startswith("#"):
+ cleaned_lines.append(line)
+
+ # 将处理后的内容用于解析TOML
+ self.config_content_txt = "\n".join(cleaned_lines)
+
+ # 使用tomli解析处理后的内容
+ self.config_content = tomli.loads(self.config_content_txt)
+ except tomli.TOMLDecodeError as e:
+ self.error_message = f"错误:配置文件格式错误:{e}"
+ # 配置内容已经在上面设置了,不需要再次读取
+ return "format_error"
+ except UnicodeDecodeError as e:
+ self.error_message = f"错误:配置文件编码错误,请使用UTF-8编码:{e}"
+ return "format_error"
+
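The hand-rolled comment stripping above splits each line on the first "#", which would also truncate a value such as name = "tag #1". TOML parsers already ignore comments on their own, as this small tomli sketch shows (the document and key are hypothetical):

    import tomli

    doc = 'name = "tag #1"  # trailing comment'
    print(tomli.loads(doc))  # -> {'name': 'tag #1'}; the "#" inside the string survives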
+ # 读取模板配置文件
+ template_path = "template/bot_config_template.toml"
+ if not os.path.exists(template_path):
+ self.error_message = f"错误:找不到模板配置文件,请检查你是否启动过或者该程序是否位于根目录 {template_path}"
+ return "critical_error"
+
+ try:
+ with open(template_path, "rb") as f:
+ template_content = tomli.load(f)
+ except Exception as e:
+ self.error_message = f"错误:无法解析模板配置文件,文件损坏,建议重新下载MaiBot:{e}"
+ return "critical_error"
+
+ # 获取版本信息
+ inner_version = self.config_content.get("inner", {}).get("version")
+ if not inner_version:
+ self.error_message = "错误:配置文件中缺少版本信息"
+ return "critical_error"
+
+ try:
+ self.version = Version(inner_version)
+ except:
+ self.error_message = f"错误:版本号格式错误:{inner_version}"
+ return "critical_error"
+
+
+ # 检查所有必需的顶级配置项
+ required_sections = [
+ "bot", "groups", "personality", "identity", "platforms",
+ "response", "message", "willing", "emoji", "memory",
+ "mood", "model"
+ ]
+
+ missing_sections = []
+ for section in required_sections:
+ if section not in self.config_content:
+ missing_sections.append(section)
+
+ if missing_sections:
+ self.error_message = f"错误:配置文件缺少以下顶级配置项:{', '.join(missing_sections)}"
+ return "critical_error"
+
+ # 检查各个配置项内的必需字段
+ missing_fields = []
+
+ # 检查bot配置
+ if "bot" in self.config_content:
+ bot_config = self.config_content["bot"]
+ if "qq" not in bot_config:
+ missing_fields.append("bot.qq")
+ if "nickname" not in bot_config:
+ missing_fields.append("bot.nickname")
+
+ # 检查groups配置
+ if "groups" in self.config_content:
+ groups_config = self.config_content["groups"]
+ if "talk_allowed" not in groups_config:
+ missing_fields.append("groups.talk_allowed")
+
+ # 检查platforms配置
+ if "platforms" in self.config_content:
+ platforms_config = self.config_content["platforms"]
+ if not platforms_config or not isinstance(platforms_config, dict) or len(platforms_config) == 0:
+ missing_fields.append("platforms.(至少一个平台)")
+
+ # 检查模型配置
+ if "model" in self.config_content:
+ model_config = self.config_content["model"]
+ required_models = [
+ "llm_reasoning", "llm_normal", "llm_topic_judge",
+ "llm_summary_by_topic", "llm_emotion_judge", "embedding", "vlm"
+ ]
+
+ for model in required_models:
+ if model not in model_config:
+ missing_fields.append(f"model.{model}")
+ elif model in model_config:
+ model_item = model_config[model]
+ if "name" not in model_item:
+ missing_fields.append(f"model.{model}.name")
+ if "provider" not in model_item:
+ missing_fields.append(f"model.{model}.provider")
+
+ # 基于模板检查其它必需字段
+ def check_section(template_section, user_section, prefix):
+ if not isinstance(template_section, dict) or not isinstance(user_section, dict):
+ return
+
+ for key, value in template_section.items():
+ # 跳过注释和数组类型的配置项
+ if key.startswith("#") or isinstance(value, list):
+ continue
+
+ if key not in user_section:
+ missing_fields.append(f"{prefix}.{key}")
+ elif isinstance(value, dict) and key in user_section:
+ # 递归检查嵌套配置项
+ check_section(value, user_section[key], f"{prefix}.{key}")
+
+ for section in required_sections:
+ if section in template_content and section in self.config_content:
+ check_section(template_content[section], self.config_content[section], section)
+
+ # 输出结果
+ if missing_fields:
+ print(f"发现 {len(missing_fields)} 个缺失的配置项:")
+ for field in missing_fields:
+ print(f" - {field}")
+ else:
+ print("检查完成,没有发现缺失的必要配置项。")
+
+ def get_value(self, path):
+ """
+ 获取配置文件中指定路径的值
+ 参数:
+ path: 以点分隔的路径,例如 "bot.qq" 或 "model.llm_normal.name"
+ 返回:
+ 找到的值,如果路径不存在则返回None
+ """
+ if not self.config_content:
+ return None
+
+ parts = path.split('.')
+ current = self.config_content
+
+ try:
+ for part in parts:
+ if isinstance(current, dict) and part in current:
+ current = current[part]
+ else:
+ return None
+ return current
+ except Exception:
+ return None
+
+ def has_field(self, path):
+ """
+ 检查配置文件中是否存在指定路径
+ 参数:
+ path: 以点分隔的路径,例如 "bot.qq" 或 "model.llm_normal.name"
+ 返回:
+ 布尔值,表示路径是否存在
+ """
+ return self.get_value(path) is not None
+
+ def get_section(self, section_name):
+ """
+ 获取配置文件中的整个部分
+ 参数:
+ section_name: 部分名称,例如 "bot" 或 "model"
+ 返回:
+ 字典形式的部分内容,如果部分不存在则返回空字典
+ """
+ if not self.config_content:
+ return {}
+
+ return self.config_content.get(section_name, {})
+
+ def get_all_models(self):
+ """
+ 获取配置中所有的模型配置
+ 返回:
+ 包含所有模型配置的字典
+ """
+ if not self.config_content or "model" not in self.config_content:
+ return {}
+
+ return self.config_content.get("model", {})
+
+ def __str__(self):
+ return f"配置文件路径:{self.config_path}\n配置文件版本:{self.version}\n错误信息:{self.error_message}"
+
+class ConfigHelper:
+ def __init__(self, config_info: ConfigInfo, model_using = "", env_info: EnvInfo = None):
+ self.config_info = config_info
+ self.config_notice = None
+ self.helper_model = LLM_request_off(model_name=model_using,env_info=env_info)
+
+ def deal_format_error(self, error_message, config_content_txt):
+ prompt = f"""
+ 这里有一份配置文件存在格式错误,请检查配置文件为什么会出现该错误以及建议如何修改,不要使用markdown格式
+ 错误信息:{error_message}
+ 配置文件内容:{config_content_txt}
+ 请根据错误信息和配置文件内容,用通俗易懂,简短的语言给出修改建议:
+ """
+
+ try:
+ # 使用流式输出获取分析结果
+ print("\n===== 麦麦分析结果 =====")
+ for chunk in self.helper_model.generate_stream(prompt):
+ print(chunk, end="", flush=True)
+ print("\n=====================")
+
+ except Exception as e:
+ print(f"请求麦麦分析时出错: {str(e)}")
+ print("请手动检查配置文件格式错误:", error_message)
+
+ def load_config_notice(self):
+ with open(os.path.join(os.path.dirname(__file__), "config_notice.md"), "r", encoding="utf-8") as f:
+ self.config_notice = f.read()
+
+ def deal_question(self, question):
+ prompt = f"""
+ 这里有一份配置文件,请根据问题给出回答
+ 配置文件内容:{self.config_info.config_content_txt}
+ 关于配置文件的说明:{self.config_notice}
+ 问题:{question}
+ """
+
+ try:
+ # 使用流式输出获取分析结果
+ print("\n===== 麦麦分析结果 =====")
+ for chunk in self.helper_model.generate_stream(prompt):
+ print(chunk, end="", flush=True)
+ print("\n=====================")
+
+ except Exception as e:
+ print(f"请求麦麦分析时出错: {str(e)}")
+
+
+if __name__ == "__main__":
+ model_using = "deepseek-ai/DeepSeek-V3"
+ # model_using = "Pro/deepseek-ai/DeepSeek-V3"
+ env_info = EnvInfo(".env")
+ result = env_info.check_env()
+ if result == "not_set":
+ print(env_info.error_message)
+ exit()
+ elif result == "only_ds":
+ model_using = "deepseek-chat"
+ print("你只设置了deepseek官方API,可能无法运行MaiBot,但是你仍旧可以运行这个帮助程序,请检查.env文件")
+ elif result == "not_found":
+ print(env_info.error_message)
+ exit()
+
+ config_path = "./config/bot_config.toml"
+ config_info = ConfigInfo(config_path)
+ print("开始检查config/bot_config.toml文件...")
+ result = config_info.check_bot_config()
+ print(config_info)
+
+ helper = ConfigHelper(config_info, model_using, env_info)
+ helper.load_config_notice()
+
+ # 如果配置文件读取成功,展示如何获取字段
+ if config_info.config_content:
+ print("\n配置文件读取成功,可以访问任意字段:")
+ # 获取机器人昵称
+ nickname = config_info.get_value("bot.nickname")
+ print(f"机器人昵称: {nickname}")
+
+ # 获取QQ号
+ qq = config_info.get_value("bot.qq")
+ print(f"机器人QQ: {qq}")
+
+ # 获取群聊配置
+ groups = config_info.get_section("groups")
+ print(f"允许聊天的群: {groups.get('talk_allowed', [])}")
+
+ # 获取模型信息
+ models = config_info.get_all_models()
+ print("\n模型配置信息:")
+ for model_name, model_info in models.items():
+ provider = model_info.get("provider", "未知")
+ model_path = model_info.get("name", "未知")
+ print(f" - {model_name}: {model_path} (提供商: {provider})")
+
+ # 检查某字段是否存在
+ if config_info.has_field("model.llm_normal.temp"):
+ temp = config_info.get_value("model.llm_normal.temp")
+ print(f"\n回复模型温度: {temp}")
+ else:
+ print("\n回复模型温度未设置")
+
+ # 获取心流相关设置
+ if config_info.has_field("heartflow"):
+ heartflow = config_info.get_section("heartflow")
+ print(f"\n心流更新间隔: {heartflow.get('heart_flow_update_interval')}秒")
+ print(f"子心流更新间隔: {heartflow.get('sub_heart_flow_update_interval')}秒")
+
+ if result == "critical_error":
+ print("配置文件存在严重错误,建议重新下载MaiBot")
+ exit()
+ elif result == "format_error":
+ print("配置文件格式错误,正在进行检查...")
+ error_message = config_info.error_message
+ config_content_txt = config_info.config_content_txt
+ helper.deal_format_error(error_message, config_content_txt)
+ else:
+ print("配置文件格式检查完成,没有发现问题")
+
+ while True:
+ question = input("请输入你遇到的问题,麦麦会帮助你分析(输入exit退出):")
+ if question == "exit":
+ break
+ else:
+ print("麦麦正在为你分析...")
+ helper.deal_question(question)
+
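Note on the accessor added above: ConfigInfo.get_value is a plain dotted-path walk over the parsed TOML tree that returns None as soon as any segment is missing. A self-contained sketch of the same pattern (get_by_path is a hypothetical name, and the config path is only an example):

    import tomli

    def get_by_path(tree: dict, path: str, default=None):
        # walk a nested dict with a dotted path such as "model.llm_normal.name"
        current = tree
        for part in path.split("."):
            if isinstance(current, dict) and part in current:
                current = current[part]
            else:
                return default
        return current

    with open("config/bot_config.toml", "rb") as f:
        cfg = tomli.load(f)
    print(get_by_path(cfg, "bot.nickname", default="(unset)"))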
diff --git a/config_helper/config_notice.md b/config_helper/config_notice.md
new file mode 100644
index 000000000..b704c8e47
--- /dev/null
+++ b/config_helper/config_notice.md
@@ -0,0 +1,10 @@
+1. Q:为什么我的bot叫他名字不回消息?
+A:请检查qq和nickname字段是否正确填写
+请将默认字段:
+qq = 114514
+nickname = "麦麦"
+改为你自己的qq号和bot名称(需要与qq昵称相同)
+
+2. Q:如何修改日程表的内容,或者关闭日程表?
+A:日程表目前无法关闭
+如果日程表生成的内容太过科幻或者疯癫,可以尝试调整日程表的温度或者修改日程表描述
\ No newline at end of file
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index dbc12da4e..a6503b807 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -342,13 +342,17 @@ def process_llm_response(text: str) -> List[str]:
protected_text, kaomoji_mapping = protect_kaomoji(text)
logger.debug(f"保护颜文字后的文本: {protected_text}")
# 提取被 () 或 [] 包裹的内容
- pattern = re.compile(r"[\(\[].*?[\)\]]")
+ pattern = re.compile(r"[\(\[\(].*?[\)\]\)]")
# _extracted_contents = pattern.findall(text)
_extracted_contents = pattern.findall(protected_text) # 在保护后的文本上查找
# 去除 () 和 [] 及其包裹的内容
# cleaned_text = pattern.sub("", text)
cleaned_text = pattern.sub("", protected_text)
+
+ if not cleaned_text:
+ return ["呃呃"]
+
logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")
# 对清理后的文本进行进一步处理
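Note: protect_kaomoji and recover_kaomoji are defined elsewhere in utils.py and not shown in this hunk; the widened pattern above additionally strips full-width parentheses (), which is why kaomoji must be swapped out for placeholders first. A minimal sketch of such a placeholder scheme, assuming a deliberately loose kaomoji regex (a real matcher must be stricter so ordinary parenthetical asides still get removed):

    import re
    from typing import Dict, List, Tuple

    # assumption: treat any short (...) run as a kaomoji; illustrative only
    KAOMOJI_RE = re.compile(r"\([^()]{1,15}\)")

    def protect_kaomoji(text: str) -> Tuple[str, Dict[str, str]]:
        mapping: Dict[str, str] = {}

        def repl(m):
            key = "__KAOMOJI_{}__".format(len(mapping))  # no brackets, survives stripping
            mapping[key] = m.group(0)
            return key

        return KAOMOJI_RE.sub(repl, text), mapping

    def recover_kaomoji(sentences: List[str], mapping: Dict[str, str]) -> List[str]:
        out = []
        for s in sentences:
            for key, kao in mapping.items():
                s = s.replace(key, kao)
            out.append(s)
        return out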
From 3a1d0c623598831a7cca7c9b7658b8901aedc50d Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Wed, 16 Apr 2025 20:34:25 +0800
Subject: [PATCH 028/406] =?UTF-8?q?fix=EF=BC=9A=E8=AE=B0=E5=BF=86=E8=8E=B7?=
=?UTF-8?q?=E5=8F=96=E5=B7=A5=E5=85=B7?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/do_tool/tool_can_use/get_memory.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/do_tool/tool_can_use/get_memory.py b/src/do_tool/tool_can_use/get_memory.py
index 6a3c1c391..ae1677006 100644
--- a/src/do_tool/tool_can_use/get_memory.py
+++ b/src/do_tool/tool_can_use/get_memory.py
@@ -9,7 +9,7 @@ logger = get_module_logger("mid_chat_mem_tool")
class GetMemoryTool(BaseTool):
"""从记忆系统中获取相关记忆的工具"""
- name = "mid_chat_mem"
+ name = "get_memory"
description = "从记忆系统中获取相关记忆"
parameters = {
"type": "object",
@@ -49,10 +49,10 @@ class GetMemoryTool(BaseTool):
else:
content = f"你不太记得有关{text}的记忆,你对此不太了解"
- return {"name": "mid_chat_mem", "content": content}
+ return {"name": "get_memory", "content": content}
except Exception as e:
logger.error(f"记忆获取工具执行失败: {str(e)}")
- return {"name": "mid_chat_mem", "content": f"记忆获取失败: {str(e)}"}
+ return {"name": "get_memory", "content": f"记忆获取失败: {str(e)}"}
# 注册工具
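Note on the rename above: tools are looked up by their declared name, so the class attribute and the "name" echoed in each result dict have to agree; "mid_chat_mem" in one place and "get_memory" in the other would make results unattributable. A toy sketch of that invariant (the BaseTool and registry internals below are assumptions, not the repository's actual code):

    TOOL_REGISTRY = {}

    class BaseTool:
        name = "base"

        def execute(self, args):
            raise NotImplementedError

    def register_tool(cls):
        TOOL_REGISTRY[cls.name] = cls()
        return cls

    @register_tool
    class GetMemoryTool(BaseTool):
        name = "get_memory"

        def execute(self, args):
            # echoing self.name (not a string literal) keeps the result key and
            # the registry key from drifting apart, which is the bug fixed above
            return {"name": self.name, "content": "..."}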
From 8d88bc475c3c32f975d61dc6abfcabf97a5460b2 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Wed, 16 Apr 2025 20:43:46 +0800
Subject: [PATCH 029/406] =?UTF-8?q?fix=EF=BC=9A=E7=A7=BB=E9=99=A4=E6=97=A0?=
=?UTF-8?q?=E7=94=A8=E5=8A=9F=E8=83=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/personality_s/big5_test.py | 111 -------
src/plugins/personality_s/can_i_recog_u.py | 353 ---------------------
src/plugins/personality_s/combined_test.py | 349 --------------------
src/plugins/personality_s/offline_llm.py | 123 -------
src/plugins/personality_s/questionnaire.py | 142 ---------
src/plugins/personality_s/renqingziji.py | 195 ------------
src/plugins/personality_s/who_r_u.py | 156 ---------
src/plugins/personality_s/看我.txt | 1 -
8 files changed, 1430 deletions(-)
delete mode 100644 src/plugins/personality_s/big5_test.py
delete mode 100644 src/plugins/personality_s/can_i_recog_u.py
delete mode 100644 src/plugins/personality_s/combined_test.py
delete mode 100644 src/plugins/personality_s/offline_llm.py
delete mode 100644 src/plugins/personality_s/questionnaire.py
delete mode 100644 src/plugins/personality_s/renqingziji.py
delete mode 100644 src/plugins/personality_s/who_r_u.py
delete mode 100644 src/plugins/personality_s/看我.txt
diff --git a/src/plugins/personality_s/big5_test.py b/src/plugins/personality_s/big5_test.py
deleted file mode 100644
index a680bce94..000000000
--- a/src/plugins/personality_s/big5_test.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-# from .questionnaire import PERSONALITY_QUESTIONS, FACTOR_DESCRIPTIONS
-
-import os
-import sys
-from pathlib import Path
-import random
-
-current_dir = Path(__file__).resolve().parent
-project_root = current_dir.parent.parent.parent
-env_path = project_root / ".env"
-
-root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
-sys.path.append(root_path)
-
-from src.plugins.personality.questionnaire import PERSONALITY_QUESTIONS, FACTOR_DESCRIPTIONS # noqa: E402
-
-
-class BigFiveTest:
- def __init__(self):
- self.questions = PERSONALITY_QUESTIONS
- self.factors = FACTOR_DESCRIPTIONS
-
- def run_test(self):
- """运行测试并收集答案"""
- print("\n欢迎参加中国大五人格测试!")
- print("\n本测试采用六级评分,请根据每个描述与您的符合程度进行打分:")
- print("1 = 完全不符合")
- print("2 = 比较不符合")
- print("3 = 有点不符合")
- print("4 = 有点符合")
- print("5 = 比较符合")
- print("6 = 完全符合")
- print("\n请认真阅读每个描述,选择最符合您实际情况的选项。\n")
-
- # 创建题目序号到题目的映射
- questions_map = {q["id"]: q for q in self.questions}
-
- # 获取所有题目ID并随机打乱顺序
- question_ids = list(questions_map.keys())
- random.shuffle(question_ids)
-
- answers = {}
- total_questions = len(question_ids)
-
- for i, question_id in enumerate(question_ids, 1):
- question = questions_map[question_id]
- while True:
- try:
- print(f"\n[{i}/{total_questions}] {question['content']}")
- score = int(input("您的评分(1-6): "))
- if 1 <= score <= 6:
- answers[question_id] = score
- break
- else:
- print("请输入1-6之间的数字!")
- except ValueError:
- print("请输入有效的数字!")
-
- return self.calculate_scores(answers)
-
- def calculate_scores(self, answers):
- """计算各维度得分"""
- results = {}
- factor_questions = {"外向性": [], "神经质": [], "严谨性": [], "开放性": [], "宜人性": []}
-
- # 将题目按因子分类
- for q in self.questions:
- factor_questions[q["factor"]].append(q)
-
- # 计算每个维度的得分
- for factor, questions in factor_questions.items():
- total_score = 0
- for q in questions:
- score = answers[q["id"]]
- # 处理反向计分题目
- if q["reverse_scoring"]:
- score = 7 - score # 6分量表反向计分为7减原始分
- total_score += score
-
- # 计算平均分
- avg_score = round(total_score / len(questions), 2)
- results[factor] = {"得分": avg_score, "题目数": len(questions), "总分": total_score}
-
- return results
-
- def get_factor_description(self, factor):
- """获取因子的详细描述"""
- return self.factors[factor]
-
-
-def main():
- test = BigFiveTest()
- results = test.run_test()
-
- print("\n测试结果:")
- print("=" * 50)
- for factor, data in results.items():
- print(f"\n{factor}:")
- print(f"平均分: {data['得分']} (总分: {data['总分']}, 题目数: {data['题目数']})")
- print("-" * 30)
- description = test.get_factor_description(factor)
- print("维度说明:", description["description"][:100] + "...")
- print("\n特征词:", ", ".join(description["trait_words"]))
- print("=" * 50)
-
-
-if __name__ == "__main__":
- main()
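Note: the reverse scoring in the deleted test maps a raw response r on the 6-point scale to 7 - r, so full agreement with a reverse-keyed item counts as 1 toward its factor. A quick check of the mapping:

    # 1<->6, 2<->5, 3<->4 on a 6-point Likert scale
    assert [7 - r for r in range(1, 7)] == [6, 5, 4, 3, 2, 1]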
diff --git a/src/plugins/personality_s/can_i_recog_u.py b/src/plugins/personality_s/can_i_recog_u.py
deleted file mode 100644
index c21048e6d..000000000
--- a/src/plugins/personality_s/can_i_recog_u.py
+++ /dev/null
@@ -1,353 +0,0 @@
-"""
-基于聊天记录的人格特征分析系统
-"""
-
-from typing import Dict, List
-import json
-import os
-from pathlib import Path
-from dotenv import load_dotenv
-import sys
-import random
-from collections import defaultdict
-import matplotlib.pyplot as plt
-import numpy as np
-from datetime import datetime
-import matplotlib.font_manager as fm
-
-current_dir = Path(__file__).resolve().parent
-project_root = current_dir.parent.parent.parent
-env_path = project_root / ".env"
-
-root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
-sys.path.append(root_path)
-
-from src.plugins.personality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa: E402
-from src.plugins.personality.questionnaire import FACTOR_DESCRIPTIONS # noqa: E402
-from src.plugins.personality.offline_llm import LLMModel # noqa: E402
-from src.plugins.personality.who_r_u import MessageAnalyzer # noqa: E402
-
-# 加载环境变量
-if env_path.exists():
- print(f"从 {env_path} 加载环境变量")
- load_dotenv(env_path)
-else:
- print(f"未找到环境变量文件: {env_path}")
- print("将使用默认配置")
-
-
-class ChatBasedPersonalityEvaluator:
- def __init__(self):
- self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
- self.scenarios = []
- self.message_analyzer = MessageAnalyzer()
- self.llm = LLMModel()
- self.trait_scores_history = defaultdict(list) # 记录每个特质的得分历史
-
- # 为每个人格特质获取对应的场景
- for trait in PERSONALITY_SCENES:
- scenes = get_scene_by_factor(trait)
- if not scenes:
- continue
- scene_keys = list(scenes.keys())
- selected_scenes = random.sample(scene_keys, min(3, len(scene_keys)))
-
- for scene_key in selected_scenes:
- scene = scenes[scene_key]
- other_traits = [t for t in PERSONALITY_SCENES if t != trait]
- secondary_trait = random.choice(other_traits)
- self.scenarios.append(
- {"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key}
- )
-
- def analyze_chat_context(self, messages: List[Dict]) -> str:
- """
- 分析一组消息的上下文,生成场景描述
- """
- context = ""
- for msg in messages:
- nickname = msg.get("user_info", {}).get("user_nickname", "未知用户")
- content = msg.get("processed_plain_text", msg.get("detailed_plain_text", ""))
- if content:
- context += f"{nickname}: {content}\n"
- return context
-
- def evaluate_chat_response(
- self, user_nickname: str, chat_context: str, dimensions: List[str] = None
- ) -> Dict[str, float]:
- """
- 评估聊天内容在各个人格维度上的得分
- """
- # 使用所有维度进行评估
- dimensions = list(self.personality_traits.keys())
-
- dimension_descriptions = []
- for dim in dimensions:
- desc = FACTOR_DESCRIPTIONS.get(dim, "")
- if desc:
- dimension_descriptions.append(f"- {dim}:{desc}")
-
- dimensions_text = "\n".join(dimension_descriptions)
-
- prompt = f"""请根据以下聊天记录,评估"{user_nickname}"在大五人格模型中的维度得分(1-6分)。
-
-聊天记录:
-{chat_context}
-
-需要评估的维度说明:
-{dimensions_text}
-
-请按照以下格式输出评估结果,注意,你的评价对象是"{user_nickname}"(仅输出JSON格式):
-{{
- "开放性": 分数,
- "严谨性": 分数,
- "外向性": 分数,
- "宜人性": 分数,
- "神经质": 分数
-}}
-
-评分标准:
-1 = 非常不符合该维度特征
-2 = 比较不符合该维度特征
-3 = 有点不符合该维度特征
-4 = 有点符合该维度特征
-5 = 比较符合该维度特征
-6 = 非常符合该维度特征
-
-如果你觉得某个维度没有相关信息或者无法判断,请输出0分
-
-请根据聊天记录的内容和语气,结合维度说明进行评分。如果维度可以评分,确保分数在1-6之间。如果没有体现,请输出0分"""
-
- try:
- ai_response, _ = self.llm.generate_response(prompt)
- start_idx = ai_response.find("{")
- end_idx = ai_response.rfind("}") + 1
- if start_idx != -1 and end_idx != 0:
- json_str = ai_response[start_idx:end_idx]
- scores = json.loads(json_str)
- return {k: max(0, min(6, float(v))) for k, v in scores.items()}
- else:
- print("AI响应格式不正确,使用默认评分")
- return {dim: 0 for dim in dimensions}
- except Exception as e:
- print(f"评估过程出错:{str(e)}")
- return {dim: 0 for dim in dimensions}
-
- def evaluate_user_personality(self, qq_id: str, num_samples: int = 10, context_length: int = 5) -> Dict:
- """
- 基于用户的聊天记录评估人格特征
-
- Args:
- qq_id (str): 用户QQ号
- num_samples (int): 要分析的聊天片段数量
- context_length (int): 每个聊天片段的上下文长度
-
- Returns:
- Dict: 评估结果
- """
- # 获取用户的随机消息及其上下文
- chat_contexts, user_nickname = self.message_analyzer.get_user_random_contexts(
- qq_id, num_messages=num_samples, context_length=context_length
- )
- if not chat_contexts:
- return {"error": f"没有找到QQ号 {qq_id} 的消息记录"}
-
- # 初始化评分
- final_scores = defaultdict(float)
- dimension_counts = defaultdict(int)
- chat_samples = []
-
- # 清空历史记录
- self.trait_scores_history.clear()
-
- # 分析每个聊天上下文
- for chat_context in chat_contexts:
- # 评估这段聊天内容的所有维度
- scores = self.evaluate_chat_response(user_nickname, chat_context)
-
- # 记录样本
- chat_samples.append(
- {"聊天内容": chat_context, "评估维度": list(self.personality_traits.keys()), "评分": scores}
- )
-
- # 更新总分和历史记录
- for dimension, score in scores.items():
- if score > 0: # 只统计大于0的有效分数
- final_scores[dimension] += score
- dimension_counts[dimension] += 1
- self.trait_scores_history[dimension].append(score)
-
- # 计算平均分
- average_scores = {}
- for dimension in self.personality_traits:
- if dimension_counts[dimension] > 0:
- average_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2)
- else:
- average_scores[dimension] = 0 # 如果没有有效分数,返回0
-
- # 生成趋势图
- self._generate_trend_plot(qq_id, user_nickname)
-
- result = {
- "用户QQ": qq_id,
- "用户昵称": user_nickname,
- "样本数量": len(chat_samples),
- "人格特征评分": average_scores,
- "维度评估次数": dict(dimension_counts),
- "详细样本": chat_samples,
- "特质得分历史": {k: v for k, v in self.trait_scores_history.items()},
- }
-
- # 保存结果
- os.makedirs("results", exist_ok=True)
- result_file = f"results/personality_result_{qq_id}.json"
- with open(result_file, "w", encoding="utf-8") as f:
- json.dump(result, f, ensure_ascii=False, indent=2)
-
- return result
-
- def _generate_trend_plot(self, qq_id: str, user_nickname: str):
- """
- 生成人格特质累计平均分变化趋势图
- """
- # 查找系统中可用的中文字体
- chinese_fonts = []
- for f in fm.fontManager.ttflist:
- try:
- if "简" in f.name or "SC" in f.name or "黑" in f.name or "宋" in f.name or "微软" in f.name:
- chinese_fonts.append(f.name)
- except Exception:
- continue
-
- if chinese_fonts:
- plt.rcParams["font.sans-serif"] = chinese_fonts + ["SimHei", "Microsoft YaHei", "Arial Unicode MS"]
- else:
- # 如果没有找到中文字体,使用默认字体,并将中文昵称转换为拼音或英文
- try:
- from pypinyin import lazy_pinyin
-
- user_nickname = "".join(lazy_pinyin(user_nickname))
- except ImportError:
- user_nickname = "User" # 如果无法转换为拼音,使用默认英文
-
- plt.rcParams["axes.unicode_minus"] = False # 解决负号显示问题
-
- plt.figure(figsize=(12, 6))
- plt.style.use("bmh") # 使用内置的bmh样式,它有类似seaborn的美观效果
-
- colors = {
- "开放性": "#FF9999",
- "严谨性": "#66B2FF",
- "外向性": "#99FF99",
- "宜人性": "#FFCC99",
- "神经质": "#FF99CC",
- }
-
- # 计算每个维度在每个时间点的累计平均分
- cumulative_averages = {}
- for trait, scores in self.trait_scores_history.items():
- if not scores:
- continue
-
- averages = []
- total = 0
- valid_count = 0
- for score in scores:
- if score > 0: # 只计算大于0的有效分数
- total += score
- valid_count += 1
- if valid_count > 0:
- averages.append(total / valid_count)
- else:
- # 如果当前分数无效,使用前一个有效的平均分
- if averages:
- averages.append(averages[-1])
- else:
- continue # 跳过无效分数
-
- if averages: # 只有在有有效分数的情况下才添加到累计平均中
- cumulative_averages[trait] = averages
-
- # 绘制每个维度的累计平均分变化趋势
- for trait, averages in cumulative_averages.items():
- x = range(1, len(averages) + 1)
- plt.plot(x, averages, "o-", label=trait, color=colors.get(trait), linewidth=2, markersize=8)
-
- # 添加趋势线
- z = np.polyfit(x, averages, 1)
- p = np.poly1d(z)
- plt.plot(x, p(x), "--", color=colors.get(trait), alpha=0.5)
-
- plt.title(f"{user_nickname} 的人格特质累计平均分变化趋势", fontsize=14, pad=20)
- plt.xlabel("评估次数", fontsize=12)
- plt.ylabel("累计平均分", fontsize=12)
- plt.grid(True, linestyle="--", alpha=0.7)
- plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
- plt.ylim(0, 7)
- plt.tight_layout()
-
- # 保存图表
- os.makedirs("results/plots", exist_ok=True)
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- plot_file = f"results/plots/personality_trend_{qq_id}_{timestamp}.png"
- plt.savefig(plot_file, dpi=300, bbox_inches="tight")
- plt.close()
-
-
-def analyze_user_personality(qq_id: str, num_samples: int = 10, context_length: int = 5) -> str:
- """
- 分析用户人格特征的便捷函数
-
- Args:
- qq_id (str): 用户QQ号
- num_samples (int): 要分析的聊天片段数量
- context_length (int): 每个聊天片段的上下文长度
-
- Returns:
- str: 格式化的分析结果
- """
- evaluator = ChatBasedPersonalityEvaluator()
- result = evaluator.evaluate_user_personality(qq_id, num_samples, context_length)
-
- if "error" in result:
- return result["error"]
-
- # 格式化输出
- output = f"QQ号 {qq_id} ({result['用户昵称']}) 的人格特征分析结果:\n"
- output += "=" * 50 + "\n\n"
-
- output += "人格特征评分:\n"
- for trait, score in result["人格特征评分"].items():
- if score == 0:
- output += f"{trait}: 数据不足,无法判断 (评估次数: {result['维度评估次数'].get(trait, 0)})\n"
- else:
- output += f"{trait}: {score}/6 (评估次数: {result['维度评估次数'].get(trait, 0)})\n"
-
- # 添加变化趋势描述
- if trait in result["特质得分历史"] and len(result["特质得分历史"][trait]) > 1:
- scores = [s for s in result["特质得分历史"][trait] if s != 0] # 过滤掉无效分数
- if len(scores) > 1: # 确保有足够的有效分数计算趋势
- trend = np.polyfit(range(len(scores)), scores, 1)[0]
- if abs(trend) < 0.1:
- trend_desc = "保持稳定"
- elif trend > 0:
- trend_desc = "呈上升趋势"
- else:
- trend_desc = "呈下降趋势"
- output += f" 变化趋势: {trend_desc} (斜率: {trend:.2f})\n"
-
- output += f"\n分析样本数量:{result['样本数量']}\n"
- output += f"结果已保存至:results/personality_result_{qq_id}.json\n"
- output += "变化趋势图已保存至:results/plots/目录\n"
-
- return output
-
-
-if __name__ == "__main__":
- # 测试代码
- # test_qq = "" # 替换为要测试的QQ号
- # print(analyze_user_personality(test_qq, num_samples=30, context_length=20))
- # test_qq = ""
- # print(analyze_user_personality(test_qq, num_samples=30, context_length=20))
- test_qq = "1026294844"
- print(analyze_user_personality(test_qq, num_samples=30, context_length=30))
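Note: the trend plot in the deleted module draws, per trait, a running mean over valid (>0) scores and repeats the previous mean whenever a sample scored 0 (no signal). The core of that logic, as a standalone sketch:

    def cumulative_averages(scores):
        # running mean over valid (>0) scores; carry the last mean forward otherwise
        out, total, count = [], 0.0, 0
        for s in scores:
            if s > 0:
                total += s
                count += 1
                out.append(total / count)
            elif out:
                out.append(out[-1])  # invalid score: repeat the previous mean
            # leading invalid scores produce no point at all
        return out

    print(cumulative_averages([4, 0, 5, 6]))  # [4.0, 4.0, 4.5, 5.0]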
diff --git a/src/plugins/personality_s/combined_test.py b/src/plugins/personality_s/combined_test.py
deleted file mode 100644
index 1a1e9060e..000000000
--- a/src/plugins/personality_s/combined_test.py
+++ /dev/null
@@ -1,349 +0,0 @@
-from typing import Dict
-import json
-import os
-from pathlib import Path
-import sys
-from datetime import datetime
-import random
-from scipy import stats # 添加scipy导入用于t检验
-
-current_dir = Path(__file__).resolve().parent
-project_root = current_dir.parent.parent.parent
-env_path = project_root / ".env"
-
-root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
-sys.path.append(root_path)
-
-from src.plugins.personality.big5_test import BigFiveTest # noqa: E402
-from src.plugins.personality.renqingziji import PersonalityEvaluator_direct # noqa: E402
-from src.plugins.personality.questionnaire import FACTOR_DESCRIPTIONS, PERSONALITY_QUESTIONS # noqa: E402
-
-
-class CombinedPersonalityTest:
- def __init__(self):
- self.big5_test = BigFiveTest()
- self.scenario_test = PersonalityEvaluator_direct()
- self.dimensions = ["开放性", "严谨性", "外向性", "宜人性", "神经质"]
-
- def run_combined_test(self):
- """运行组合测试"""
- print("\n=== 人格特征综合评估系统 ===")
- print("\n本测试将通过两种方式评估人格特征:")
- print("1. 传统问卷测评(约40题)")
- print("2. 情景反应测评(15个场景)")
- print("\n两种测评完成后,将对比分析结果的异同。")
- input("\n准备好开始第一部分(问卷测评)了吗?按回车继续...")
-
- # 运行问卷测试
- print("\n=== 第一部分:问卷测评 ===")
- print("本部分采用六级评分,请根据每个描述与您的符合程度进行打分:")
- print("1 = 完全不符合")
- print("2 = 比较不符合")
- print("3 = 有点不符合")
- print("4 = 有点符合")
- print("5 = 比较符合")
- print("6 = 完全符合")
- print("\n重要提示:您可以选择以下两种方式之一来回答问题:")
- print("1. 根据您自身的真实情况来回答")
- print("2. 根据您想要扮演的角色特征来回答")
- print("\n无论选择哪种方式,请保持一致并认真回答每个问题。")
- input("\n按回车开始答题...")
-
- questionnaire_results = self.run_questionnaire()
-
- # 转换问卷结果格式以便比较
- questionnaire_scores = {factor: data["得分"] for factor, data in questionnaire_results.items()}
-
- # 运行情景测试
- print("\n=== 第二部分:情景反应测评 ===")
- print("接下来,您将面对一系列具体场景,请描述您在每个场景中可能的反应。")
- print("每个场景都会评估不同的人格维度,共15个场景。")
- print("您可以选择提供自己的真实反应,也可以选择扮演一个您创作的角色来回答。")
- input("\n准备好开始了吗?按回车继续...")
-
- scenario_results = self.run_scenario_test()
-
- # 比较和展示结果
- self.compare_and_display_results(questionnaire_scores, scenario_results)
-
- # 保存结果
- self.save_results(questionnaire_scores, scenario_results)
-
- def run_questionnaire(self):
- """运行问卷测试部分"""
- # 创建题目序号到题目的映射
- questions_map = {q["id"]: q for q in PERSONALITY_QUESTIONS}
-
- # 获取所有题目ID并随机打乱顺序
- question_ids = list(questions_map.keys())
- random.shuffle(question_ids)
-
- answers = {}
- total_questions = len(question_ids)
-
- for i, question_id in enumerate(question_ids, 1):
- question = questions_map[question_id]
- while True:
- try:
- print(f"\n问题 [{i}/{total_questions}]")
- print(f"{question['content']}")
- score = int(input("您的评分(1-6): "))
- if 1 <= score <= 6:
- answers[question_id] = score
- break
- else:
- print("请输入1-6之间的数字!")
- except ValueError:
- print("请输入有效的数字!")
-
- # 每10题显示一次进度
- if i % 10 == 0:
- print(f"\n已完成 {i}/{total_questions} 题 ({int(i / total_questions * 100)}%)")
-
- return self.calculate_questionnaire_scores(answers)
-
- def calculate_questionnaire_scores(self, answers):
- """计算问卷测试的维度得分"""
- results = {}
- factor_questions = {"外向性": [], "神经质": [], "严谨性": [], "开放性": [], "宜人性": []}
-
- # 将题目按因子分类
- for q in PERSONALITY_QUESTIONS:
- factor_questions[q["factor"]].append(q)
-
- # 计算每个维度的得分
- for factor, questions in factor_questions.items():
- total_score = 0
- for q in questions:
- score = answers[q["id"]]
- # 处理反向计分题目
- if q["reverse_scoring"]:
- score = 7 - score # 6分量表反向计分为7减原始分
- total_score += score
-
- # 计算平均分
- avg_score = round(total_score / len(questions), 2)
- results[factor] = {"得分": avg_score, "题目数": len(questions), "总分": total_score}
-
- return results
-
- def run_scenario_test(self):
- """运行情景测试部分"""
- final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
- dimension_counts = {trait: 0 for trait in final_scores.keys()}
-
- # 随机打乱场景顺序
- scenarios = self.scenario_test.scenarios.copy()
- random.shuffle(scenarios)
-
- for i, scenario_data in enumerate(scenarios, 1):
- print(f"\n场景 [{i}/{len(scenarios)}] - {scenario_data['场景编号']}")
- print("-" * 50)
- print(scenario_data["场景"])
- print("\n请描述您在这种情况下会如何反应:")
- response = input().strip()
-
- if not response:
- print("反应描述不能为空!")
- continue
-
- print("\n正在评估您的描述...")
- scores = self.scenario_test.evaluate_response(scenario_data["场景"], response, scenario_data["评估维度"])
-
- # 更新分数
- for dimension, score in scores.items():
- final_scores[dimension] += score
- dimension_counts[dimension] += 1
-
- # print("\n当前场景评估结果:")
- # print("-" * 30)
- # for dimension, score in scores.items():
- # print(f"{dimension}: {score}/6")
-
- # 每5个场景显示一次总进度
- if i % 5 == 0:
- print(f"\n已完成 {i}/{len(scenarios)} 个场景 ({int(i / len(scenarios) * 100)}%)")
-
- if i < len(scenarios):
- input("\n按回车继续下一个场景...")
-
- # 计算平均分
- for dimension in final_scores:
- if dimension_counts[dimension] > 0:
- final_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2)
-
- return final_scores
-
- def compare_and_display_results(self, questionnaire_scores: Dict, scenario_scores: Dict):
- """比较和展示两种测试的结果"""
- print("\n=== 测评结果对比分析 ===")
- print("\n" + "=" * 60)
- print(f"{'维度':<8} {'问卷得分':>10} {'情景得分':>10} {'差异':>10} {'差异程度':>10}")
- print("-" * 60)
-
- # 收集每个维度的得分用于统计分析
- questionnaire_values = []
- scenario_values = []
- diffs = []
-
- for dimension in self.dimensions:
- q_score = questionnaire_scores[dimension]
- s_score = scenario_scores[dimension]
- diff = round(abs(q_score - s_score), 2)
-
- questionnaire_values.append(q_score)
- scenario_values.append(s_score)
- diffs.append(diff)
-
- # 计算差异程度
- diff_level = "低" if diff < 0.5 else "中" if diff < 1.0 else "高"
- print(f"{dimension:<8} {q_score:>10.2f} {s_score:>10.2f} {diff:>10.2f} {diff_level:>10}")
-
- print("=" * 60)
-
- # 计算整体统计指标
- mean_diff = sum(diffs) / len(diffs)
- std_diff = (sum((x - mean_diff) ** 2 for x in diffs) / (len(diffs) - 1)) ** 0.5
-
- # 计算效应量 (Cohen's d)
- pooled_std = (
- (
- sum((x - sum(questionnaire_values) / len(questionnaire_values)) ** 2 for x in questionnaire_values)
- + sum((x - sum(scenario_values) / len(scenario_values)) ** 2 for x in scenario_values)
- )
- / (2 * len(self.dimensions) - 2)
- ) ** 0.5
-
- if pooled_std != 0:
- cohens_d = abs(mean_diff / pooled_std)
-
- # 解释效应量
- if cohens_d < 0.2:
- effect_size = "微小"
- elif cohens_d < 0.5:
- effect_size = "小"
- elif cohens_d < 0.8:
- effect_size = "中等"
- else:
- effect_size = "大"
-
- # 对所有维度进行整体t检验
- t_stat, p_value = stats.ttest_rel(questionnaire_values, scenario_values)
- print("\n整体统计分析:")
- print(f"平均差异: {mean_diff:.3f}")
- print(f"差异标准差: {std_diff:.3f}")
- print(f"效应量(Cohen's d): {cohens_d:.3f}")
- print(f"效应量大小: {effect_size}")
- print(f"t统计量: {t_stat:.3f}")
- print(f"p值: {p_value:.3f}")
-
- if p_value < 0.05:
- print("结论: 两种测评方法的结果存在显著差异 (p < 0.05)")
- else:
- print("结论: 两种测评方法的结果无显著差异 (p >= 0.05)")
-
- print("\n维度说明:")
- for dimension in self.dimensions:
- print(f"\n{dimension}:")
- desc = FACTOR_DESCRIPTIONS[dimension]
- print(f"定义:{desc['description']}")
- print(f"特征词:{', '.join(desc['trait_words'])}")
-
- # 分析显著差异
- significant_diffs = []
- for dimension in self.dimensions:
- diff = abs(questionnaire_scores[dimension] - scenario_scores[dimension])
- if diff >= 1.0: # 差异大于等于1分视为显著
- significant_diffs.append(
- {
- "dimension": dimension,
- "diff": diff,
- "questionnaire": questionnaire_scores[dimension],
- "scenario": scenario_scores[dimension],
- }
- )
-
- if significant_diffs:
- print("\n\n显著差异分析:")
- print("-" * 40)
- for diff in significant_diffs:
- print(f"\n{diff['dimension']}维度的测评结果存在显著差异:")
- print(f"问卷得分:{diff['questionnaire']:.2f}")
- print(f"情景得分:{diff['scenario']:.2f}")
- print(f"差异值:{diff['diff']:.2f}")
-
- # 分析可能的原因
- if diff["questionnaire"] > diff["scenario"]:
- print("可能原因:在问卷中的自我评价较高,但在具体情景中的表现较为保守。")
- else:
- print("可能原因:在具体情景中表现出更多该维度特征,而在问卷自评时较为保守。")
-
- def save_results(self, questionnaire_scores: Dict, scenario_scores: Dict):
- """保存测试结果"""
- results = {
- "测试时间": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
- "问卷测评结果": questionnaire_scores,
- "情景测评结果": scenario_scores,
- "维度说明": FACTOR_DESCRIPTIONS,
- }
-
- # 确保目录存在
- os.makedirs("results", exist_ok=True)
-
- # 生成带时间戳的文件名
- filename = f"results/personality_combined_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
-
- # 保存到文件
- with open(filename, "w", encoding="utf-8") as f:
- json.dump(results, f, ensure_ascii=False, indent=2)
-
- print(f"\n完整的测评结果已保存到:{filename}")
-
-
-def load_existing_results():
- """检查并加载已有的测试结果"""
- results_dir = "results"
- if not os.path.exists(results_dir):
- return None
-
- # 获取所有personality_combined开头的文件
- result_files = [f for f in os.listdir(results_dir) if f.startswith("personality_combined_") and f.endswith(".json")]
-
- if not result_files:
- return None
-
- # 按文件修改时间排序,获取最新的结果文件
- latest_file = max(result_files, key=lambda f: os.path.getmtime(os.path.join(results_dir, f)))
-
- print(f"\n发现已有的测试结果:{latest_file}")
- try:
- with open(os.path.join(results_dir, latest_file), "r", encoding="utf-8") as f:
- results = json.load(f)
- return results
- except Exception as e:
- print(f"读取结果文件时出错:{str(e)}")
- return None
-
-
-def main():
- test = CombinedPersonalityTest()
-
- # 检查是否存在已有结果
- existing_results = load_existing_results()
-
- if existing_results:
- print("\n=== 使用已有测试结果进行分析 ===")
- print(f"测试时间:{existing_results['测试时间']}")
-
- questionnaire_scores = existing_results["问卷测评结果"]
- scenario_scores = existing_results["情景测评结果"]
-
- # 直接进行结果对比分析
- test.compare_and_display_results(questionnaire_scores, scenario_scores)
- else:
- print("\n未找到已有的测试结果,开始新的测试...")
- test.run_combined_test()
-
-
-if __name__ == "__main__":
- main()
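Note: the statistics block in the deleted module computes the paired t-test via scipy but Cohen's d by hand with a pooled-SD formula. For reference, the common paired-sample variant (d_z, mean difference over the SD of the differences) is a one-liner; the numbers below are made-up example data, not results from the project:

    import numpy as np
    from scipy import stats

    q = np.array([4.2, 3.8, 5.0, 4.1, 2.9])  # questionnaire scores (example data)
    s = np.array([3.6, 4.0, 4.4, 3.2, 3.1])  # scenario scores (example data)
    t_stat, p_value = stats.ttest_rel(q, s)  # paired t-test, as in the deleted code
    d_z = (q - s).mean() / (q - s).std(ddof=1)  # paired-sample effect size
    print(f"t={t_stat:.3f}, p={p_value:.3f}, d_z={d_z:.3f}")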
diff --git a/src/plugins/personality_s/offline_llm.py b/src/plugins/personality_s/offline_llm.py
deleted file mode 100644
index db51ca00f..000000000
--- a/src/plugins/personality_s/offline_llm.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import asyncio
-import os
-import time
-from typing import Tuple, Union
-
-import aiohttp
-import requests
-from src.common.logger import get_module_logger
-
-logger = get_module_logger("offline_llm")
-
-
-class LLMModel:
- def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
- self.model_name = model_name
- self.params = kwargs
- self.api_key = os.getenv("SILICONFLOW_KEY")
- self.base_url = os.getenv("SILICONFLOW_BASE_URL")
-
- if not self.api_key or not self.base_url:
- raise ValueError("环境变量未正确加载:SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")
-
- logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 base_url
-
- def generate_response(self, prompt: str) -> Union[str, Tuple[str, str]]:
- """根据输入的提示生成模型的响应"""
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
-
- # 构建请求体
- data = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "temperature": 0.5,
- **self.params,
- }
-
- # 发送请求到完整的 chat/completions 端点
- api_url = f"{self.base_url.rstrip('/')}/chat/completions"
- logger.info(f"Request URL: {api_url}") # 记录请求的 URL
-
- max_retries = 3
- base_wait_time = 15 # 基础等待时间(秒)
-
- for retry in range(max_retries):
- try:
- response = requests.post(api_url, headers=headers, json=data)
-
- if response.status_code == 429:
- wait_time = base_wait_time * (2**retry) # 指数退避
- logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
- time.sleep(wait_time)
- continue
-
- response.raise_for_status() # 检查其他响应状态
-
- result = response.json()
- if "choices" in result and len(result["choices"]) > 0:
- content = result["choices"][0]["message"]["content"]
- reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
- return content, reasoning_content
- return "没有返回结果", ""
-
- except Exception as e:
- if retry < max_retries - 1: # 如果还有重试机会
- wait_time = base_wait_time * (2**retry)
- logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
- time.sleep(wait_time)
- else:
- logger.error(f"请求失败: {str(e)}")
- return f"请求失败: {str(e)}", ""
-
- logger.error("达到最大重试次数,请求仍然失败")
- return "达到最大重试次数,请求仍然失败", ""
-
- async def generate_response_async(self, prompt: str) -> Union[str, Tuple[str, str]]:
- """异步方式根据输入的提示生成模型的响应"""
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
-
- # 构建请求体
- data = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "temperature": 0.5,
- **self.params,
- }
-
- # 发送请求到完整的 chat/completions 端点
- api_url = f"{self.base_url.rstrip('/')}/chat/completions"
- logger.info(f"Request URL: {api_url}") # 记录请求的 URL
-
- max_retries = 3
- base_wait_time = 15
-
- async with aiohttp.ClientSession() as session:
- for retry in range(max_retries):
- try:
- async with session.post(api_url, headers=headers, json=data) as response:
- if response.status == 429:
- wait_time = base_wait_time * (2**retry) # 指数退避
- logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
- await asyncio.sleep(wait_time)
- continue
-
- response.raise_for_status() # 检查其他响应状态
-
- result = await response.json()
- if "choices" in result and len(result["choices"]) > 0:
- content = result["choices"][0]["message"]["content"]
- reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
- return content, reasoning_content
- return "没有返回结果", ""
-
- except Exception as e:
- if retry < max_retries - 1: # 如果还有重试机会
- wait_time = base_wait_time * (2**retry)
- logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
- await asyncio.sleep(wait_time)
- else:
- logger.error(f"请求失败: {str(e)}")
- return f"请求失败: {str(e)}", ""
-
- logger.error("达到最大重试次数,请求仍然失败")
- return "达到最大重试次数,请求仍然失败", ""
diff --git a/src/plugins/personality_s/questionnaire.py b/src/plugins/personality_s/questionnaire.py
deleted file mode 100644
index 8e965061d..000000000
--- a/src/plugins/personality_s/questionnaire.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# 人格测试问卷题目
-# 王孟成, 戴晓阳, & 姚树桥. (2011).
-# 中国大五人格问卷的初步编制Ⅲ:简式版的制定及信效度检验. 中国临床心理学杂志, 19(04), Article 04.
-
-# 王孟成, 戴晓阳, & 姚树桥. (2010).
-# 中国大五人格问卷的初步编制Ⅰ:理论框架与信度分析. 中国临床心理学杂志, 18(05), Article 05.
-
-PERSONALITY_QUESTIONS = [
- # 神经质维度 (F1)
- {"id": 1, "content": "我常担心有什么不好的事情要发生", "factor": "神经质", "reverse_scoring": False},
- {"id": 2, "content": "我常感到害怕", "factor": "神经质", "reverse_scoring": False},
- {"id": 3, "content": "有时我觉得自己一无是处", "factor": "神经质", "reverse_scoring": False},
- {"id": 4, "content": "我很少感到忧郁或沮丧", "factor": "神经质", "reverse_scoring": True},
- {"id": 5, "content": "别人一句漫不经心的话,我常会联系在自己身上", "factor": "神经质", "reverse_scoring": False},
- {"id": 6, "content": "在面对压力时,我有种快要崩溃的感觉", "factor": "神经质", "reverse_scoring": False},
- {"id": 7, "content": "我常担忧一些无关紧要的事情", "factor": "神经质", "reverse_scoring": False},
- {"id": 8, "content": "我常常感到内心不踏实", "factor": "神经质", "reverse_scoring": False},
- # 严谨性维度 (F2)
- {"id": 9, "content": "在工作上,我常只求能应付过去便可", "factor": "严谨性", "reverse_scoring": True},
- {"id": 10, "content": "一旦确定了目标,我会坚持努力地实现它", "factor": "严谨性", "reverse_scoring": False},
- {"id": 11, "content": "我常常是仔细考虑之后才做出决定", "factor": "严谨性", "reverse_scoring": False},
- {"id": 12, "content": "别人认为我是个慎重的人", "factor": "严谨性", "reverse_scoring": False},
- {"id": 13, "content": "做事讲究逻辑和条理是我的一个特点", "factor": "严谨性", "reverse_scoring": False},
- {"id": 14, "content": "我喜欢一开头就把事情计划好", "factor": "严谨性", "reverse_scoring": False},
- {"id": 15, "content": "我工作或学习很勤奋", "factor": "严谨性", "reverse_scoring": False},
- {"id": 16, "content": "我是个倾尽全力做事的人", "factor": "严谨性", "reverse_scoring": False},
- # 宜人性维度 (F3)
- {
- "id": 17,
- "content": "尽管人类社会存在着一些阴暗的东西(如战争、罪恶、欺诈),我仍然相信人性总的来说是善良的",
- "factor": "宜人性",
- "reverse_scoring": False,
- },
- {"id": 18, "content": "我觉得大部分人基本上是心怀善意的", "factor": "宜人性", "reverse_scoring": False},
- {"id": 19, "content": "虽然社会上有骗子,但我觉得大部分人还是可信的", "factor": "宜人性", "reverse_scoring": False},
- {"id": 20, "content": "我不太关心别人是否受到不公正的待遇", "factor": "宜人性", "reverse_scoring": True},
- {"id": 21, "content": "我时常觉得别人的痛苦与我无关", "factor": "宜人性", "reverse_scoring": True},
- {"id": 22, "content": "我常为那些遭遇不幸的人感到难过", "factor": "宜人性", "reverse_scoring": False},
- {"id": 23, "content": "我是那种只照顾好自己,不替别人担忧的人", "factor": "宜人性", "reverse_scoring": True},
- {"id": 24, "content": "当别人向我诉说不幸时,我常感到难过", "factor": "宜人性", "reverse_scoring": False},
- # 开放性维度 (F4)
- {"id": 25, "content": "我的想象力相当丰富", "factor": "开放性", "reverse_scoring": False},
- {"id": 26, "content": "我头脑中经常充满生动的画面", "factor": "开放性", "reverse_scoring": False},
- {"id": 27, "content": "我对许多事情有着很强的好奇心", "factor": "开放性", "reverse_scoring": False},
- {"id": 28, "content": "我喜欢冒险", "factor": "开放性", "reverse_scoring": False},
- {"id": 29, "content": "我是个勇于冒险,突破常规的人", "factor": "开放性", "reverse_scoring": False},
- {"id": 30, "content": "我身上具有别人没有的冒险精神", "factor": "开放性", "reverse_scoring": False},
- {
- "id": 31,
- "content": "我渴望学习一些新东西,即使它们与我的日常生活无关",
- "factor": "开放性",
- "reverse_scoring": False,
- },
- {
- "id": 32,
- "content": "我很愿意也很容易接受那些新事物、新观点、新想法",
- "factor": "开放性",
- "reverse_scoring": False,
- },
- # 外向性维度 (F5)
- {"id": 33, "content": "我喜欢参加社交与娱乐聚会", "factor": "外向性", "reverse_scoring": False},
- {"id": 34, "content": "我对人多的聚会感到乏味", "factor": "外向性", "reverse_scoring": True},
- {"id": 35, "content": "我尽量避免参加人多的聚会和嘈杂的环境", "factor": "外向性", "reverse_scoring": True},
- {"id": 36, "content": "在热闹的聚会上,我常常表现主动并尽情玩耍", "factor": "外向性", "reverse_scoring": False},
- {"id": 37, "content": "有我在的场合一般不会冷场", "factor": "外向性", "reverse_scoring": False},
- {"id": 38, "content": "我希望成为领导者而不是被领导者", "factor": "外向性", "reverse_scoring": False},
- {"id": 39, "content": "在一个团体中,我希望处于领导地位", "factor": "外向性", "reverse_scoring": False},
- {"id": 40, "content": "别人多认为我是一个热情和友好的人", "factor": "外向性", "reverse_scoring": False},
-]
-
-# 因子维度说明
-FACTOR_DESCRIPTIONS = {
- "外向性": {
- "description": "反映个体神经系统的强弱和动力特征。外向性主要表现为个体在人际交往和社交活动中的倾向性,"
- "包括对社交活动的兴趣、"
- "对人群的态度、社交互动中的主动程度以及在群体中的影响力。高分者倾向于积极参与社交活动,乐于与人交往,善于表达自我,"
- "并往往在群体中发挥领导作用;低分者则倾向于独处,不喜欢热闹的社交场合,表现出内向、安静的特征。",
- "trait_words": ["热情", "活力", "社交", "主动"],
- "subfactors": {
- "合群性": "个体愿意与他人聚在一起,即接近人群的倾向;高分表现乐群、好交际,低分表现封闭、独处",
- "热情": "个体对待别人时所表现出的态度;高分表现热情好客,低分表现冷淡",
- "支配性": "个体喜欢指使、操纵他人,倾向于领导别人的特点;高分表现好强、发号施令,低分表现顺从、低调",
- "活跃": "个体精力充沛,活跃、主动性等特点;高分表现活跃,低分表现安静",
- },
- },
- "神经质": {
- "description": "反映个体情绪的状态和体验内心苦恼的倾向性。这个维度主要关注个体在面对压力、"
- "挫折和日常生活挑战时的情绪稳定性和适应能力。它包含了对焦虑、抑郁、愤怒等负面情绪的敏感程度,"
- "以及个体对这些情绪的调节和控制能力。高分者容易体验负面情绪,对压力较为敏感,情绪波动较大;"
- "低分者则表现出较强的情绪稳定性,能够较好地应对压力和挫折。",
- "trait_words": ["稳定", "沉着", "从容", "坚韧"],
- "subfactors": {
- "焦虑": "个体体验焦虑感的个体差异;高分表现坐立不安,低分表现平静",
- "抑郁": "个体体验抑郁情感的个体差异;高分表现郁郁寡欢,低分表现平静",
- "敏感多疑": "个体常常关注自己的内心活动,行为和过于意识人对自己的看法、评价;高分表现敏感多疑,"
- "低分表现淡定、自信",
- "脆弱性": "个体在危机或困难面前无力、脆弱的特点;高分表现无能、易受伤、逃避,低分表现坚强",
- "愤怒-敌意": "个体准备体验愤怒,及相关情绪的状态;高分表现暴躁易怒,低分表现平静",
- },
- },
- "严谨性": {
- "description": "反映个体在目标导向行为上的组织、坚持和动机特征。这个维度体现了个体在工作、"
- "学习等目标性活动中的自我约束和行为管理能力。它涉及到个体的责任感、自律性、计划性、条理性以及完成任务的态度。"
- "高分者往往表现出强烈的责任心、良好的组织能力、谨慎的决策风格和持续的努力精神;低分者则可能表现出随意性强、"
- "缺乏规划、做事马虎或易放弃的特点。",
- "trait_words": ["负责", "自律", "条理", "勤奋"],
- "subfactors": {
- "责任心": "个体对待任务和他人认真负责,以及对自己承诺的信守;高分表现有责任心、负责任,"
- "低分表现推卸责任、逃避处罚",
- "自我控制": "个体约束自己的能力,及自始至终的坚持性;高分表现自制、有毅力,低分表现冲动、无毅力",
- "审慎性": "个体在采取具体行动前的心理状态;高分表现谨慎、小心,低分表现鲁莽、草率",
- "条理性": "个体处理事务和工作的秩序,条理和逻辑性;高分表现整洁、有秩序,低分表现混乱、遗漏",
- "勤奋": "个体工作和学习的努力程度及为达到目标而表现出的进取精神;高分表现勤奋、刻苦,低分表现懒散",
- },
- },
- "开放性": {
- "description": "反映个体对新异事物、新观念和新经验的接受程度,以及在思维和行为方面的创新倾向。"
- "这个维度体现了个体在认知和体验方面的广度、深度和灵活性。它包括对艺术的欣赏能力、对知识的求知欲、想象力的丰富程度,"
- "以及对冒险和创新的态度。高分者往往具有丰富的想象力、广泛的兴趣、开放的思维方式和创新的倾向;低分者则倾向于保守、"
- "传统,喜欢熟悉和常规的事物。",
- "trait_words": ["创新", "好奇", "艺术", "冒险"],
- "subfactors": {
- "幻想": "个体富于幻想和想象的水平;高分表现想象力丰富,低分表现想象力匮乏",
- "审美": "个体对于艺术和美的敏感与热爱程度;高分表现富有艺术气息,低分表现一般对艺术不敏感",
- "好奇心": "个体对未知事物的态度;高分表现兴趣广泛、好奇心浓,低分表现兴趣少、无好奇心",
- "冒险精神": "个体愿意尝试有风险活动的个体差异;高分表现好冒险,低分表现保守",
- "价值观念": "个体对新事物、新观念、怪异想法的态度;高分表现开放、坦然接受新事物,低分则相反",
- },
- },
- "宜人性": {
- "description": "反映个体在人际关系中的亲和倾向,体现了对他人的关心、同情和合作意愿。"
- "这个维度主要关注个体与他人互动时的态度和行为特征,包括对他人的信任程度、同理心水平、"
- "助人意愿以及在人际冲突中的处理方式。高分者通常表现出友善、富有同情心、乐于助人的特质,善于与他人建立和谐关系;"
- "低分者则可能表现出较少的人际关注,在社交互动中更注重自身利益,较少考虑他人感受。",
- "trait_words": ["友善", "同理", "信任", "合作"],
- "subfactors": {
- "信任": "个体对他人和/或他人言论的相信程度;高分表现信任他人,低分表现怀疑",
- "体贴": "个体对别人的兴趣和需要的关注程度;高分表现体贴、温存,低分表现冷漠、不在乎",
- "同情": "个体对处于不利地位的人或物的态度;高分表现富有同情心,低分表现冷漠",
- },
- },
-}
diff --git a/src/plugins/personality_s/renqingziji.py b/src/plugins/personality_s/renqingziji.py
deleted file mode 100644
index ce4c268b8..000000000
--- a/src/plugins/personality_s/renqingziji.py
+++ /dev/null
@@ -1,195 +0,0 @@
-"""
-The definition of artificial personality in this paper follows the dispositional paradigm and adapts a definition of
-personality developed for humans [17]:
-Personality for a human is the "whole and organisation of relatively stable tendencies and patterns of experience and
-behaviour within one person (distinguishing it from other persons)". This definition is modified for artificial
-personality:
-Artificial personality describes the relatively stable tendencies and patterns of behaviour of an AI-based machine that
-can be designed by developers and designers via different modalities, such as language, creating the impression
-of individuality of a humanized social agent when users interact with the machine."""
-
-from typing import Dict, List
-import json
-import os
-from pathlib import Path
-from dotenv import load_dotenv
-import sys
-
-"""
-第一种方案:基于情景评估的人格测定
-"""
-current_dir = Path(__file__).resolve().parent
-project_root = current_dir.parent.parent.parent
-env_path = project_root / ".env"
-
-root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
-sys.path.append(root_path)
-
-from src.plugins.personality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa: E402
-from src.plugins.personality.questionnaire import FACTOR_DESCRIPTIONS # noqa: E402
-from src.plugins.personality.offline_llm import LLMModel # noqa: E402
-
-# 加载环境变量
-if env_path.exists():
- print(f"从 {env_path} 加载环境变量")
- load_dotenv(env_path)
-else:
- print(f"未找到环境变量文件: {env_path}")
- print("将使用默认配置")
-
-
-class PersonalityEvaluatorDirect:
- def __init__(self):
- self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
- self.scenarios = []
-
- # 为每个人格特质获取对应的场景
- for trait in PERSONALITY_SCENES:
- scenes = get_scene_by_factor(trait)
- if not scenes:
- continue
-
- # 从每个维度选择3个场景
- import random
-
- scene_keys = list(scenes.keys())
- selected_scenes = random.sample(scene_keys, min(3, len(scene_keys)))
-
- for scene_key in selected_scenes:
- scene = scenes[scene_key]
-
- # 为每个场景添加评估维度
- # 主维度是当前特质,次维度随机选择一个其他特质
- other_traits = [t for t in PERSONALITY_SCENES if t != trait]
- secondary_trait = random.choice(other_traits)
-
- self.scenarios.append(
- {"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key}
- )
-
- self.llm = LLMModel()
-
- def evaluate_response(self, scenario: str, response: str, dimensions: List[str]) -> Dict[str, float]:
- """
- 使用 DeepSeek AI 评估用户对特定场景的反应
- """
- # 构建维度描述
- dimension_descriptions = []
- for dim in dimensions:
- desc = FACTOR_DESCRIPTIONS.get(dim, "")
- if desc:
- dimension_descriptions.append(f"- {dim}:{desc}")
-
- dimensions_text = "\n".join(dimension_descriptions)
-
- prompt = f"""请根据以下场景和用户描述,评估用户在大五人格模型中的相关维度得分(1-6分)。
-
-场景描述:
-{scenario}
-
-用户回应:
-{response}
-
-需要评估的维度说明:
-{dimensions_text}
-
-请按照以下格式输出评估结果(仅输出JSON格式):
-{{
- "{dimensions[0]}": 分数,
- "{dimensions[1]}": 分数
-}}
-
-评分标准:
-1 = 非常不符合该维度特征
-2 = 比较不符合该维度特征
-3 = 有点不符合该维度特征
-4 = 有点符合该维度特征
-5 = 比较符合该维度特征
-6 = 非常符合该维度特征
-
-请根据用户的回应,结合场景和维度说明进行评分。确保分数在1-6之间,并给出合理的评估。"""
-
- try:
- ai_response, _ = self.llm.generate_response(prompt)
- # 尝试从AI响应中提取JSON部分
- start_idx = ai_response.find("{")
- end_idx = ai_response.rfind("}") + 1
- if start_idx != -1 and end_idx != 0:
- json_str = ai_response[start_idx:end_idx]
- scores = json.loads(json_str)
- # 确保所有分数在1-6之间
- return {k: max(1, min(6, float(v))) for k, v in scores.items()}
- else:
- print("AI响应格式不正确,使用默认评分")
- return {dim: 3.5 for dim in dimensions}
- except Exception as e:
- print(f"评估过程出错:{str(e)}")
- return {dim: 3.5 for dim in dimensions}
-
-
-def main():
- print("欢迎使用人格形象创建程序!")
- print("接下来,您将面对一系列场景(共15个)。请根据您想要创建的角色形象,描述在该场景下可能的反应。")
- print("每个场景都会评估不同的人格维度,最终得出完整的人格特征评估。")
- print("评分标准:1=非常不符合,2=比较不符合,3=有点不符合,4=有点符合,5=比较符合,6=非常符合")
- print("\n准备好了吗?按回车键开始...")
- input()
-
- evaluator = PersonalityEvaluatorDirect()
- final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
- dimension_counts = {trait: 0 for trait in final_scores.keys()}
-
- for i, scenario_data in enumerate(evaluator.scenarios, 1):
- print(f"\n场景 {i}/{len(evaluator.scenarios)} - {scenario_data['场景编号']}:")
- print("-" * 50)
- print(scenario_data["场景"])
- print("\n请描述您的角色在这种情况下会如何反应:")
- response = input().strip()
-
- if not response:
- print("反应描述不能为空!")
- continue
-
- print("\n正在评估您的描述...")
- scores = evaluator.evaluate_response(scenario_data["场景"], response, scenario_data["评估维度"])
-
- # 更新最终分数
- for dimension, score in scores.items():
- final_scores[dimension] += score
- dimension_counts[dimension] += 1
-
- print("\n当前评估结果:")
- print("-" * 30)
- for dimension, score in scores.items():
- print(f"{dimension}: {score}/6")
-
- if i < len(evaluator.scenarios):
- print("\n按回车键继续下一个场景...")
- input()
-
- # 计算平均分
- for dimension in final_scores:
- if dimension_counts[dimension] > 0:
- final_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2)
-
- print("\n最终人格特征评估结果:")
- print("-" * 30)
- for trait, score in final_scores.items():
- print(f"{trait}: {score}/6")
- print(f"测试场景数:{dimension_counts[trait]}")
-
- # 保存结果
- result = {"final_scores": final_scores, "dimension_counts": dimension_counts, "scenarios": evaluator.scenarios}
-
- # 确保目录存在
- os.makedirs("results", exist_ok=True)
-
- # 保存到文件
- with open("results/personality_result.json", "w", encoding="utf-8") as f:
- json.dump(result, f, ensure_ascii=False, indent=2)
-
- print("\n结果已保存到 results/personality_result.json")
-
-
-if __name__ == "__main__":
- main()
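Note: like can_i_recog_u.py, this deleted evaluator recovers the model's JSON by slicing from the first "{" to the last "}" and clamping each score. The same idea as a compact helper (extract_scores is an illustrative name, not the deleted code verbatim):

    import json

    def extract_scores(ai_response, lo=1.0, hi=6.0):
        start, end = ai_response.find("{"), ai_response.rfind("}") + 1
        if start == -1 or end == 0:
            raise ValueError("no JSON object found in model output")
        scores = json.loads(ai_response[start:end])
        return {k: max(lo, min(hi, float(v))) for k, v in scores.items()}

    print(extract_scores('评分如下 {"开放性": 7, "外向性": "4"}'))  # {'开放性': 6.0, '外向性': 4.0}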
diff --git a/src/plugins/personality_s/who_r_u.py b/src/plugins/personality_s/who_r_u.py
deleted file mode 100644
index 4877fb8c9..000000000
--- a/src/plugins/personality_s/who_r_u.py
+++ /dev/null
@@ -1,156 +0,0 @@
-import random
-import os
-import sys
-from pathlib import Path
-import datetime
-from typing import List, Dict, Optional
-
-current_dir = Path(__file__).resolve().parent
-project_root = current_dir.parent.parent.parent
-env_path = project_root / ".env"
-
-root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
-sys.path.append(root_path)
-
-from src.common.database import db # noqa: E402
-
-
-class MessageAnalyzer:
- def __init__(self):
- self.messages_collection = db["messages"]
-
- def get_message_context(self, message_id: int, context_length: int = 5) -> Optional[List[Dict]]:
- """
- 获取指定消息ID的上下文消息列表
-
- Args:
- message_id (int): 消息ID
- context_length (int): 上下文长度(单侧,总长度为 2*context_length + 1)
-
- Returns:
- Optional[List[Dict]]: 消息列表,如果未找到则返回None
- """
- # 从数据库获取指定消息
- target_message = self.messages_collection.find_one({"message_id": message_id})
- if not target_message:
- return None
-
- # 获取该消息的stream_id
- stream_id = target_message.get("chat_info", {}).get("stream_id")
- if not stream_id:
- return None
-
- # 获取同一stream_id的所有消息
- stream_messages = list(self.messages_collection.find({"chat_info.stream_id": stream_id}).sort("time", 1))
-
- # 找到目标消息在列表中的位置
- target_index = None
- for i, msg in enumerate(stream_messages):
- if msg["message_id"] == message_id:
- target_index = i
- break
-
- if target_index is None:
- return None
-
- # 获取目标消息前后的消息
- start_index = max(0, target_index - context_length)
- end_index = min(len(stream_messages), target_index + context_length + 1)
-
- return stream_messages[start_index:end_index]
-
- def format_messages(self, messages: List[Dict], target_message_id: Optional[int] = None) -> str:
- """
- 格式化消息列表为可读字符串
-
- Args:
- messages (List[Dict]): 消息列表
- target_message_id (Optional[int]): 目标消息ID,用于标记
-
- Returns:
- str: 格式化的消息字符串
- """
- if not messages:
- return "没有消息记录"
-
- reply = ""
- for msg in messages:
- # 消息时间
- msg_time = datetime.datetime.fromtimestamp(int(msg["time"])).strftime("%Y-%m-%d %H:%M:%S")
-
- # 获取消息内容
- message_text = msg.get("processed_plain_text", msg.get("detailed_plain_text", "无消息内容"))
- nickname = msg.get("user_info", {}).get("user_nickname", "未知用户")
-
- # 标记当前消息
- is_target = "→ " if target_message_id and msg["message_id"] == target_message_id else " "
-
- reply += f"{is_target}[{msg_time}] {nickname}: {message_text}\n"
-
- if target_message_id and msg["message_id"] == target_message_id:
- reply += " " + "-" * 50 + "\n"
-
- return reply
-
- def get_user_random_contexts(
- self, qq_id: str, num_messages: int = 10, context_length: int = 5
- ) -> tuple[List[str], str]: # noqa: E501
- """
- 获取用户的随机消息及其上下文
-
- Args:
- qq_id (str): QQ号
- num_messages (int): 要获取的随机消息数量
- context_length (int): 每条消息的上下文长度(单侧)
-
- Returns:
- tuple[List[str], str]: (每个消息上下文的格式化字符串列表, 用户昵称)
- """
- if not qq_id:
- return [], ""
-
- # 获取用户所有消息
- all_messages = list(self.messages_collection.find({"user_info.user_id": int(qq_id)}))
- if not all_messages:
- return [], ""
-
- # 获取用户昵称
- user_nickname = all_messages[0].get("chat_info", {}).get("user_info", {}).get("user_nickname", "未知用户")
-
- # 随机选择指定数量的消息
- selected_messages = random.sample(all_messages, min(num_messages, len(all_messages)))
- # 按时间排序
- selected_messages.sort(key=lambda x: int(x["time"]))
-
- # 存储所有上下文消息
- context_list = []
-
- # 获取每条消息的上下文
- for msg in selected_messages:
- message_id = msg["message_id"]
-
- # 获取消息上下文
- context_messages = self.get_message_context(message_id, context_length)
- if context_messages:
- formatted_context = self.format_messages(context_messages, message_id)
- context_list.append(formatted_context)
-
- return context_list, user_nickname
-
-
-if __name__ == "__main__":
- # 测试代码
- analyzer = MessageAnalyzer()
- test_qq = "1026294844" # 替换为要测试的QQ号
- print(f"测试QQ号: {test_qq}")
- print("-" * 50)
- # 获取5条消息,每条消息前后各3条上下文
- contexts, nickname = analyzer.get_user_random_contexts(test_qq, num_messages=5, context_length=3)
-
- print(f"用户昵称: {nickname}\n")
- # 打印每个上下文
- for i, context in enumerate(contexts, 1):
- print(f"\n随机消息 {i}/{len(contexts)}:")
- print("-" * 30)
- print(context)
- print("=" * 50)
diff --git a/src/plugins/personality_s/看我.txt b/src/plugins/personality_s/看我.txt
deleted file mode 100644
index d5d6f8903..000000000
--- a/src/plugins/personality_s/看我.txt
+++ /dev/null
@@ -1 +0,0 @@
-那是以后会用到的妙妙小工具.jpg
\ No newline at end of file
From 0a74aba2ef19fd5c78470047ca8dce47cd7f6e93 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Wed, 16 Apr 2025 20:45:52 +0800
Subject: [PATCH 030/406] =?UTF-8?q?fix=EF=BC=9A=E6=9B=B4=E5=90=8D?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
config_helper/config_helper.py | 477 ------------------
config_helper/config_notice.md | 10 -
...2请先看我.txt => 从0.6.0升级0.6.3请先看我.txt | 0
3 files changed, 487 deletions(-)
delete mode 100644 config_helper/config_helper.py
delete mode 100644 config_helper/config_notice.md
rename 从0.6.0升级0.6.2请先看我.txt => 从0.6.0升级0.6.3请先看我.txt (100%)
diff --git a/config_helper/config_helper.py b/config_helper/config_helper.py
deleted file mode 100644
index b27aaead4..000000000
--- a/config_helper/config_helper.py
+++ /dev/null
@@ -1,477 +0,0 @@
-import os
-import tomli
-from packaging.specifiers import SpecifierSet
-from packaging.version import Version
-import sys
-
-import asyncio
-import os
-import time
-from typing import Tuple, Union, AsyncGenerator, Generator
-
-import aiohttp
-import requests
-import json
-
-
-class EnvInfo:
- def __init__(self, env_path):
- self.env_path = env_path
- self.env_content_txt = None
- self.env_content = {}
- self.error_message = None
-
-
- def check_env(self):
- # 检查根目录是否存在.env文件
- if not os.path.exists(self.env_path):
- self.error_message = "根目录没有.env文件,请自己创建或者运行一次MaiBot\n你可以直接复制template/template.env文件到根目录并重命名为.env"
- return "not_found"
-
- #加载整个.env文件
- with open(self.env_path, "r", encoding="utf-8") as f:
- self.env_content_txt = f.read()
-
- #逐行读取所有配置项
- for line in self.env_content_txt.splitlines():
- if line.strip() == "":
- continue
- key, value = line.split("=", 1)
- self.env_content[key.strip()] = value.strip()
-
- # 检查.env文件的SILICONFLOW_KEY和SILICONFLOW_BASE_URL是否为空
- if "SILICONFLOW_KEY" not in self.env_content or "SILICONFLOW_BASE_URL" not in self.env_content:
- if "DEEP_SEEK_BASE_URL" not in self.env_content or "DEEP_SEEK_KEY" not in self.env_content:
- self.error_message = "没有设置可用的API和密钥,请检查.env文件,起码配置一个API来让帮助程序工作"
- return "not_set"
- else:
- self.error_message = "你只设置了deepseek官方API,可能无法运行MaiBot,请检查.env文件"
- return "only_ds"
-
- return "success"
-
-class LLM_request_off:
- def __init__(self, model_name="deepseek-ai/DeepSeek-V3", env_info: EnvInfo = None, **kwargs):
- self.model_name = model_name
- self.params = kwargs
- if model_name == "deepseek-ai/DeepSeek-V3" or model_name == "Pro/deepseek-ai/DeepSeek-V3":
- self.api_key = env_info.env_content["SILICONFLOW_KEY"]
- self.base_url = env_info.env_content["SILICONFLOW_BASE_URL"]
- elif model_name == "deepseek-chat":
- self.api_key = env_info.env_content["DEEP_SEEK_KEY"]
- self.base_url = env_info.env_content["DEEP_SEEK_BASE_URL"]
- # logger.info(f"API URL: {self.base_url}") # 使用 logger 记录 base_url
-
- def generate_stream(self, prompt: str) -> Generator[str, None, None]:
- """流式生成模型响应"""
- headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
-
- # 构建请求体,启用流式输出
- data = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "temperature": 0.4,
- "stream": True,
- **self.params,
- }
-
- # 发送请求到完整的 chat/completions 端点
- api_url = f"{self.base_url.rstrip('/')}/chat/completions"
- print(f"Stream Request URL: {api_url}")
-
- max_retries = 3
- base_wait_time = 15
-
- for retry in range(max_retries):
- try:
- response = requests.post(api_url, headers=headers, json=data, stream=True)
-
- if response.status_code == 429:
- wait_time = base_wait_time * (2**retry)
- print(f"遇到请求限制(429),等待{wait_time}秒后重试...")
- time.sleep(wait_time)
- continue
-
- response.raise_for_status()
-
- # 处理流式响应
- for line in response.iter_lines():
- if line:
- line = line.decode('utf-8')
- if line.startswith('data: ') and not line.startswith('data: [DONE]'):
- json_str = line[6:] # 去掉 "data: " 前缀
- try:
- chunk_data = json.loads(json_str)
- if (
- "choices" in chunk_data
- and len(chunk_data["choices"]) > 0
- and "delta" in chunk_data["choices"][0]
- and "content" in chunk_data["choices"][0]["delta"]
- ):
- content = chunk_data["choices"][0]["delta"]["content"]
- yield content
- except json.JSONDecodeError:
- print(f"无法解析JSON: {json_str}")
- return
-
- except Exception as e:
- if retry < max_retries - 1:
- wait_time = base_wait_time * (2**retry)
- print(f"[流式回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
- time.sleep(wait_time)
- else:
- print(f"流式请求失败: {str(e)}")
- yield f"流式请求失败: {str(e)}"
- return
-
- print("达到最大重试次数,流式请求仍然失败")
- yield "达到最大重试次数,流式请求仍然失败"
-
-class ConfigInfo:
- def __init__(self, config_path):
- self.config_path = config_path
- self.config_content = ""
- self.config_content_txt = None
- self.template_content = None
- self.version = None
- self.error_message = None
-
- def check_bot_config(self):
- """
- 检查config/bot_config.toml文件中是否有缺失项目
- """
-
- if not os.path.exists(self.config_path):
- self.error_message = f"错误:找不到配置文件 {self.config_path}"
- return "not found"
-
- # 读取配置文件,先去掉注释,再解析TOML
- try:
- # 首先读取原始文件内容
- with open(self.config_path, "r", encoding="utf-8", errors="replace") as f:
- file_content = f.read()
-
- # 去掉注释并保留有效内容
- cleaned_lines = []
- for line in file_content.splitlines():
- # 去掉行内注释
- if "#" in line:
- line = line.split("#")[0].rstrip()
-
- # 如果行不是空的且不是以#开头的注释行,则添加到cleaned_lines
- if line.strip() and not line.strip().startswith("#"):
- cleaned_lines.append(line)
-
- # 将处理后的内容用于解析TOML
- self.config_content_txt = "\n".join(cleaned_lines)
-
- # 使用tomli解析处理后的内容
- self.config_content = tomli.loads(self.config_content_txt)
- except tomli.TOMLDecodeError as e:
- self.error_message = f"错误:配置文件格式错误:{e}"
- # 配置内容已经在上面设置了,不需要再次读取
- return "format_error"
- except UnicodeDecodeError as e:
- self.error_message = f"错误:配置文件编码错误,请使用UTF-8编码:{e}"
- return "format_error"
-
- # 读取模板配置文件
- template_path = "template/bot_config_template.toml"
- if not os.path.exists(template_path):
- self.error_message = f"错误:找不到模板配置文件,请检查你是否启动过或者该程序是否位于根目录 {template_path}"
- return "critical_error"
-
- try:
- with open(template_path, "rb") as f:
- template_content = tomli.load(f)
- except Exception as e:
- self.error_message = f"错误:无法解析模板配置文件,文件损坏,建议重新下载MaiBot:{e}"
- return "critical_error"
-
- # 获取版本信息
- inner_version = self.config_content.get("inner", {}).get("version")
- if not inner_version:
- self.error_message = "错误:配置文件中缺少版本信息"
- return "critical_error"
-
- try:
- self.version = Version(inner_version)
- except:
- self.error_message = f"错误:版本号格式错误:{inner_version}"
- return "critical_error"
-
-
- # 检查所有必需的顶级配置项
- required_sections = [
- "bot", "groups", "personality", "identity", "platforms",
- "response", "message", "willing", "emoji", "memory",
- "mood", "model"
- ]
-
- missing_sections = []
- for section in required_sections:
- if section not in self.config_content:
- missing_sections.append(section)
-
- if missing_sections:
- self.error_message = f"错误:配置文件缺少以下顶级配置项:{', '.join(missing_sections)}"
- return "critical_error"
-
- # 检查各个配置项内的必需字段
- missing_fields = []
-
- # 检查bot配置
- if "bot" in self.config_content:
- bot_config = self.config_content["bot"]
- if "qq" not in bot_config:
- missing_fields.append("bot.qq")
- if "nickname" not in bot_config:
- missing_fields.append("bot.nickname")
-
- # 检查groups配置
- if "groups" in self.config_content:
- groups_config = self.config_content["groups"]
- if "talk_allowed" not in groups_config:
- missing_fields.append("groups.talk_allowed")
-
- # 检查platforms配置
- if "platforms" in self.config_content:
- platforms_config = self.config_content["platforms"]
- if not platforms_config or not isinstance(platforms_config, dict) or len(platforms_config) == 0:
- missing_fields.append("platforms.(至少一个平台)")
-
- # 检查模型配置
- if "model" in self.config_content:
- model_config = self.config_content["model"]
- required_models = [
- "llm_reasoning", "llm_normal", "llm_topic_judge",
- "llm_summary_by_topic", "llm_emotion_judge", "embedding", "vlm"
- ]
-
- for model in required_models:
- if model not in model_config:
- missing_fields.append(f"model.{model}")
- elif model in model_config:
- model_item = model_config[model]
- if "name" not in model_item:
- missing_fields.append(f"model.{model}.name")
- if "provider" not in model_item:
- missing_fields.append(f"model.{model}.provider")
-
- # 基于模板检查其它必需字段
- def check_section(template_section, user_section, prefix):
- if not isinstance(template_section, dict) or not isinstance(user_section, dict):
- return
-
- for key, value in template_section.items():
- # 跳过注释和数组类型的配置项
- if key.startswith("#") or isinstance(value, list):
- continue
-
- if key not in user_section:
- missing_fields.append(f"{prefix}.{key}")
- elif isinstance(value, dict) and key in user_section:
- # 递归检查嵌套配置项
- check_section(value, user_section[key], f"{prefix}.{key}")
-
- for section in required_sections:
- if section in template_content and section in self.config_content:
- check_section(template_content[section], self.config_content[section], section)
-
- # 输出结果
- if missing_fields:
- print(f"发现 {len(missing_fields)} 个缺失的配置项:")
- for field in missing_fields:
- print(f" - {field}")
- else:
- print("检查完成,没有发现缺失的必要配置项。")
-
- def get_value(self, path):
- """
- 获取配置文件中指定路径的值
- 参数:
- path: 以点分隔的路径,例如 "bot.qq" 或 "model.llm_normal.name"
- 返回:
- 找到的值,如果路径不存在则返回None
- """
- if not self.config_content:
- return None
-
- parts = path.split('.')
- current = self.config_content
-
- try:
- for part in parts:
- if isinstance(current, dict) and part in current:
- current = current[part]
- else:
- return None
- return current
- except:
- return None
-
- def has_field(self, path):
- """
- 检查配置文件中是否存在指定路径
- 参数:
- path: 以点分隔的路径,例如 "bot.qq" 或 "model.llm_normal.name"
- 返回:
- 布尔值,表示路径是否存在
- """
- return self.get_value(path) is not None
-
- def get_section(self, section_name):
- """
- 获取配置文件中的整个部分
- 参数:
- section_name: 部分名称,例如 "bot" 或 "model"
- 返回:
- 字典形式的部分内容,如果部分不存在则返回空字典
- """
- if not self.config_content:
- return {}
-
- return self.config_content.get(section_name, {})
-
- def get_all_models(self):
- """
- 获取配置中所有的模型配置
- 返回:
- 包含所有模型配置的字典
- """
- if not self.config_content or "model" not in self.config_content:
- return {}
-
- return self.config_content.get("model", {})
-
- def __str__(self):
- return f"配置文件路径:{self.config_path}\n配置文件版本:{self.version}\n错误信息:{self.error_message}"
-
-class ConfigHelper:
- def __init__(self, config_info: ConfigInfo, model_using = "", env_info: EnvInfo = None):
- self.config_info = config_info
- self.config_notice = None
- self.helper_model = LLM_request_off(model_name=model_using,env_info=env_info)
-
- def deal_format_error(self, error_message, config_content_txt):
- prompt = f"""
- 这里有一份配置文件存在格式错误,请检查配置文件为什么会出现该错误以及建议如何修改,不要使用markdown格式
- 错误信息:{error_message}
- 配置文件内容:{config_content_txt}
- 请根据错误信息和配置文件内容,用通俗易懂,简短的语言给出修改建议:
- """
-
- try:
- # 使用流式输出获取分析结果
- print("\n===== 麦麦分析结果 =====")
- for chunk in self.helper_model.generate_stream(prompt):
- print(chunk, end="", flush=True)
- print("\n=====================")
-
- except Exception as e:
- print(f"请求麦麦分析时出错: {str(e)}")
- print("请手动检查配置文件格式错误:", error_message)
-
- def load_config_notice(self):
- with open(os.path.join(os.path.dirname(__file__), "config_notice.md"), "r", encoding="utf-8") as f:
- self.config_notice = f.read()
-
- def deal_question(self, question):
- prompt = f"""
- 这里有一份配置文件,请根据问题给出回答
- 配置文件内容:{self.config_info.config_content_txt}
- 关于配置文件的说明:{self.config_notice}
- 问题:{question}
- """
-
- try:
- # 使用流式输出获取分析结果
- print("\n===== 麦麦分析结果 =====")
- for chunk in self.helper_model.generate_stream(prompt):
- print(chunk, end="", flush=True)
- print("\n=====================")
-
- except Exception as e:
- print(f"请求麦麦分析时出错: {str(e)}")
-
-
-
-if __name__ == "__main__":
- model_using = "deepseek-ai/DeepSeek-V3"
- # model_using = "Pro/deepseek-ai/DeepSeek-V3"
- env_info = EnvInfo(".env")
- result = env_info.check_env()
- if result == "not_set":
- print(env_info.error_message)
- exit()
- elif result == "only_ds":
- model_using = "deepseek-chat"
- print("你只设置了deepseek官方API,可能无法运行MaiBot,但是你仍旧可以运行这个帮助程序,请检查.env文件")
- elif result == "not_found":
- print(env_info.error_message)
- exit()
-
- config_path = "./config/bot_config.toml"
- config_info = ConfigInfo(config_path)
- print("开始检查config/bot_config.toml文件...")
- result = config_info.check_bot_config()
- print(config_info)
-
- helper = ConfigHelper(config_info, model_using, env_info)
- helper.load_config_notice()
-
- # 如果配置文件读取成功,展示如何获取字段
- if config_info.config_content:
- print("\n配置文件读取成功,可以访问任意字段:")
- # 获取机器人昵称
- nickname = config_info.get_value("bot.nickname")
- print(f"机器人昵称: {nickname}")
-
- # 获取QQ号
- qq = config_info.get_value("bot.qq")
- print(f"机器人QQ: {qq}")
-
- # 获取群聊配置
- groups = config_info.get_section("groups")
- print(f"允许聊天的群: {groups.get('talk_allowed', [])}")
-
- # 获取模型信息
- models = config_info.get_all_models()
- print("\n模型配置信息:")
- for model_name, model_info in models.items():
- provider = model_info.get("provider", "未知")
- model_path = model_info.get("name", "未知")
- print(f" - {model_name}: {model_path} (提供商: {provider})")
-
- # 检查某字段是否存在
- if config_info.has_field("model.llm_normal.temp"):
- temp = config_info.get_value("model.llm_normal.temp")
- print(f"\n回复模型温度: {temp}")
- else:
- print("\n回复模型温度未设置")
-
- # 获取心流相关设置
- if config_info.has_field("heartflow"):
- heartflow = config_info.get_section("heartflow")
- print(f"\n心流更新间隔: {heartflow.get('heart_flow_update_interval')}秒")
- print(f"子心流更新间隔: {heartflow.get('sub_heart_flow_update_interval')}秒")
-
- if result == "critical_error":
- print("配置文件存在严重错误,建议重新下载MaiBot")
- exit()
- elif result == "format_error":
- print("配置文件格式错误,正在进行检查...")
- error_message = config_info.error_message
- config_content_txt = config_info.config_content_txt
- helper.deal_format_error(error_message, config_content_txt)
- else:
- print("配置文件格式检查完成,没有发现问题")
-
- while True:
- question = input("请输入你遇到的问题,麦麦会帮助你分析(输入exit退出):")
- if question == "exit":
- break
- else:
- print("麦麦正在为你分析...")
- helper.deal_question(question)
-
diff --git a/config_helper/config_notice.md b/config_helper/config_notice.md
deleted file mode 100644
index b704c8e47..000000000
--- a/config_helper/config_notice.md
+++ /dev/null
@@ -1,10 +0,0 @@
-1.Q:为什么我的bot叫他名字不回消息?
-A:请检查qq和nickname字段是否正确填写
-请将默认字段:
-qq = 114514
-nickname = "麦麦"
-改为你自己的qq号和bot名称(需要与qq昵称相同)
-
-2. Q:如何修改日程表的内容,或者关闭日程表?
-A:日程表目前无法关闭
-如果日程表生成的内容太过科幻或者疯癫,可以尝试调整日程表的温度或者修改日程表描述
\ No newline at end of file
diff --git a/从0.6.0升级0.6.2请先看我.txt b/从0.6.0升级0.6.3请先看我.txt
similarity index 100%
rename from 从0.6.0升级0.6.2请先看我.txt
rename to 从0.6.0升级0.6.3请先看我.txt
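The config_helper deleted above rolled its own streaming client: POST to an OpenAI-compatible /chat/completions endpoint with "stream": true, read the response line by line, strip the "data: " prefix, stop at the "[DONE]" sentinel, and JSON-decode each chunk to pull out choices[0].delta.content. A minimal sketch of that parsing loop, assuming only an OpenAI-compatible endpoint and the requests library (the url/key/model arguments are placeholders, not MaiBot config values):

import json
from typing import Generator

import requests


def stream_chat(api_url: str, api_key: str, model: str, prompt: str) -> Generator[str, None, None]:
    """Yield content deltas from an OpenAI-compatible streaming endpoint."""
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    payload = {"model": model, "messages": [{"role": "user", "content": prompt}], "stream": True}
    with requests.post(api_url, headers=headers, json=payload, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        for raw in resp.iter_lines():
            if not raw:
                continue  # skip SSE keep-alive blank lines
            line = raw.decode("utf-8")
            if not line.startswith("data: "):
                continue
            data = line[len("data: "):]
            if data == "[DONE]":  # end-of-stream sentinel
                break
            choices = json.loads(data).get("choices") or [{}]
            delta = choices[0].get("delta", {})
            if "content" in delta:
                yield delta["content"]

The deleted version wraps this same loop in a retry-on-429 path with exponential backoff (base_wait * 2**retry); that part is omitted here for brevity.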
From b4f284abca920e33833a3d163cdb6c4fba98e3c5 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Wed, 16 Apr 2025 21:10:34 +0800
Subject: [PATCH 031/406] =?UTF-8?q?fix=EF=BC=9A=E7=A7=BB=E5=8A=A8config?=
=?UTF-8?q?=E4=BD=8D=E7=BD=AE=EF=BC=8C=E4=BF=AE=E5=A4=8D=E9=A2=9C=E6=96=87?=
=?UTF-8?q?=E5=AD=97bug?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/{plugins => }/config/auto_update.py | 0
src/{plugins => }/config/config.py | 4 +-
src/do_tool/tool_can_use/change_mood.py | 2 +-
src/do_tool/tool_use.py | 5 +-
src/gui/logger_gui.py | 378 ------------------
src/gui/reasoning_gui.py | 342 ----------------
src/heart_flow/heartflow.py | 2 +-
src/heart_flow/observation.py | 2 +-
src/heart_flow/sub_heartflow.py | 4 +-
src/main.py | 2 +-
src/plugins/PFC/action_planner.py | 2 +-
src/plugins/PFC/chat_observer.py | 2 +-
src/plugins/PFC/pfc.py | 2 +-
src/plugins/PFC/pfc_KnowledgeFetcher.py | 2 +-
src/plugins/PFC/reply_checker.py | 2 +-
src/plugins/PFC/reply_generator.py | 2 +-
src/plugins/PFC/waiter.py | 2 +-
src/plugins/chat/bot.py | 2 +-
src/plugins/chat/emoji_manager.py | 2 +-
src/plugins/chat/message_buffer.py | 2 +-
src/plugins/chat/messagesender.py | 2 +-
src/plugins/chat/utils.py | 58 ++-
src/plugins/chat/utils_image.py | 2 +-
.../only_process/only_message_process.py | 2 +-
.../reasoning_chat/reasoning_chat.py | 2 +-
.../reasoning_chat/reasoning_generator.py | 2 +-
.../reasoning_prompt_builder.py | 2 +-
.../think_flow_chat/think_flow_chat.py | 2 +-
.../think_flow_chat/think_flow_generator.py | 2 +-
.../think_flow_prompt_builder.py | 2 +-
src/plugins/config/config_env.py | 59 ---
src/plugins/memory_system/debug_memory.py | 2 +-
src/plugins/models/utils_model.py | 2 +-
src/plugins/moods/moods.py | 2 +-
src/plugins/person_info/person_info.py | 2 +-
src/plugins/remote/remote.py | 2 +-
.../respon_info_catcher/info_catcher.py | 2 +-
src/plugins/schedule/schedule_generator.py | 2 +-
.../topic_identify/topic_identifier.py | 2 +-
src/plugins/willing/mode_llmcheck.py | 2 +-
src/plugins/willing/willing_manager.py | 2 +-
41 files changed, 86 insertions(+), 830 deletions(-)
rename src/{plugins => }/config/auto_update.py (100%)
rename src/{plugins => }/config/config.py (99%)
delete mode 100644 src/gui/logger_gui.py
delete mode 100644 src/gui/reasoning_gui.py
delete mode 100644 src/plugins/config/config_env.py
diff --git a/src/plugins/config/auto_update.py b/src/config/auto_update.py
similarity index 100%
rename from src/plugins/config/auto_update.py
rename to src/config/auto_update.py
diff --git a/src/plugins/config/config.py b/src/config/config.py
similarity index 99%
rename from src/plugins/config/config.py
rename to src/config/config.py
index ebde77734..332be7442 100644
--- a/src/plugins/config/config.py
+++ b/src/config/config.py
@@ -44,7 +44,7 @@ else:
def update_config():
# 获取根目录路径
- root_dir = Path(__file__).parent.parent.parent.parent
+ root_dir = Path(__file__).parent.parent.parent
template_dir = root_dir / "template"
config_dir = root_dir / "config"
old_config_dir = config_dir / "old"
@@ -305,7 +305,7 @@ class BotConfig:
def get_config_dir() -> str:
"""获取配置文件目录"""
current_dir = os.path.dirname(os.path.abspath(__file__))
- root_dir = os.path.abspath(os.path.join(current_dir, "..", "..", ".."))
+ root_dir = os.path.abspath(os.path.join(current_dir, "..", ".."))
config_dir = os.path.join(root_dir, "config")
if not os.path.exists(config_dir):
os.makedirs(config_dir)
diff --git a/src/do_tool/tool_can_use/change_mood.py b/src/do_tool/tool_can_use/change_mood.py
index 53410068f..1c13b1e5f 100644
--- a/src/do_tool/tool_can_use/change_mood.py
+++ b/src/do_tool/tool_can_use/change_mood.py
@@ -1,5 +1,5 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
-from src.plugins.config.config import global_config
+from src.config.config import global_config
from src.common.logger import get_module_logger
from src.plugins.moods.moods import MoodManager
from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
diff --git a/src/do_tool/tool_use.py b/src/do_tool/tool_use.py
index b8c35d912..b323f0452 100644
--- a/src/do_tool/tool_use.py
+++ b/src/do_tool/tool_use.py
@@ -1,5 +1,5 @@
from src.plugins.models.utils_model import LLMRequest
-from src.plugins.config.config import global_config
+from src.config.config import global_config
from src.plugins.chat.chat_stream import ChatStream
from src.common.database import db
import time
@@ -7,6 +7,8 @@ import json
from src.common.logger import get_module_logger, TOOL_USE_STYLE_CONFIG, LogConfig
from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance
from src.heart_flow.sub_heartflow import SubHeartflow
+import traceback
+from src.plugins.chat.utils import get_recent_group_detailed_plain_text
tool_use_config = LogConfig(
# 使用消息发送专用样式
@@ -195,6 +197,7 @@ class ToolUser:
except Exception as e:
logger.error(f"工具调用过程中出错: {str(e)}")
+ logger.error(f"工具调用过程中出错: {traceback.format_exc()}")
return {
"used_tools": False,
"error": str(e),
diff --git a/src/gui/logger_gui.py b/src/gui/logger_gui.py
deleted file mode 100644
index ad6edafb8..000000000
--- a/src/gui/logger_gui.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# import customtkinter as ctk
-# import subprocess
-# import threading
-# import queue
-# import re
-# import os
-# import signal
-# from collections import deque
-# import sys
-
-# # 设置应用的外观模式和默认颜色主题
-# ctk.set_appearance_mode("dark")
-# ctk.set_default_color_theme("blue")
-
-
-# class LogViewerApp(ctk.CTk):
-# """日志查看器应用的主类,继承自customtkinter的CTk类"""
-
-# def __init__(self):
-# """初始化日志查看器应用的界面和状态"""
-# super().__init__()
-# self.title("日志查看器")
-# self.geometry("1200x800")
-
-# # 标记GUI是否运行中
-# self.is_running = True
-
-# # 程序关闭时的清理操作
-# self.protocol("WM_DELETE_WINDOW", self._on_closing)
-
-# # 初始化进程、日志队列、日志数据等变量
-# self.process = None
-# self.log_queue = queue.Queue()
-# self.log_data = deque(maxlen=10000) # 使用固定长度队列
-# self.available_levels = set()
-# self.available_modules = set()
-# self.sorted_modules = []
-# self.module_checkboxes = {} # 存储模块复选框的字典
-
-# # 日志颜色配置
-# self.color_config = {
-# "time": "#888888",
-# "DEBUG": "#2196F3",
-# "INFO": "#4CAF50",
-# "WARNING": "#FF9800",
-# "ERROR": "#F44336",
-# "module": "#D4D0AB",
-# "default": "#FFFFFF",
-# }
-
-# # 列可见性配置
-# self.column_visibility = {"show_time": True, "show_level": True, "show_module": True}
-
-# # 选中的日志等级和模块
-# self.selected_levels = set()
-# self.selected_modules = set()
-
-# # 创建界面组件并启动日志队列处理
-# self.create_widgets()
-# self.after(100, self.process_log_queue)
-
-# def create_widgets(self):
-# """创建应用界面的各个组件"""
-# self.grid_columnconfigure(0, weight=1)
-# self.grid_rowconfigure(1, weight=1)
-
-# # 控制面板
-# control_frame = ctk.CTkFrame(self)
-# control_frame.grid(row=0, column=0, sticky="ew", padx=10, pady=5)
-
-# self.start_btn = ctk.CTkButton(control_frame, text="启动", command=self.start_process)
-# self.start_btn.pack(side="left", padx=5)
-
-# self.stop_btn = ctk.CTkButton(control_frame, text="停止", command=self.stop_process, state="disabled")
-# self.stop_btn.pack(side="left", padx=5)
-
-# self.clear_btn = ctk.CTkButton(control_frame, text="清屏", command=self.clear_logs)
-# self.clear_btn.pack(side="left", padx=5)
-
-# column_filter_frame = ctk.CTkFrame(control_frame)
-# column_filter_frame.pack(side="left", padx=20)
-
-# self.time_check = ctk.CTkCheckBox(column_filter_frame, text="显示时间", command=self.refresh_logs)
-# self.time_check.pack(side="left", padx=5)
-# self.time_check.select()
-
-# self.level_check = ctk.CTkCheckBox(column_filter_frame, text="显示等级", command=self.refresh_logs)
-# self.level_check.pack(side="left", padx=5)
-# self.level_check.select()
-
-# self.module_check = ctk.CTkCheckBox(column_filter_frame, text="显示模块", command=self.refresh_logs)
-# self.module_check.pack(side="left", padx=5)
-# self.module_check.select()
-
-# # 筛选面板
-# filter_frame = ctk.CTkFrame(self)
-# filter_frame.grid(row=0, column=1, rowspan=2, sticky="ns", padx=5)
-
-# ctk.CTkLabel(filter_frame, text="日志等级筛选").pack(pady=5)
-# self.level_scroll = ctk.CTkScrollableFrame(filter_frame, width=150, height=200)
-# self.level_scroll.pack(fill="both", expand=True, padx=5)
-
-# ctk.CTkLabel(filter_frame, text="模块筛选").pack(pady=5)
-# self.module_filter_entry = ctk.CTkEntry(filter_frame, placeholder_text="输入模块过滤词")
-# self.module_filter_entry.pack(pady=5)
-#         self.module_filter_entry.bind("<KeyRelease>", self.update_module_filter)
-
-# self.module_scroll = ctk.CTkScrollableFrame(filter_frame, width=300, height=200)
-# self.module_scroll.pack(fill="both", expand=True, padx=5)
-
-# self.log_text = ctk.CTkTextbox(self, wrap="word")
-# self.log_text.grid(row=1, column=0, sticky="nsew", padx=10, pady=5)
-
-# self.init_text_tags()
-
-# def update_module_filter(self, event):
-# """根据模块过滤词更新模块复选框的显示"""
-# filter_text = self.module_filter_entry.get().strip().lower()
-# for module, checkbox in self.module_checkboxes.items():
-# if filter_text in module.lower():
-# checkbox.pack(anchor="w", padx=5, pady=2)
-# else:
-# checkbox.pack_forget()
-
-# def update_filters(self, level, module):
-# """更新日志等级和模块的筛选器"""
-# if level not in self.available_levels:
-# self.available_levels.add(level)
-# self.add_checkbox(self.level_scroll, level, "level")
-
-# module_key = self.get_module_key(module)
-# if module_key not in self.available_modules:
-# self.available_modules.add(module_key)
-# self.sorted_modules = sorted(self.available_modules, key=lambda x: x.lower())
-# self.rebuild_module_checkboxes()
-
-# def rebuild_module_checkboxes(self):
-# """重新构建模块复选框"""
-# # 清空现有复选框
-# for widget in self.module_scroll.winfo_children():
-# widget.destroy()
-# self.module_checkboxes.clear()
-
-# # 重建排序后的复选框
-# for module in self.sorted_modules:
-# self.add_checkbox(self.module_scroll, module, "module")
-
-# def add_checkbox(self, parent, text, type_):
-# """在指定父组件中添加复选框"""
-
-# def update_filter():
-# current = cb.get()
-# if type_ == "level":
-# (self.selected_levels.add if current else self.selected_levels.discard)(text)
-# else:
-# (self.selected_modules.add if current else self.selected_modules.discard)(text)
-# self.refresh_logs()
-
-# cb = ctk.CTkCheckBox(parent, text=text, command=update_filter)
-# cb.select() # 初始选中
-
-# # 手动同步初始状态到集合(关键修复)
-# if type_ == "level":
-# self.selected_levels.add(text)
-# else:
-# self.selected_modules.add(text)
-
-# if type_ == "module":
-# self.module_checkboxes[text] = cb
-# cb.pack(anchor="w", padx=5, pady=2)
-# return cb
-
-# def check_filter(self, entry):
-# """检查日志条目是否符合当前筛选条件"""
-# level_ok = not self.selected_levels or entry["level"] in self.selected_levels
-# module_key = self.get_module_key(entry["module"])
-# module_ok = not self.selected_modules or module_key in self.selected_modules
-# return level_ok and module_ok
-
-# def init_text_tags(self):
-# """初始化日志文本的颜色标签"""
-# for tag, color in self.color_config.items():
-# self.log_text.tag_config(tag, foreground=color)
-# self.log_text.tag_config("default", foreground=self.color_config["default"])
-
-# def start_process(self):
-# """启动日志进程并开始读取输出"""
-# self.process = subprocess.Popen(
-# ["nb", "run"],
-# stdout=subprocess.PIPE,
-# stderr=subprocess.STDOUT,
-# text=True,
-# bufsize=1,
-# encoding="utf-8",
-# errors="ignore",
-# )
-# self.start_btn.configure(state="disabled")
-# self.stop_btn.configure(state="normal")
-# threading.Thread(target=self.read_output, daemon=True).start()
-
-# def stop_process(self):
-# """停止日志进程并清理相关资源"""
-# if self.process:
-# try:
-# if hasattr(self.process, "pid"):
-# if os.name == "nt":
-# subprocess.run(
-# ["taskkill", "/F", "/T", "/PID", str(self.process.pid)], check=True, capture_output=True
-# )
-# else:
-# os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
-# except (subprocess.CalledProcessError, ProcessLookupError, OSError) as e:
-# print(f"终止进程失败: {e}")
-# finally:
-# self.process = None
-# self.log_queue.queue.clear()
-# self.start_btn.configure(state="normal")
-# self.stop_btn.configure(state="disabled")
-# self.refresh_logs()
-
-# def read_output(self):
-# """读取日志进程的输出并放入队列"""
-# try:
-# while self.process and self.process.poll() is None and self.is_running:
-# line = self.process.stdout.readline()
-# if line:
-# self.log_queue.put(line)
-# else:
-# break # 避免空循环
-# self.process.stdout.close() # 确保关闭文件描述符
-# except ValueError: # 处理可能的I/O操作异常
-# pass
-
-# def process_log_queue(self):
-# """处理日志队列中的日志条目"""
-# while not self.log_queue.empty():
-# line = self.log_queue.get()
-# self.process_log_line(line)
-
-# # 仅在GUI仍在运行时继续处理队列
-# if self.is_running:
-# self.after(100, self.process_log_queue)
-
-# def process_log_line(self, line):
-# """解析单行日志并更新日志数据和筛选器"""
-# match = re.match(
-# r"""^
-# (?:(?P
## 新版0.6.x部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
@@ -53,7 +52,7 @@
-
+
👆 点击观看麦麦演示视频 👆
@@ -186,7 +185,7 @@ MaiCore是一个开源项目,我们非常欢迎你的参与。你的贡献,
感谢各位大佬!
-
+
**也感谢每一位给麦麦发展提出宝贵意见与建议的用户,感谢陪伴麦麦走到现在的你们**
diff --git a/template/template.env b/template/template.env
index 06e9b07ec..c1a6dd0dc 100644
--- a/template/template.env
+++ b/template/template.env
@@ -29,8 +29,18 @@ CHAT_ANY_WHERE_KEY=
SILICONFLOW_KEY=
# 定义日志相关配置
-SIMPLE_OUTPUT=true # 精简控制台输出格式
-CONSOLE_LOG_LEVEL=INFO # 自定义日志的默认控制台输出日志级别
-FILE_LOG_LEVEL=DEBUG # 自定义日志的默认文件输出日志级别
-DEFAULT_CONSOLE_LOG_LEVEL=SUCCESS # 原生日志的控制台输出日志级别(nonebot就是这一类)
-DEFAULT_FILE_LOG_LEVEL=DEBUG # 原生日志的默认文件输出日志级别(nonebot就是这一类)
\ No newline at end of file
+
+# 精简控制台输出格式
+SIMPLE_OUTPUT=true
+
+# 自定义日志的默认控制台输出日志级别
+CONSOLE_LOG_LEVEL=INFO
+
+# 自定义日志的默认文件输出日志级别
+FILE_LOG_LEVEL=DEBUG
+
+# 原生日志的控制台输出日志级别(nonebot就是这一类)
+DEFAULT_CONSOLE_LOG_LEVEL=SUCCESS
+
+# 原生日志的默认文件输出日志级别(nonebot就是这一类)
+DEFAULT_FILE_LOG_LEVEL=DEBUG
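Splitting the comments onto their own lines is more than cosmetic: the EnvInfo loader deleted earlier in this series splits each line on the first "=" and keeps the whole remainder as the value, so an inline "# comment" would leak into the setting. A tiny sketch of that failure mode (naive_parse is illustrative, mirroring the deleted loader, not a real MaiBot function):

def naive_parse(line: str):
    # Split on the first '=' only; everything after it, comments included, becomes the value.
    key, value = line.split("=", 1)
    return key.strip(), value.strip()

print(naive_parse("CONSOLE_LOG_LEVEL=INFO # console log level"))
# ('CONSOLE_LOG_LEVEL', 'INFO # console log level')  <- comment leaks into the value
print(naive_parse("CONSOLE_LOG_LEVEL=INFO"))
# ('CONSOLE_LOG_LEVEL', 'INFO')                      <- clean after this patch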
From 7d2f5b51a7f6af71c44ff2333f759e671ce1479b Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Mon, 21 Apr 2025 15:25:29 +0800
Subject: [PATCH 113/406] =?UTF-8?q?=E5=90=88=E5=B9=B6openai=E5=85=BC?=
=?UTF-8?q?=E5=AE=B9=EF=BC=8C=E8=BF=87ruff?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/config_api.py | 10 +++---
src/plugins/models/utils_model.py | 53 ++++++++++++++-----------------
2 files changed, 28 insertions(+), 35 deletions(-)
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 3f323ff85..650cbf634 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -1,10 +1,10 @@
from typing import Dict, List, Optional
import strawberry
-from packaging.version import Version, InvalidVersion
-from packaging.specifiers import SpecifierSet, InvalidSpecifier
-from ..config.config import global_config
-import os
-
+# from packaging.version import Version, InvalidVersion
+# from packaging.specifiers import SpecifierSet, InvalidSpecifier
+# from ..config.config import global_config
+# import os
+from packaging.version import Version
@strawberry.type
class BotConfig:
diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index 5fd11692a..365b15a60 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -79,8 +79,7 @@ class LLMRequest:
"o3",
"o3-2025-04-16",
"o3-mini",
- "o3-mini-2025-01-31"
- "o4-mini",
+ "o3-mini-2025-01-31o4-mini",
"o4-mini-2025-04-16",
]
@@ -806,10 +805,8 @@ class LLMRequest:
) -> Union[Tuple[Dict[str, Any], int], Tuple[None, int]]:
policy = request_content["policy"]
payload = request_content["payload"]
- keep_request = False
- wait_time = 0.1
+ wait_time = policy["base_wait"] * (2**retry_count)
if retry_count < policy["max_retries"] - 1:
- wait_time = policy["base_wait"] * (2**retry_count)
keep_request = True
if isinstance(exception, RequestAbortException):
response = exception.response
@@ -989,30 +986,27 @@ class LLMRequest:
# 复制一份参数,避免直接修改 self.params
params_copy = await self._transform_parameters(self.params)
if image_base64:
- payload = {
- "model": self.model_name,
- "messages": [
- {
- "role": "user",
- "content": [
- {"type": "text", "text": prompt},
- {
- "type": "image_url",
- "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"},
- },
- ],
- }
- ],
- "max_tokens": global_config.max_response_length,
- **params_copy,
- }
+ messages = [
+ {
+ "role": "user",
+ "content": [
+ {"type": "text", "text": prompt},
+ {
+ "type": "image_url",
+ "image_url": {"url": f"data:image/{image_format.lower()};base64,{image_base64}"},
+ },
+ ],
+ }
+ ]
else:
- payload = {
- "model": self.model_name,
- "messages": [{"role": "user", "content": prompt}],
- "max_tokens": global_config.max_response_length,
- **params_copy,
- }
+ messages = [{"role": "user", "content": prompt}]
+ payload = {
+ "model": self.model_name,
+ "messages": messages,
+ **params_copy,
+ }
+ if "max_tokens" not in payload and "max_completion_tokens" not in payload:
+ payload["max_tokens"] = global_config.max_response_length
# 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
payload["max_completion_tokens"] = payload.pop("max_tokens")
@@ -1105,11 +1099,10 @@ class LLMRequest:
async def generate_response_async(self, prompt: str, **kwargs) -> Union[str, Tuple]:
"""异步方式根据输入的提示生成模型的响应"""
- # 构建请求体
+ # 构建请求体,不硬编码max_tokens
data = {
"model": self.model_name,
"messages": [{"role": "user", "content": prompt}],
- "max_tokens": global_config.max_response_length,
**self.params,
**kwargs,
}
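The refactor above separates message construction from payload assembly: max_tokens is only injected when the caller supplied neither max_tokens nor max_completion_tokens, and models whose lowercased name appears in MODELS_NEEDING_TRANSFORMATION (presumably the o-series list touched in the first hunk) get the key renamed. A condensed sketch of that flow, reusing the names from the diff but standing alone:

from typing import Dict, Set

def build_payload(model_name: str, prompt: str, params: Dict,
                  default_max_tokens: int, needs_transformation: Set[str]) -> Dict:
    payload = {
        "model": model_name,
        "messages": [{"role": "user", "content": prompt}],
        **params,
    }
    # Respect caller-supplied limits; only fall back to the global default.
    if "max_tokens" not in payload and "max_completion_tokens" not in payload:
        payload["max_tokens"] = default_max_tokens
    # Some models (the o-series) reject max_tokens and expect max_completion_tokens.
    if model_name.lower() in needs_transformation and "max_tokens" in payload:
        payload["max_completion_tokens"] = payload.pop("max_tokens")
    return payload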
From 6e0a3cf8cf183429c3cdb9aca45c7213604d5bef Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Mon, 21 Apr 2025 07:26:08 +0000
Subject: [PATCH 114/406] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8?=
=?UTF-8?q?=E6=A0=BC=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/__init__.py | 3 ++-
src/api/config_api.py | 2 ++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/api/__init__.py b/src/api/__init__.py
index 11c091489..f5bc08a6e 100644
--- a/src/api/__init__.py
+++ b/src/api/__init__.py
@@ -1,7 +1,8 @@
from fastapi import FastAPI
from strawberry.fastapi import GraphQLRouter
+
app = FastAPI()
graphql_router = GraphQLRouter(schema=None, path="/") # Replace `None` with your actual schema
-app.include_router(graphql_router, prefix="/graphql", tags=["GraphQL"])
\ No newline at end of file
+app.include_router(graphql_router, prefix="/graphql", tags=["GraphQL"])
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 650cbf634..e39346176 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -1,11 +1,13 @@
from typing import Dict, List, Optional
import strawberry
+
# from packaging.version import Version, InvalidVersion
# from packaging.specifiers import SpecifierSet, InvalidSpecifier
# from ..config.config import global_config
# import os
from packaging.version import Version
+
@strawberry.type
class BotConfig:
"""机器人配置类"""
From c10b7eea61b9f352475363f4c92883cd6f4e055b Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:37:49 +0800
Subject: [PATCH 115/406] =?UTF-8?q?feat:=20=E6=95=B4=E5=90=88reasoning?=
=?UTF-8?q?=E6=A8=A1=E5=BC=8F=E5=92=8Chfc=E6=A8=A1=E5=BC=8F=EF=BC=8C?=
=?UTF-8?q?=E7=BB=9F=E4=B8=80=E8=B0=83=E6=8E=A7=EF=BC=88=E4=BD=86=E4=B8=8D?=
=?UTF-8?q?=E6=98=AF=E5=BE=88=E7=BB=9F=E4=B8=80=EF=BC=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/config/config.py | 28 +-
src/heart_flow/heartflow.py | 10 +-
src/heart_flow/sub_heartflow.py | 11 +-
src/main.py | 8 +-
src/plugins/chat/bot.py | 2 -
.../heartFC_chat/heartFC_controler.py | 82 ++--
.../heartFC_chat/heartFC_processor.py | 16 +-
.../chat_module/heartFC_chat/interest.py | 12 +
.../heartFC_chat/reasoning_chat.py | 412 ++++++++++++++++
.../heartFC_chat/reasoning_generator.py | 199 ++++++++
.../heartFC_chat/reasoning_prompt_builder.py | 445 ++++++++++++++++++
.../reasoning_chat/reasoning_chat.py | 43 +-
.../reasoning_chat/reasoning_generator.py | 2 +-
src/plugins/memory_system/Hippocampus.py | 6 +-
14 files changed, 1188 insertions(+), 88 deletions(-)
create mode 100644 src/plugins/chat_module/heartFC_chat/reasoning_chat.py
create mode 100644 src/plugins/chat_module/heartFC_chat/reasoning_generator.py
create mode 100644 src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py
diff --git a/src/config/config.py b/src/config/config.py
index d2fe6f0f2..83e478375 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -213,8 +213,8 @@ class BotConfig:
# response
response_mode: str = "heart_flow" # 回复策略
- model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
- model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
+ model_reasoning_probability: float = 0.7 # 麦麦回答时选择推理模型(主要)模型概率
+ model_normal_probability: float = 0.3 # 麦麦回答时选择一般模型(次要)模型概率
# MODEL_R1_DISTILL_PROBABILITY: float = 0.1 # R1蒸馏模型概率
# emoji
@@ -407,10 +407,13 @@ class BotConfig:
def response(parent: dict):
response_config = parent["response"]
- config.model_reasoning_probability = response_config.get("model_reasoning_probability", config.model_reasoning_probability)
- config.model_normal_probability = response_config.get("model_normal_probability", config.model_normal_probability)
-
-
+ config.model_reasoning_probability = response_config.get(
+ "model_reasoning_probability", config.model_reasoning_probability
+ )
+ config.model_normal_probability = response_config.get(
+ "model_normal_probability", config.model_normal_probability
+ )
+
# 添加 enable_heart_flowC 的加载逻辑 (假设它在 [response] 部分)
if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
config.enable_heart_flowC = response_config.get("enable_heart_flowC", config.enable_heart_flowC)
@@ -418,7 +421,6 @@ class BotConfig:
def heartflow(parent: dict):
heartflow_config = parent["heartflow"]
# 加载新增的 heartflowC 参数
-
# 加载原有的 heartflow 参数
# config.sub_heart_flow_update_interval = heartflow_config.get(
@@ -442,9 +444,15 @@ class BotConfig:
"compress_length_limit", config.compress_length_limit
)
if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
- config.reply_trigger_threshold = heartflow_config.get("reply_trigger_threshold", config.reply_trigger_threshold)
- config.probability_decay_factor_per_second = heartflow_config.get("probability_decay_factor_per_second", config.probability_decay_factor_per_second)
- config.default_decay_rate_per_second = heartflow_config.get("default_decay_rate_per_second", config.default_decay_rate_per_second)
+ config.reply_trigger_threshold = heartflow_config.get(
+ "reply_trigger_threshold", config.reply_trigger_threshold
+ )
+ config.probability_decay_factor_per_second = heartflow_config.get(
+ "probability_decay_factor_per_second", config.probability_decay_factor_per_second
+ )
+ config.default_decay_rate_per_second = heartflow_config.get(
+ "default_decay_rate_per_second", config.default_decay_rate_per_second
+ )
config.initial_duration = heartflow_config.get("initial_duration", config.initial_duration)
def willing(parent: dict):
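The config loader gates newer keys behind the file's declared schema version with checks like config.INNER_VERSION in SpecifierSet(">=1.4.0"), so an older bot_config.toml simply keeps its defaults. A minimal sketch of the pattern using the packaging library (load_threshold and the key name are illustrative):

from packaging.version import Version
from packaging.specifiers import SpecifierSet

def load_threshold(heartflow_config: dict, inner_version: Version, default: float) -> float:
    # Only config files that declare >=1.4.0 are expected to carry this key.
    if inner_version in SpecifierSet(">=1.4.0"):
        return heartflow_config.get("reply_trigger_threshold", default)
    return default

print(load_threshold({"reply_trigger_threshold": 0.8}, Version("1.4.0"), 0.5))  # 0.8
print(load_threshold({"reply_trigger_threshold": 0.8}, Version("1.3.0"), 0.5))  # 0.5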
diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py
index c2f922ff9..50f0a735f 100644
--- a/src/heart_flow/heartflow.py
+++ b/src/heart_flow/heartflow.py
@@ -45,6 +45,8 @@ class CurrentState:
def __init__(self):
self.current_state_info = ""
+ self.chat_status = "IDLE"
+
self.mood_manager = MoodManager()
self.mood = self.mood_manager.get_prompt()
@@ -70,7 +72,7 @@ class Heartflow:
"""定期清理不活跃的子心流"""
while True:
current_time = time.time()
- inactive_subheartflows_ids = [] # 修改变量名以清晰表示存储的是ID
+ inactive_subheartflows_ids = [] # 修改变量名以清晰表示存储的是ID
# 检查所有子心流
# 使用 list(self._subheartflows.items()) 避免在迭代时修改字典
@@ -104,7 +106,7 @@ class Heartflow:
# await self.do_a_thinking()
# await asyncio.sleep(global_config.heart_flow_update_interval * 3) # 5分钟思考一次
-
+
await asyncio.sleep(300)
async def heartflow_start_working(self):
@@ -253,7 +255,7 @@ class Heartflow:
# 创建并初始化观察对象
logger.debug(f"为 {subheartflow_id} 创建 observation")
observation = ChattingObservation(subheartflow_id)
- await observation.initialize() # 等待初始化完成
+ await observation.initialize() # 等待初始化完成
subheartflow.add_observation(observation)
logger.debug(f"为 {subheartflow_id} 添加 observation 成功")
@@ -269,7 +271,7 @@ class Heartflow:
except Exception as e:
# 记录详细错误信息
logger.error(f"创建 subheartflow {subheartflow_id} 失败: {e}")
- logger.error(traceback.format_exc()) # 记录完整的 traceback
+ logger.error(traceback.format_exc()) # 记录完整的 traceback
# 考虑是否需要更具体的错误处理或资源清理逻辑
return None
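The cleanup task in this hunk snapshots the subheartflow dict before scanning it, since entries are removed mid-iteration. A stripped-down sketch of that periodic-cleanup shape, assuming flow objects expose a last_active_time timestamp (the interval and idle limit are illustrative defaults):

import asyncio
import time

async def cleanup_inactive(flows: dict, max_idle: float = 3600.0, interval: float = 300.0):
    while True:
        now = time.time()
        # Snapshot first: deleting keys while iterating the live dict raises RuntimeError.
        inactive_ids = [fid for fid, flow in list(flows.items())
                        if now - flow.last_active_time > max_idle]
        for fid in inactive_ids:
            flows.pop(fid, None)
        await asyncio.sleep(interval)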
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index c1a58dcda..9087b5763 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -5,7 +5,6 @@ from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
import time
from typing import Optional, List
-from datetime import datetime
import traceback
from src.plugins.chat.utils import parse_text_timestamps
@@ -76,14 +75,14 @@ class SubHeartflow:
)
self.main_heartflow_info = ""
-
+
self.last_active_time = time.time() # 添加最后激活时间
- self.should_stop = False # 添加停止标志
- self.task: Optional[asyncio.Task] = None # 添加 task 属性
+ self.should_stop = False # 添加停止标志
+ self.task: Optional[asyncio.Task] = None # 添加 task 属性
self.is_active = False
- self.observations: List[ChattingObservation] = [] # 使用 List 类型提示
+ self.observations: List[ChattingObservation] = [] # 使用 List 类型提示
self.running_knowledges = []
@@ -98,7 +97,7 @@ class SubHeartflow:
# 检查是否被主心流标记为停止
if self.should_stop:
logger.info(f"子心流 {self.subheartflow_id} 被标记为停止,正在退出后台任务...")
- break # 退出循环以停止任务
+ break # 退出循环以停止任务
await asyncio.sleep(global_config.sub_heart_flow_update_interval) # 定期检查销毁条件
diff --git a/src/main.py b/src/main.py
index aad08b906..f113a732d 100644
--- a/src/main.py
+++ b/src/main.py
@@ -19,6 +19,7 @@ from .individuality.individuality import Individuality
from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager
from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFC_Controller
+from .plugins.chat_module.heartFC_chat.reasoning_chat import ReasoningChat
logger = get_module_logger("main")
@@ -117,8 +118,11 @@ class MainSystem:
await interest_manager.start_background_tasks()
logger.success("兴趣管理器后台任务启动成功")
- # 初始化并独立启动 HeartFC_Chat
- HeartFC_Controller()
+ # 初始化 ReasoningChat 单例 (确保它在需要之前被创建)
+ ReasoningChat.get_instance()
+ logger.success("ReasoningChat 单例初始化成功")
+
+ # 初始化并独立启动 HeartFC_Chat 控制器 (使用 get_instance 获取单例)
heartfc_chat_instance = HeartFC_Controller.get_instance()
if heartfc_chat_instance:
await heartfc_chat_instance.start()
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index c3ba78b08..eaf829970 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -123,8 +123,6 @@ class ChatBot:
await self.heartFC_processor.process_message(message_data)
else:
await self.heartFC_processor.process_message(message_data)
-
-
if template_group_name:
async with global_prompt_manager.async_message_scope(template_group_name):
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
index 389e030a4..55790eb4c 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
@@ -13,6 +13,7 @@ from src.do_tool.tool_use import ToolUser
from .interest import InterestManager
from src.plugins.chat.chat_stream import chat_manager
from .pf_chatting import PFChatting
+import threading # 导入 threading
# 定义日志配置
chat_config = LogConfig(
@@ -27,43 +28,58 @@ INTEREST_MONITOR_INTERVAL_SECONDS = 1
class HeartFC_Controller:
- _instance = None # For potential singleton access if needed by MessageManager
+ _instance = None
+ _lock = threading.Lock() # 使用 threading.Lock 替代 asyncio.Lock 以兼容 __new__
+ _initialized = False
- def __init__(self):
- # --- Updated Init ---
- if HeartFC_Controller._instance is not None:
- # Prevent re-initialization if used as a singleton
- return
- self.gpt = ResponseGenerator()
- self.mood_manager = MoodManager.get_instance()
- self.mood_manager.start_mood_update()
- self.tool_user = ToolUser()
- self.interest_manager = InterestManager()
- self._interest_monitor_task: Optional[asyncio.Task] = None
- # --- New PFChatting Management ---
- self.pf_chatting_instances: Dict[str, PFChatting] = {}
- self._pf_chatting_lock = Lock()
- # --- End New PFChatting Management ---
- HeartFC_Controller._instance = self # Register instance
- # --- End Updated Init ---
- # --- Make dependencies accessible for PFChatting ---
- # These are accessed via the passed instance in PFChatting
- self.emoji_manager = emoji_manager
- self.relationship_manager = relationship_manager
- self.MessageManager = MessageManager # Pass the class/singleton access
- # --- End dependencies ---
-
- # --- Added Class Method for Singleton Access ---
- @classmethod
- def get_instance(cls):
+ def __new__(cls, *args, **kwargs):
if cls._instance is None:
- # This might indicate an issue if called before initialization
- logger.warning("HeartFC_Controller get_instance called before initialization.")
- # Optionally, initialize here if a strict singleton pattern is desired
- # cls._instance = cls()
+ with cls._lock:
+ if cls._instance is None:
+ cls._instance = super().__new__(cls)
return cls._instance
- # --- End Added Class Method ---
+ def __init__(self):
+ if self._initialized:
+ return
+ with self.__class__._lock: # 使用类锁确保初始化线程安全
+ if self._initialized:
+ return
+ logger.info("正在初始化 HeartFC_Controller 单例...")
+ self.gpt = ResponseGenerator()
+ self.mood_manager = MoodManager.get_instance()
+ self.mood_manager.start_mood_update()
+ self.tool_user = ToolUser()
+ self.interest_manager = InterestManager()
+ self._interest_monitor_task: Optional[asyncio.Task] = None
+ self.pf_chatting_instances: Dict[str, PFChatting] = {}
+ self._pf_chatting_lock = Lock() # 这个可以是 asyncio.Lock,用于异步上下文
+ self.emoji_manager = emoji_manager
+ self.relationship_manager = relationship_manager
+ self.MessageManager = MessageManager
+ self._initialized = True
+ logger.info("HeartFC_Controller 单例初始化完成。")
+
+ @classmethod
+ def get_instance(cls):
+ """获取 HeartFC_Controller 的单例实例。"""
+ if cls._instance is None:
+ logger.warning("HeartFC_Controller 实例在首次 get_instance 时创建,可能未在 main 中正确初始化。")
+ cls() # 调用构造函数创建
+ return cls._instance
+
+ # --- 新增:检查 PFChatting 状态的方法 --- #
+ def is_pf_chatting_active(self, stream_id: str) -> bool:
+ """检查指定 stream_id 的 PFChatting 循环是否处于活动状态。"""
+ # 注意:这里直接访问字典,不加锁,因为读取通常是安全的,
+ # 并且 PFChatting 实例的 _loop_active 状态由其自身的异步循环管理。
+ # 如果需要更强的保证,可以在访问 pf_instance 前获取 _pf_chatting_lock
+ pf_instance = self.pf_chatting_instances.get(stream_id)
+ if pf_instance and pf_instance._loop_active: # 直接检查 PFChatting 实例的 _loop_active 属性
+ return True
+ return False
+
+ # --- 结束新增 --- #
async def start(self):
"""启动异步任务,如回复启动器"""
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
index 37708a94f..38c687791 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_processor.py
@@ -13,6 +13,7 @@ from ...chat.message_buffer import message_buffer
from ...utils.timer_calculater import Timer
from .interest import InterestManager
from src.plugins.person_info.relationship_manager import relationship_manager
+from .reasoning_chat import ReasoningChat
# 定义日志配置
processor_config = LogConfig(
@@ -29,7 +30,7 @@ class HeartFC_Processor:
def __init__(self):
self.storage = MessageStorage()
self.interest_manager = InterestManager()
- # self.chat_instance = chat_instance # 持有 HeartFC_Chat 实例
+ self.reasoning_chat = ReasoningChat.get_instance()
async def process_message(self, message_data: str) -> None:
"""处理接收到的原始消息数据,完成消息解析、缓冲、过滤、存储、兴趣度计算与更新等核心流程。
@@ -72,11 +73,11 @@ class HeartFC_Processor:
user_info=userinfo,
group_info=groupinfo,
)
- if not chat:
- logger.error(
- f"无法为消息创建或获取聊天流: user {userinfo.user_id}, group {groupinfo.group_id if groupinfo else 'None'}"
- )
- return
+
+ # --- 添加兴趣追踪启动 ---
+ # 在获取到 chat 对象后,启动对该聊天流的兴趣监控
+ await self.reasoning_chat.start_monitoring_interest(chat)
+ # --- 结束添加 ---
message.update_chat_stream(chat)
@@ -90,7 +91,6 @@ class HeartFC_Processor:
message.raw_message, chat, userinfo
):
return
- logger.trace(f"过滤词/正则表达式过滤成功: {message.processed_plain_text}")
# 查询缓冲器结果
buffer_result = await message_buffer.query_buffer_result(message)
@@ -152,6 +152,8 @@ class HeartFC_Processor:
f"使用激活率 {interested_rate:.2f} 更新后 (通过缓冲后),当前兴趣度: {current_interest:.2f}"
)
+ self.interest_manager.add_interest_dict(message, interested_rate, is_mentioned)
+
except Exception as e:
logger.error(f"更新兴趣度失败: {e}") # 调整日志消息
logger.error(traceback.format_exc())
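With this change the processor is a pure producer: every incoming message lands in the stream's interest dict, and a separate polling task (added in reasoning_chat.py below) consumes and removes entries. A minimal sketch of that poll-and-drain consumer shape (handle is a stand-in for the real reply pipeline):

import asyncio
from typing import Dict, Tuple

pending: Dict[str, Tuple] = {}  # message_id -> payload, filled by the message processor

async def handle(payload):  # hypothetical handler
    ...

async def consume(interval: float = 1.0):
    while True:
        await asyncio.sleep(interval)
        # Snapshot: the producer may insert new entries while we iterate.
        for msg_id, payload in list(pending.items()):
            try:
                await handle(payload)
            except Exception:
                pass  # log in real code; one bad message must not kill the loop
            finally:
                pending.pop(msg_id, None)  # drop it whether handling succeeded or failed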
diff --git a/src/plugins/chat_module/heartFC_chat/interest.py b/src/plugins/chat_module/heartFC_chat/interest.py
index 5a961e915..4ac5498a1 100644
--- a/src/plugins/chat_module/heartFC_chat/interest.py
+++ b/src/plugins/chat_module/heartFC_chat/interest.py
@@ -6,6 +6,7 @@ import json # 引入 json
import os # 引入 os
from typing import Optional # <--- 添加导入
import random # <--- 添加导入 random
+from src.plugins.chat.message import MessageRecv
from src.common.logger import get_module_logger, LogConfig, DEFAULT_CONFIG # 引入 DEFAULT_CONFIG
from src.plugins.chat.chat_stream import chat_manager # *** Import ChatManager ***
@@ -66,6 +67,13 @@ class InterestChatting:
self.is_above_threshold: bool = False # 标记兴趣值是否高于阈值
# --- 结束:概率回复相关属性 ---
+ # 记录激发兴趣对(消息id,激活值)
+ self.interest_dict = {}
+
+ def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
+ # Store the MessageRecv object and the interest value as a tuple
+ self.interest_dict[message.message_info.message_id] = (message, interest_value, is_mentioned)
+
def _calculate_decay(self, current_time: float):
"""计算从上次更新到现在的衰减"""
time_delta = current_time - self.last_update_time
@@ -445,6 +453,10 @@ class InterestManager:
stream_name = chat_manager.get_stream_name(stream_id) or stream_id # 获取流名称
logger.warning(f"尝试降低不存在的聊天流 {stream_name} 的兴趣度")
+ def add_interest_dict(self, message: MessageRecv, interest_value: float, is_mentioned: bool):
+ interest_chatting = self._get_or_create_interest_chatting(message.chat_stream.stream_id)
+ interest_chatting.add_interest_dict(message, interest_value, is_mentioned)
+
def cleanup_inactive_chats(self, max_age_seconds=INACTIVE_THRESHOLD_SECONDS):
"""
清理长时间不活跃的聊天流记录
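add_interest_dict stores a bare (message, interest_value, is_mentioned) tuple keyed by message_id, and the consumer in reasoning_chat.py below unpacks it positionally. A small sketch of the same bookkeeping with a NamedTuple, which would keep that unpacking self-describing (InterestEntry is a hypothetical name, not part of the patch):

from typing import Dict, NamedTuple

class InterestEntry(NamedTuple):
    message: object        # the MessageRecv that triggered the interest update
    interest_value: float  # activation value at arrival time
    is_mentioned: bool     # whether the bot was mentioned

interest_dict: Dict[str, InterestEntry] = {}

def add_interest(message_id: str, message, interest_value: float, is_mentioned: bool) -> None:
    interest_dict[message_id] = InterestEntry(message, interest_value, is_mentioned)

# Consumers can then read entry.interest_value / entry.is_mentioned by name
# instead of relying on tuple position.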
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_chat.py b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
new file mode 100644
index 000000000..95d3641d5
--- /dev/null
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
@@ -0,0 +1,412 @@
+import time
+import threading # 导入 threading
+from random import random
+import traceback
+import asyncio
+from typing import List, Dict
+from ...moods.moods import MoodManager
+from ....config.config import global_config
+from ...chat.emoji_manager import emoji_manager
+from .reasoning_generator import ResponseGenerator
+from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
+from ...chat.messagesender import message_manager
+from ...storage.storage import MessageStorage
+from ...chat.utils import is_mentioned_bot_in_message
+from ...chat.utils_image import image_path_to_base64
+from ...willing.willing_manager import willing_manager
+from ...message import UserInfo, Seg
+from src.common.logger import get_module_logger, CHAT_STYLE_CONFIG, LogConfig
+from src.plugins.chat.chat_stream import ChatStream
+from src.plugins.person_info.relationship_manager import relationship_manager
+from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+from src.plugins.utils.timer_calculater import Timer
+from .interest import InterestManager
+from .heartFC_controler import HeartFC_Controller # 导入 HeartFC_Controller
+
+# 定义日志配置
+chat_config = LogConfig(
+ console_format=CHAT_STYLE_CONFIG["console_format"],
+ file_format=CHAT_STYLE_CONFIG["file_format"],
+)
+
+logger = get_module_logger("reasoning_chat", config=chat_config)
+
+
+class ReasoningChat:
+ _instance = None
+ _lock = threading.Lock()
+ _initialized = False
+
+ def __new__(cls, *args, **kwargs):
+ if cls._instance is None:
+ with cls._lock:
+ # Double-check locking
+ if cls._instance is None:
+ cls._instance = super().__new__(cls)
+ return cls._instance
+
+ def __init__(self):
+ # 防止重复初始化
+ if self._initialized:
+ return
+ with self.__class__._lock: # 使用类锁确保线程安全
+ if self._initialized:
+ return
+ logger.info("正在初始化 ReasoningChat 单例...") # 添加日志
+ self.storage = MessageStorage()
+ self.gpt = ResponseGenerator()
+ self.mood_manager = MoodManager.get_instance()
+ self.mood_manager.start_mood_update()
+ # 用于存储每个 chat stream 的兴趣监控任务
+ self._interest_monitoring_tasks: Dict[str, asyncio.Task] = {}
+ self._initialized = True
+ self.interest_manager = InterestManager()
+ logger.info("ReasoningChat 单例初始化完成。") # 添加日志
+
+ @classmethod
+ def get_instance(cls):
+ """获取 ReasoningChat 的单例实例。"""
+ if cls._instance is None:
+ # 如果实例还未创建(理论上应该在 main 中初始化,但作为备用)
+ logger.warning("ReasoningChat 实例在首次 get_instance 时创建。")
+ cls() # 调用构造函数来创建实例
+ return cls._instance
+
+ @staticmethod
+ async def _create_thinking_message(message, chat, userinfo, messageinfo):
+ """创建思考消息"""
+ bot_user_info = UserInfo(
+ user_id=global_config.BOT_QQ,
+ user_nickname=global_config.BOT_NICKNAME,
+ platform=messageinfo.platform,
+ )
+
+ thinking_time_point = round(time.time(), 2)
+ thinking_id = "mt" + str(thinking_time_point)
+ thinking_message = MessageThinking(
+ message_id=thinking_id,
+ chat_stream=chat,
+ bot_user_info=bot_user_info,
+ reply=message,
+ thinking_start_time=thinking_time_point,
+ )
+
+ message_manager.add_message(thinking_message)
+
+ return thinking_id
+
+ @staticmethod
+ async def _send_response_messages(message, chat, response_set: List[str], thinking_id) -> MessageSending:
+ """发送回复消息"""
+ container = message_manager.get_container(chat.stream_id)
+ thinking_message = None
+
+ for msg in container.messages:
+ if isinstance(msg, MessageThinking) and msg.message_info.message_id == thinking_id:
+ thinking_message = msg
+ container.messages.remove(msg)
+ break
+
+ if not thinking_message:
+ logger.warning("未找到对应的思考消息,可能已超时被移除")
+ return
+
+ thinking_start_time = thinking_message.thinking_start_time
+ message_set = MessageSet(chat, thinking_id)
+
+ mark_head = False
+ first_bot_msg = None
+ for msg in response_set:
+ message_segment = Seg(type="text", data=msg)
+ bot_message = MessageSending(
+ message_id=thinking_id,
+ chat_stream=chat,
+ bot_user_info=UserInfo(
+ user_id=global_config.BOT_QQ,
+ user_nickname=global_config.BOT_NICKNAME,
+ platform=message.message_info.platform,
+ ),
+ sender_info=message.message_info.user_info,
+ message_segment=message_segment,
+ reply=message,
+ is_head=not mark_head,
+ is_emoji=False,
+ thinking_start_time=thinking_start_time,
+ )
+ if not mark_head:
+ mark_head = True
+ first_bot_msg = bot_message
+ message_set.add_message(bot_message)
+ message_manager.add_message(message_set)
+
+ return first_bot_msg
+
+ @staticmethod
+ async def _handle_emoji(message, chat, response):
+ """处理表情包"""
+ if random() < global_config.emoji_chance:
+ emoji_raw = await emoji_manager.get_emoji_for_text(response)
+ if emoji_raw:
+ emoji_path, description = emoji_raw
+ emoji_cq = image_path_to_base64(emoji_path)
+
+ thinking_time_point = round(message.message_info.time, 2)
+
+ message_segment = Seg(type="emoji", data=emoji_cq)
+ bot_message = MessageSending(
+ message_id="mt" + str(thinking_time_point),
+ chat_stream=chat,
+ bot_user_info=UserInfo(
+ user_id=global_config.BOT_QQ,
+ user_nickname=global_config.BOT_NICKNAME,
+ platform=message.message_info.platform,
+ ),
+ sender_info=message.message_info.user_info,
+ message_segment=message_segment,
+ reply=message,
+ is_head=False,
+ is_emoji=True,
+ )
+ message_manager.add_message(bot_message)
+
+ async def _update_relationship(self, message: MessageRecv, response_set):
+ """更新关系情绪"""
+ ori_response = ",".join(response_set)
+ stance, emotion = await self.gpt._get_emotion_tags(ori_response, message.processed_plain_text)
+ await relationship_manager.calculate_update_relationship_value(
+ chat_stream=message.chat_stream, label=emotion, stance=stance
+ )
+ self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
+
+ async def _find_interested_message(self, chat: ChatStream) -> None:
+ # 此函数设计为后台任务,轮询指定 chat 的兴趣消息。
+ # 它通常由外部代码在 chat 流活跃时启动。
+ controller = HeartFC_Controller.get_instance() # 获取控制器实例
+ if not controller:
+ logger.error(f"无法获取 HeartFC_Controller 实例,无法检查 PFChatting 状态。stream: {chat.stream_id}")
+ # 在没有控制器的情况下可能需要决定是继续处理还是完全停止?这里暂时假设继续
+ pass # 或者 return?
+
+ while True:
+ await asyncio.sleep(1) # 每秒检查一次
+ interest_chatting = self.interest_manager.get_interest_chatting(chat.stream_id)
+
+ if not interest_chatting:
+ continue
+
+ interest_dict = interest_chatting.interest_dict if interest_chatting.interest_dict else {}
+ items_to_process = list(interest_dict.items())
+
+ if not items_to_process:
+ continue
+
+ for msg_id, (message, interest_value, is_mentioned) in items_to_process:
+ # --- 检查 PFChatting 是否活跃 --- #
+ pf_active = False
+ if controller:
+ pf_active = controller.is_pf_chatting_active(chat.stream_id)
+
+ if pf_active:
+ # 如果 PFChatting 活跃,则跳过处理,直接移除消息
+ removed_item = interest_dict.pop(msg_id, None)
+ if removed_item:
+ logger.debug(f"PFChatting 活跃,已跳过并移除兴趣消息 {msg_id} for stream: {chat.stream_id}")
+ continue # 处理下一条消息
+ # --- 结束检查 --- #
+
+ # 只有当 PFChatting 不活跃时才执行以下处理逻辑
+ try:
+ # logger.debug(f"正在处理消息 {msg_id} for stream: {chat.stream_id}") # 可选调试信息
+ await self.normal_reasoning_chat(
+ message=message,
+ chat=chat,
+ is_mentioned=is_mentioned,
+ interested_rate=interest_value,
+ )
+ # logger.debug(f"处理完成消息 {msg_id}") # 可选调试信息
+ except Exception as e:
+ logger.error(f"处理兴趣消息 {msg_id} 时出错: {e}\n{traceback.format_exc()}")
+ finally:
+ # 无论处理成功与否(且PFChatting不活跃),都尝试从原始字典中移除该消息
+ removed_item = interest_dict.pop(msg_id, None)
+ if removed_item:
+ logger.debug(f"已从兴趣字典中移除消息 {msg_id}")
+
+ async def normal_reasoning_chat(
+ self, message: MessageRecv, chat: ChatStream, is_mentioned: bool, interested_rate: float
+ ) -> None:
+ timing_results = {}
+ userinfo = message.message_info.user_info
+ messageinfo = message.message_info
+
+ is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
+ # 意愿管理器:设置当前message信息
+ willing_manager.setup(message, chat, is_mentioned, interested_rate)
+
+ # 获取回复概率
+ is_willing = False
+ if reply_probability != 1:
+ is_willing = True
+ reply_probability = await willing_manager.get_reply_probability(message.message_info.message_id)
+
+ if message.message_info.additional_config:
+ if "maimcore_reply_probability_gain" in message.message_info.additional_config.keys():
+ reply_probability += message.message_info.additional_config["maimcore_reply_probability_gain"]
+
+ # 打印消息信息
+ mes_name = chat.group_info.group_name if chat.group_info else "私聊"
+ current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time))
+ willing_log = f"[回复意愿:{await willing_manager.get_willing(chat.stream_id):.2f}]" if is_willing else ""
+ logger.info(
+ f"[{current_time}][{mes_name}]"
+ f"{chat.user_info.user_nickname}:"
+ f"{message.processed_plain_text}{willing_log}[概率:{reply_probability * 100:.1f}%]"
+ )
+ do_reply = False
+ if random() < reply_probability:
+ do_reply = True
+
+ # 回复前处理
+ await willing_manager.before_generate_reply_handle(message.message_info.message_id)
+
+ # 创建思考消息
+ with Timer("创建思考消息", timing_results):
+ thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
+
+ logger.debug(f"创建捕捉器,thinking_id:{thinking_id}")
+
+ info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
+ info_catcher.catch_decide_to_response(message)
+
+ # 生成回复
+ try:
+ with Timer("生成回复", timing_results):
+ response_set = await self.gpt.generate_response(message, thinking_id)
+
+ info_catcher.catch_after_generate_response(timing_results["生成回复"])
+ except Exception as e:
+ logger.error(f"回复生成出现错误:{str(e)} {traceback.format_exc()}")
+ response_set = None
+
+ if not response_set:
+ logger.info("为什么生成回复失败?")
+ return
+
+ # 发送消息
+ with Timer("发送消息", timing_results):
+ first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
+
+ info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg)
+
+ info_catcher.done_catch()
+
+ # 处理表情包
+ with Timer("处理表情包", timing_results):
+ await self._handle_emoji(message, chat, response_set)
+
+ # 更新关系情绪
+ with Timer("更新关系情绪", timing_results):
+ await self._update_relationship(message, response_set)
+
+ # 回复后处理
+ await willing_manager.after_generate_reply_handle(message.message_info.message_id)
+
+ # 输出性能计时结果
+ if do_reply:
+ timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()])
+ trigger_msg = message.processed_plain_text
+ response_msg = " ".join(response_set) if response_set else "无回复"
+ logger.info(f"触发消息: {trigger_msg[:20]}... | 推理消息: {response_msg[:20]}... | 性能计时: {timing_str}")
+ else:
+ # 不回复处理
+ await willing_manager.not_reply_handle(message.message_info.message_id)
+
+ # 意愿管理器:注销当前message信息
+ willing_manager.delete(message.message_info.message_id)
+
+ @staticmethod
+ def _check_ban_words(text: str, chat, userinfo) -> bool:
+ """检查消息中是否包含过滤词"""
+ for word in global_config.ban_words:
+ if word in text:
+ logger.info(
+ f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
+ )
+ logger.info(f"[过滤词识别]消息中含有{word},filtered")
+ return True
+ return False
+
+ @staticmethod
+ def _check_ban_regex(text: str, chat, userinfo) -> bool:
+ """检查消息是否匹配过滤正则表达式"""
+ for pattern in global_config.ban_msgs_regex:
+ if pattern.search(text):
+ logger.info(
+ f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
+ )
+ logger.info(f"[正则表达式过滤]消息匹配到{pattern},filtered")
+ return True
+ return False
+
+ async def start_monitoring_interest(self, chat: ChatStream):
+ """为指定的 ChatStream 启动后台兴趣消息监控任务。"""
+ stream_id = chat.stream_id
+ # 检查任务是否已在运行
+ if stream_id in self._interest_monitoring_tasks and not self._interest_monitoring_tasks[stream_id].done():
+ task = self._interest_monitoring_tasks[stream_id]
+ if not task.cancelled(): # 确保任务未被取消
+ logger.info(f"兴趣监控任务已在运行 stream: {stream_id}")
+ return
+ else:
+ logger.info(f"发现已取消的任务,重新创建 stream: {stream_id}")
+ # 如果任务被取消了,允许重新创建
+
+ logger.info(f"启动兴趣监控任务 stream: {stream_id}...")
+ # 创建新的后台任务来运行 _find_interested_message
+ task = asyncio.create_task(self._find_interested_message(chat))
+ self._interest_monitoring_tasks[stream_id] = task
+
+ # 添加回调,当任务完成(或被取消)时,自动从字典中移除
+ task.add_done_callback(lambda t: self._handle_task_completion(stream_id, t))
+
+ def _handle_task_completion(self, stream_id: str, task: asyncio.Task):
+ """处理监控任务完成的回调。"""
+ try:
+ # 检查任务是否因异常而结束
+ exception = task.exception()
+ if exception:
+ logger.error(f"兴趣监控任务 stream {stream_id} 异常结束: {exception}", exc_info=exception)
+ elif task.cancelled():
+ logger.info(f"兴趣监控任务 stream {stream_id} 已被取消。")
+ else:
+ logger.info(f"兴趣监控任务 stream {stream_id} 正常结束。") # 理论上 while True 不会正常结束
+ except asyncio.CancelledError:
+ logger.info(f"兴趣监控任务 stream {stream_id} 在完成处理期间被取消。")
+ finally:
+ # 无论如何都从字典中移除
+ removed_task = self._interest_monitoring_tasks.pop(stream_id, None)
+ if removed_task:
+ logger.debug(f"已从监控任务字典移除 stream: {stream_id}")
+
+ async def stop_monitoring_interest(self, stream_id: str):
+ """停止指定 stream_id 的兴趣消息监控任务。"""
+ if stream_id in self._interest_monitoring_tasks:
+ task = self._interest_monitoring_tasks[stream_id]
+ if not task.done():
+ logger.info(f"正在停止兴趣监控任务 stream: {stream_id}...")
+ task.cancel() # 请求取消任务
+ try:
+ # 等待任务实际被取消(可选,提供更明确的停止)
+ # 设置超时以防万一
+ await asyncio.wait_for(task, timeout=5.0)
+ except asyncio.CancelledError:
+ logger.info(f"兴趣监控任务 stream {stream_id} 已确认取消。")
+ except asyncio.TimeoutError:
+ logger.warning(f"停止兴趣监控任务 stream {stream_id} 超时。任务可能仍在运行。")
+ except Exception as e:
+ # 捕获 task.exception() 可能在取消期间重新引发的错误
+ logger.error(f"停止兴趣监控任务 stream {stream_id} 时发生错误: {e}")
+ # 任务最终会由 done_callback 移除,或在这里再次确认移除
+ self._interest_monitoring_tasks.pop(stream_id, None)
+ else:
+ logger.warning(f"尝试停止不存在或已停止的监控任务 stream: {stream_id}")
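The start/stop logic above is easier to see outside the diff. Below is a minimal, self-contained sketch of the same per-stream task registry, with a hypothetical Monitor class and a placeholder polling loop (not the project's actual code):

import asyncio

class Monitor:
    """Minimal sketch of the per-stream background-task registry used above."""

    def __init__(self) -> None:
        self._tasks: dict[str, asyncio.Task] = {}

    def start(self, stream_id: str) -> None:
        task = self._tasks.get(stream_id)
        if task and not task.done():
            return  # a live task already exists for this stream
        task = asyncio.create_task(self._loop(stream_id))
        self._tasks[stream_id] = task
        # Fires on normal exit, exception, or cancellation, so the registry
        # cleans itself up no matter how the task ends.
        task.add_done_callback(lambda t: self._tasks.pop(stream_id, None))

    async def stop(self, stream_id: str) -> None:
        task = self._tasks.get(stream_id)
        if task and not task.done():
            task.cancel()
            try:
                await asyncio.wait_for(task, timeout=5.0)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                pass  # the done callback still removes the registry entry

    async def _loop(self, stream_id: str) -> None:
        while True:
            await asyncio.sleep(1)  # placeholder for the polling body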
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_generator.py b/src/plugins/chat_module/heartFC_chat/reasoning_generator.py
new file mode 100644
index 000000000..2f4ba06e6
--- /dev/null
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_generator.py
@@ -0,0 +1,199 @@
+from typing import List, Optional, Union
+import random
+
+from ...models.utils_model import LLMRequest
+from ....config.config import global_config
+from ...chat.message import MessageThinking
+from .reasoning_prompt_builder import prompt_builder
+from ...chat.utils import process_llm_response
+from ...utils.timer_calculater import Timer
+from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
+from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
+
+# 定义日志配置
+llm_config = LogConfig(
+    # 使用 LLM 输出专用样式
+ console_format=LLM_STYLE_CONFIG["console_format"],
+ file_format=LLM_STYLE_CONFIG["file_format"],
+)
+
+logger = get_module_logger("llm_generator", config=llm_config)
+
+
+class ResponseGenerator:
+ def __init__(self):
+ self.model_reasoning = LLMRequest(
+ model=global_config.llm_reasoning,
+ temperature=0.7,
+ max_tokens=3000,
+ request_type="response_reasoning",
+ )
+ self.model_normal = LLMRequest(
+ model=global_config.llm_normal,
+ temperature=global_config.llm_normal["temp"],
+ max_tokens=256,
+ request_type="response_reasoning",
+ )
+
+ self.model_sum = LLMRequest(
+ model=global_config.llm_summary_by_topic, temperature=0.7, max_tokens=3000, request_type="relation"
+ )
+ self.current_model_type = "r1" # 默认使用 R1
+ self.current_model_name = "unknown model"
+
+ async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
+ """根据当前模型类型选择对应的生成函数"""
+ # 从global_config中获取模型概率值并选择模型
+ if random.random() < global_config.model_reasoning_probability:
+ self.current_model_type = "深深地"
+ current_model = self.model_reasoning
+ else:
+            self.current_model_type = "浅浅地"
+ current_model = self.model_normal
+
+ logger.info(
+            f"{self.current_model_type}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"  # noqa: E501
+        )
+
+ model_response = await self._generate_response_with_model(message, current_model, thinking_id)
+
+ # print(f"raw_content: {model_response}")
+
+ if model_response:
+ logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response}")
+ model_response = await self._process_response(model_response)
+
+ return model_response
+ else:
+ logger.info(f"{self.current_model_type}思考,失败")
+ return None
+
+ async def _generate_response_with_model(self, message: MessageThinking, model: LLMRequest, thinking_id: str):
+ info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
+
+ if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
+ sender_name = (
+ f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
+ f"{message.chat_stream.user_info.user_cardname}"
+ )
+ elif message.chat_stream.user_info.user_nickname:
+ sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
+ else:
+ sender_name = f"用户({message.chat_stream.user_info.user_id})"
+
+        logger.debug("开始生成回复-2")
+ # 构建prompt
+ with Timer() as t_build_prompt:
+ prompt = await prompt_builder._build_prompt(
+ message.chat_stream,
+ message_txt=message.processed_plain_text,
+ sender_name=sender_name,
+ stream_id=message.chat_stream.stream_id,
+ )
+ logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
+
+ try:
+ content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
+
+ info_catcher.catch_after_llm_generated(
+ prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
+ )
+
+ except Exception:
+ logger.exception("生成回复时出错")
+ return None
+
+ # 保存到数据库
+ # self._save_to_db(
+ # message=message,
+ # sender_name=sender_name,
+ # prompt=prompt,
+ # content=content,
+ # reasoning_content=reasoning_content,
+ # # reasoning_content_check=reasoning_content_check if global_config.enable_kuuki_read else ""
+ # )
+
+ return content
+
+ # def _save_to_db(
+ # self,
+ # message: MessageRecv,
+ # sender_name: str,
+ # prompt: str,
+ # content: str,
+ # reasoning_content: str,
+ # ):
+ # """保存对话记录到数据库"""
+ # db.reasoning_logs.insert_one(
+ # {
+ # "time": time.time(),
+ # "chat_id": message.chat_stream.stream_id,
+ # "user": sender_name,
+ # "message": message.processed_plain_text,
+ # "model": self.current_model_name,
+ # "reasoning": reasoning_content,
+ # "response": content,
+ # "prompt": prompt,
+ # }
+ # )
+
+ async def _get_emotion_tags(self, content: str, processed_plain_text: str):
+ """提取情感标签,结合立场和情绪"""
+ try:
+ # 构建提示词,结合回复内容、被回复的内容以及立场分析
+ prompt = f"""
+ 请严格根据以下对话内容,完成以下任务:
+ 1. 判断回复者对被回复者观点的直接立场:
+ - "支持":明确同意或强化被回复者观点
+ - "反对":明确反驳或否定被回复者观点
+ - "中立":不表达明确立场或无关回应
+ 2. 从"开心,愤怒,悲伤,惊讶,平静,害羞,恐惧,厌恶,困惑"中选出最匹配的1个情感标签
+ 3. 按照"立场-情绪"的格式直接输出结果,例如:"反对-愤怒"
+ 4. 考虑回复者的人格设定为{global_config.personality_core}
+
+ 对话示例:
+ 被回复:「A就是笨」
+ 回复:「A明明很聪明」 → 反对-愤怒
+
+ 当前对话:
+ 被回复:「{processed_plain_text}」
+ 回复:「{content}」
+
+ 输出要求:
+ - 只需输出"立场-情绪"结果,不要解释
+ - 严格基于文字直接表达的对立关系判断
+ """
+
+ # 调用模型生成结果
+ result, _, _ = await self.model_sum.generate_response(prompt)
+ result = result.strip()
+
+ # 解析模型输出的结果
+ if "-" in result:
+ stance, emotion = result.split("-", 1)
+ valid_stances = ["支持", "反对", "中立"]
+ valid_emotions = ["开心", "愤怒", "悲伤", "惊讶", "害羞", "平静", "恐惧", "厌恶", "困惑"]
+ if stance in valid_stances and emotion in valid_emotions:
+ return stance, emotion # 返回有效的立场-情绪组合
+ else:
+ logger.debug(f"无效立场-情感组合:{result}")
+ return "中立", "平静" # 默认返回中立-平静
+ else:
+ logger.debug(f"立场-情感格式错误:{result}")
+ return "中立", "平静" # 格式错误时返回默认值
+
+ except Exception as e:
+ logger.debug(f"获取情感标签时出错: {e}")
+ return "中立", "平静" # 出错时返回默认值
+
+ @staticmethod
+    async def _process_response(content: str) -> Optional[List[str]]:
+        """处理响应内容,返回切分后的消息列表;内容为空时返回 None"""
+        if not content:
+            return None
+
+ processed_response = process_llm_response(content)
+
+ # print(f"得到了处理后的llm返回{processed_response}")
+
+ return processed_response
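For reference, the stance-emotion convention used by _get_emotion_tags can be exercised standalone. A sketch with a hypothetical parse_stance_emotion function; the label sets are copied from the code above:

VALID_STANCES = {"支持", "反对", "中立"}
VALID_EMOTIONS = {"开心", "愤怒", "悲伤", "惊讶", "平静", "害羞", "恐惧", "厌恶", "困惑"}

def parse_stance_emotion(result: str) -> tuple[str, str]:
    """Parse model output like "反对-愤怒"; fall back to 中立-平静 on bad output."""
    if "-" in result:
        stance, emotion = result.strip().split("-", 1)
        if stance in VALID_STANCES and emotion in VALID_EMOTIONS:
            return stance, emotion
    return "中立", "平静"

assert parse_stance_emotion("反对-愤怒") == ("反对", "愤怒")
assert parse_stance_emotion("invalid") == ("中立", "平静")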
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py b/src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py
new file mode 100644
index 000000000..d37d65459
--- /dev/null
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_prompt_builder.py
@@ -0,0 +1,445 @@
+import random
+import time
+from typing import Optional, Union
+
+from ....common.database import db
+from ...chat.utils import get_embedding, get_recent_group_detailed_plain_text, get_recent_group_speaker
+from ...chat.chat_stream import chat_manager
+from ...moods.moods import MoodManager
+from ....individuality.individuality import Individuality
+from ...memory_system.Hippocampus import HippocampusManager
+from ...schedule.schedule_generator import bot_schedule
+from ....config.config import global_config
+from ...person_info.relationship_manager import relationship_manager
+from src.common.logger import get_module_logger
+from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
+
+logger = get_module_logger("prompt")
+
+
+def init_prompt():
+ Prompt(
+ """
+{relation_prompt_all}
+{memory_prompt}
+{prompt_info}
+{schedule_prompt}
+{chat_target}
+{chat_talking_prompt}
+现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言或者回复这条消息。\n
+你的网名叫{bot_name},有人也叫你{bot_other_names},{prompt_personality}。
+你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},然后给出日常且口语化的回复,平淡一些,
+尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复得太有条理,可以有个性。{prompt_ger}
+请回复得平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
+请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
+{moderation_prompt}不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""",
+ "reasoning_prompt_main",
+ )
+ Prompt(
+ "{relation_prompt}关系等级越大,关系越好,请分析聊天记录,根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。",
+ "relationship_prompt",
+ )
+ Prompt(
+ "你想起你之前见过的事情:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n",
+ "memory_prompt",
+ )
+ Prompt("你现在正在做的事情是:{schedule_info}", "schedule_prompt")
+ Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
+
+
+class PromptBuilder:
+ def __init__(self):
+ self.prompt_built = ""
+ self.activate_messages = ""
+
+ async def _build_prompt(
+        self, chat_stream, message_txt: str, sender_name: str = "某人", stream_id: Optional[str] = None
+    ) -> str:
+ # 开始构建prompt
+ prompt_personality = "你"
+ # person
+ individuality = Individuality.get_instance()
+
+ personality_core = individuality.personality.personality_core
+ prompt_personality += personality_core
+
+        personality_sides = individuality.personality.personality_sides
+        # random.choice 避免 random.shuffle 原地打乱配置对象中的列表
+        prompt_personality += f",{random.choice(personality_sides)}"
+
+        identity_detail = individuality.identity.identity_detail
+        prompt_personality += f",{random.choice(identity_detail)}"
+
+ # 关系
+ who_chat_in_group = [
+ (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname)
+ ]
+ who_chat_in_group += get_recent_group_speaker(
+ stream_id,
+ (chat_stream.user_info.platform, chat_stream.user_info.user_id),
+ limit=global_config.MAX_CONTEXT_SIZE,
+ )
+
+ relation_prompt = ""
+ for person in who_chat_in_group:
+ relation_prompt += await relationship_manager.build_relationship_info(person)
+
+ # relation_prompt_all = (
+ # f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录,"
+ # f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
+ # )
+
+ # 心情
+ mood_manager = MoodManager.get_instance()
+ mood_prompt = mood_manager.get_prompt()
+
+ # logger.info(f"心情prompt: {mood_prompt}")
+
+ # 调取记忆
+ memory_prompt = ""
+ related_memory = await HippocampusManager.get_instance().get_memory_from_text(
+ text=message_txt, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
+ )
+ related_memory_info = ""
+ if related_memory:
+ for memory in related_memory:
+ related_memory_info += memory[1]
+ # memory_prompt = f"你想起你之前见过的事情:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n"
+ memory_prompt = await global_prompt_manager.format_prompt(
+ "memory_prompt", related_memory_info=related_memory_info
+ )
+
+ # print(f"相关记忆:{related_memory_info}")
+
+ # 日程构建
+ # schedule_prompt = f"""你现在正在做的事情是:{bot_schedule.get_current_num_task(num=1, time_info=False)}"""
+
+ # 获取聊天上下文
+ chat_in_group = True
+ chat_talking_prompt = ""
+ if stream_id:
+ chat_talking_prompt = get_recent_group_detailed_plain_text(
+ stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+ )
+            chat_stream = chat_manager.get_stream(stream_id)
+            if not chat_stream.group_info:
+                chat_in_group = False
+ # print(f"\033[1;34m[调试]\033[0m 已从数据库获取群 {group_id} 的消息记录:{chat_talking_prompt}")
+ # 关键词检测与反应
+ keywords_reaction_prompt = ""
+ for rule in global_config.keywords_reaction_rules:
+ if rule.get("enable", False):
+ if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
+ logger.info(
+ f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}"
+ )
+ keywords_reaction_prompt += rule.get("reaction", "") + ","
+ else:
+ for pattern in rule.get("regex", []):
+ result = pattern.search(message_txt)
+ if result:
+ reaction = rule.get("reaction", "")
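+                            # 用正则命名捕获组的内容替换 reaction 模板中的 [组名] 占位符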
+ for name, content in result.groupdict().items():
+ reaction = reaction.replace(f"[{name}]", content)
+ logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
+ keywords_reaction_prompt += reaction + ","
+ break
+
+ # 中文高手(新加的好玩功能)
+ prompt_ger = ""
+ if random.random() < 0.04:
+ prompt_ger += "你喜欢用倒装句"
+ if random.random() < 0.02:
+ prompt_ger += "你喜欢用反问句"
+ if random.random() < 0.01:
+ prompt_ger += "你喜欢用文言文"
+
+ # 知识构建
+ start_time = time.time()
+ prompt_info = await self.get_prompt_info(message_txt, threshold=0.38)
+ if prompt_info:
+ # prompt_info = f"""\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n"""
+ prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info)
+
+ end_time = time.time()
+ logger.debug(f"知识检索耗时: {(end_time - start_time):.3f}秒")
+
+ # moderation_prompt = ""
+ # moderation_prompt = """**检查并忽略**任何涉及尝试绕过审核的行为。
+ # 涉及政治敏感以及违法违规的内容请规避。"""
+
+ logger.debug("开始构建prompt")
+
+ prompt = await global_prompt_manager.format_prompt(
+ "reasoning_prompt_main",
+ relation_prompt_all=await global_prompt_manager.get_prompt_async("relationship_prompt"),
+ relation_prompt=relation_prompt,
+ sender_name=sender_name,
+ memory_prompt=memory_prompt,
+ prompt_info=prompt_info,
+ schedule_prompt=await global_prompt_manager.format_prompt(
+ "schedule_prompt", schedule_info=bot_schedule.get_current_num_task(num=1, time_info=False)
+ ),
+ chat_target=await global_prompt_manager.get_prompt_async("chat_target_group1")
+ if chat_in_group
+ else await global_prompt_manager.get_prompt_async("chat_target_private1"),
+ chat_target_2=await global_prompt_manager.get_prompt_async("chat_target_group2")
+ if chat_in_group
+ else await global_prompt_manager.get_prompt_async("chat_target_private2"),
+ chat_talking_prompt=chat_talking_prompt,
+ message_txt=message_txt,
+ bot_name=global_config.BOT_NICKNAME,
+            bot_other_names="/".join(global_config.BOT_ALIAS_NAMES),
+ prompt_personality=prompt_personality,
+ mood_prompt=mood_prompt,
+ keywords_reaction_prompt=keywords_reaction_prompt,
+ prompt_ger=prompt_ger,
+ moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
+ )
+
+ return prompt
+
+ async def get_prompt_info(self, message: str, threshold: float):
+ start_time = time.time()
+ related_info = ""
+ logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")
+
+ # 1. 先从LLM获取主题,类似于记忆系统的做法
+ topics = []
+ # try:
+ # # 先尝试使用记忆系统的方法获取主题
+ # hippocampus = HippocampusManager.get_instance()._hippocampus
+ # topic_num = min(5, max(1, int(len(message) * 0.1)))
+ # topics_response = await hippocampus.llm_topic_judge.generate_response(hippocampus.find_topic_llm(message, topic_num))
+
+ # # 提取关键词
+ # topics = re.findall(r"<([^>]+)>", topics_response[0])
+ # if not topics:
+ # topics = []
+ # else:
+ # topics = [
+ # topic.strip()
+ # for topic in ",".join(topics).replace(",", ",").replace("、", ",").replace(" ", ",").split(",")
+ # if topic.strip()
+ # ]
+
+ # logger.info(f"从LLM提取的主题: {', '.join(topics)}")
+ # except Exception as e:
+ # logger.error(f"从LLM提取主题失败: {str(e)}")
+ # # 如果LLM提取失败,使用jieba分词提取关键词作为备选
+ # words = jieba.cut(message)
+ # topics = [word for word in words if len(word) > 1][:5]
+ # logger.info(f"使用jieba提取的主题: {', '.join(topics)}")
+
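+        # 注:上方从 LLM 提取主题的逻辑目前整体被注释,topics 恒为空列表,
+        # 因此总会进入下面的整句查询分支并提前 return,按主题查询的后续代码暂不会执行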
+ # 如果无法提取到主题,直接使用整个消息
+ if not topics:
+ logger.info("未能提取到任何主题,使用整个消息进行查询")
+ embedding = await get_embedding(message, request_type="prompt_build")
+ if not embedding:
+ logger.error("获取消息嵌入向量失败")
+ return ""
+
+ related_info = self.get_info_from_db(embedding, limit=3, threshold=threshold)
+ logger.info(f"知识库检索完成,总耗时: {time.time() - start_time:.3f}秒")
+ return related_info
+
+ # 2. 对每个主题进行知识库查询
+ logger.info(f"开始处理{len(topics)}个主题的知识库查询")
+
+ # 优化:批量获取嵌入向量,减少API调用
+ embeddings = {}
+ topics_batch = [topic for topic in topics if len(topic) > 0]
+ if message: # 确保消息非空
+ topics_batch.append(message)
+
+ # 批量获取嵌入向量
+ embed_start_time = time.time()
+ for text in topics_batch:
+ if not text or len(text.strip()) == 0:
+ continue
+
+ try:
+ embedding = await get_embedding(text, request_type="prompt_build")
+ if embedding:
+ embeddings[text] = embedding
+ else:
+ logger.warning(f"获取'{text}'的嵌入向量失败")
+ except Exception as e:
+ logger.error(f"获取'{text}'的嵌入向量时发生错误: {str(e)}")
+
+ logger.info(f"批量获取嵌入向量完成,耗时: {time.time() - embed_start_time:.3f}秒")
+
+ if not embeddings:
+ logger.error("所有嵌入向量获取失败")
+ return ""
+
+ # 3. 对每个主题进行知识库查询
+ all_results = []
+ query_start_time = time.time()
+
+ # 首先添加原始消息的查询结果
+ if message in embeddings:
+ original_results = self.get_info_from_db(embeddings[message], limit=3, threshold=threshold, return_raw=True)
+ if original_results:
+ for result in original_results:
+ result["topic"] = "原始消息"
+ all_results.extend(original_results)
+ logger.info(f"原始消息查询到{len(original_results)}条结果")
+
+ # 然后添加每个主题的查询结果
+ for topic in topics:
+ if not topic or topic not in embeddings:
+ continue
+
+ try:
+ topic_results = self.get_info_from_db(embeddings[topic], limit=3, threshold=threshold, return_raw=True)
+ if topic_results:
+ # 添加主题标记
+ for result in topic_results:
+ result["topic"] = topic
+ all_results.extend(topic_results)
+ logger.info(f"主题'{topic}'查询到{len(topic_results)}条结果")
+ except Exception as e:
+ logger.error(f"查询主题'{topic}'时发生错误: {str(e)}")
+
+ logger.info(f"知识库查询完成,耗时: {time.time() - query_start_time:.3f}秒,共获取{len(all_results)}条结果")
+
+ # 4. 去重和过滤
+ process_start_time = time.time()
+ unique_contents = set()
+ filtered_results = []
+ for result in all_results:
+ content = result["content"]
+ if content not in unique_contents:
+ unique_contents.add(content)
+ filtered_results.append(result)
+
+ # 5. 按相似度排序
+ filtered_results.sort(key=lambda x: x["similarity"], reverse=True)
+
+ # 6. 限制总数量(最多10条)
+ filtered_results = filtered_results[:10]
+ logger.info(
+ f"结果处理完成,耗时: {time.time() - process_start_time:.3f}秒,过滤后剩余{len(filtered_results)}条结果"
+ )
+
+ # 7. 格式化输出
+ if filtered_results:
+ format_start_time = time.time()
+ grouped_results = {}
+ for result in filtered_results:
+ topic = result["topic"]
+ if topic not in grouped_results:
+ grouped_results[topic] = []
+ grouped_results[topic].append(result)
+
+ # 按主题组织输出
+ for topic, results in grouped_results.items():
+ related_info += f"【主题: {topic}】\n"
+ for _i, result in enumerate(results, 1):
+ _similarity = result["similarity"]
+ content = result["content"].strip()
+ # 调试:为内容添加序号和相似度信息
+ # related_info += f"{i}. [{similarity:.2f}] {content}\n"
+ related_info += f"{content}\n"
+ related_info += "\n"
+
+ logger.info(f"格式化输出完成,耗时: {time.time() - format_start_time:.3f}秒")
+
+ logger.info(f"知识库检索总耗时: {time.time() - start_time:.3f}秒")
+ return related_info
+
+ @staticmethod
+ def get_info_from_db(
+ query_embedding: list, limit: int = 1, threshold: float = 0.5, return_raw: bool = False
+ ) -> Union[str, list]:
+ if not query_embedding:
+ return "" if not return_raw else []
+ # 使用余弦相似度计算
+ pipeline = [
+ {
+ "$addFields": {
+ "dotProduct": {
+ "$reduce": {
+ "input": {"$range": [0, {"$size": "$embedding"}]},
+ "initialValue": 0,
+ "in": {
+ "$add": [
+ "$$value",
+ {
+ "$multiply": [
+ {"$arrayElemAt": ["$embedding", "$$this"]},
+ {"$arrayElemAt": [query_embedding, "$$this"]},
+ ]
+ },
+ ]
+ },
+ }
+ },
+ "magnitude1": {
+ "$sqrt": {
+ "$reduce": {
+ "input": "$embedding",
+ "initialValue": 0,
+ "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
+ }
+ }
+ },
+ "magnitude2": {
+ "$sqrt": {
+ "$reduce": {
+ "input": query_embedding,
+ "initialValue": 0,
+ "in": {"$add": ["$$value", {"$multiply": ["$$this", "$$this"]}]},
+ }
+ }
+ },
+ }
+ },
+ {"$addFields": {"similarity": {"$divide": ["$dotProduct", {"$multiply": ["$magnitude1", "$magnitude2"]}]}}},
+ {
+ "$match": {
+ "similarity": {"$gte": threshold} # 只保留相似度大于等于阈值的结果
+ }
+ },
+ {"$sort": {"similarity": -1}},
+ {"$limit": limit},
+ {"$project": {"content": 1, "similarity": 1}},
+ ]
+
+ results = list(db.knowledges.aggregate(pipeline))
+ logger.debug(f"知识库查询结果数量: {len(results)}")
+
+ if not results:
+ return "" if not return_raw else []
+
+ if return_raw:
+ return results
+ else:
+ # 返回所有找到的内容,用换行分隔
+ return "\n".join(str(result["content"]) for result in results)
+
+
+init_prompt()
+prompt_builder = PromptBuilder()
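The aggregation pipeline in get_info_from_db scores documents by cosine similarity entirely inside MongoDB. A pure-Python equivalent can be useful for sanity-checking results against the pipeline; this is a sketch assuming each doc dict carries "embedding" and "content" fields:

import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    mag = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b))
    return dot / mag if mag else 0.0

def top_k(query: list[float], docs: list[dict], limit: int = 3, threshold: float = 0.5) -> list[dict]:
    # Mirrors the pipeline stages: score, $match by threshold, $sort desc, $limit.
    scored = [dict(doc, similarity=cosine_similarity(query, doc["embedding"])) for doc in docs]
    kept = [doc for doc in scored if doc["similarity"] >= threshold]
    kept.sort(key=lambda doc: doc["similarity"], reverse=True)
    return kept[:limit]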
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
index d149f68b0..be1c66280 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_chat.py
@@ -156,17 +156,17 @@ class ReasoningChat:
# 消息加入缓冲池
await message_buffer.start_caching_messages(message)
- # logger.info("使用推理聊天模式")
-
# 创建聊天流
chat = await chat_manager.get_or_create_stream(
platform=messageinfo.platform,
user_info=userinfo,
group_info=groupinfo,
)
+
message.update_chat_stream(chat)
await message.process()
+ logger.trace(f"消息处理成功: {message.processed_plain_text}")
# 过滤词/正则表达式过滤
if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
@@ -174,27 +174,13 @@ class ReasoningChat:
):
return
- await self.storage.store_message(message, chat)
-
- # 记忆激活
- with Timer("记忆激活", timing_results):
- interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
- message.processed_plain_text, fast_retrieval=True
- )
-
# 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message)
- # 处理提及
- is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
-
- # 意愿管理器:设置当前message信息
- willing_manager.setup(message, chat, is_mentioned, interested_rate)
-
# 处理缓冲器结果
if not buffer_result:
- await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
- willing_manager.delete(message.message_info.message_id)
+ # await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
+ # willing_manager.delete(message.message_info.message_id)
f_type = "seglist"
if message.message_segment.type != "seglist":
f_type = message.message_segment.type
@@ -213,6 +199,27 @@ class ReasoningChat:
logger.info("触发缓冲,已炸飞消息列")
return
+ try:
+ await self.storage.store_message(message, chat)
+ logger.trace(f"存储成功 (通过缓冲后): {message.processed_plain_text}")
+ except Exception as e:
+ logger.error(f"存储消息失败: {e}")
+ logger.error(traceback.format_exc())
+ # 存储失败可能仍需考虑是否继续,暂时返回
+ return
+
+            # 处理提及
+            is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
+ # 记忆激活
+ with Timer("记忆激活", timing_results):
+ interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
+ message.processed_plain_text, fast_retrieval=True
+ )
+
+ # 意愿管理器:设置当前message信息
+ willing_manager.setup(message, chat, is_mentioned, interested_rate)
+
# 获取回复概率
is_willing = False
if reply_probability != 1:
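Both Timer forms used across these patches, `with Timer("步骤", timing_results)` and `with Timer() as t` plus `t.human_readable`, follow the standard context-manager pattern. A hypothetical re-implementation for illustration (the project ships its own in src/plugins/utils/timer_calculater):

import time
from typing import Optional

class Timer:
    """Context manager that times a step and optionally records it in a dict."""

    def __init__(self, name: str = "", store: Optional[dict] = None):
        self.name = name
        self.store = store
        self.elapsed = 0.0

    def __enter__(self) -> "Timer":
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc, tb) -> bool:
        self.elapsed = time.perf_counter() - self._start
        if self.store is not None:
            self.store[self.name] = self.elapsed
        return False  # never swallow exceptions raised in the timed block

    @property
    def human_readable(self) -> str:
        return f"{self.elapsed:.3f}秒"

timing_results: dict = {}
with Timer("生成回复", timing_results):
    time.sleep(0.01)
print(" | ".join(f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()))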
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
index dda4e7c78..2f4ba06e6 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
@@ -44,7 +44,7 @@ class ResponseGenerator:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型
- if random.random() < global_config.MODEL_R1_PROBABILITY:
+ if random.random() < global_config.model_reasoning_probability:
self.current_model_type = "深深地"
current_model = self.model_reasoning
else:
diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py
index 4b40649d0..5ccdec5a5 100644
--- a/src/plugins/memory_system/Hippocampus.py
+++ b/src/plugins/memory_system/Hippocampus.py
@@ -1942,11 +1942,7 @@ class HippocampusManager:
return response
async def get_memory_from_topic(
- self,
- valid_keywords: list[str],
- max_memory_num: int = 3,
- max_memory_length: int = 2,
- max_depth: int = 3
+ self, valid_keywords: list[str], max_memory_num: int = 3, max_memory_length: int = 2, max_depth: int = 3
) -> list:
"""从文本中获取相关记忆的公共接口"""
if not self._initialized:
From ac7b300326d51424b769cc1f1ddd047558aabae5 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:46:08 +0800
Subject: [PATCH 116/406] Update main.py
---
src/main.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/main.py b/src/main.py
index 05196068b..12b818771 100644
--- a/src/main.py
+++ b/src/main.py
@@ -18,7 +18,7 @@ from .plugins.remote import heartbeat_thread # noqa: F401
from .individuality.individuality import Individuality
from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager
-from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFC_Controller
+from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFCController
from .plugins.chat_module.heartFC_chat.reasoning_chat import ReasoningChat
logger = get_module_logger("main")
@@ -118,9 +118,9 @@ class MainSystem:
await interest_manager.start_background_tasks()
logger.success("兴趣管理器后台任务启动成功")
- # 初始化并独立启动 HeartFC_Chat
- HeartFC_Controller()
- heartfc_chat_instance = HeartFC_Controller.get_instance()
+ # 初始化并独立启动 HeartFCController
+ HeartFCController()
+ heartfc_chat_instance = HeartFCController.get_instance()
if heartfc_chat_instance:
await heartfc_chat_instance.start()
logger.success("HeartFC_Chat 模块独立启动成功")
From 09ff1d9db592d8efc5ebf53b5ca00fb27fc6788d Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:46:40 +0800
Subject: [PATCH 117/406] =?UTF-8?q?fix:=E5=B8=8C=E6=9C=9B=E4=B8=8D?=
=?UTF-8?q?=E4=BC=9A=E7=88=86=E7=82=B8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/main.py | 1 -
.../heartFC_chat/heartFC_controler.py | 17 ++++++++---------
2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/src/main.py b/src/main.py
index 12b818771..99578591f 100644
--- a/src/main.py
+++ b/src/main.py
@@ -19,7 +19,6 @@ from .individuality.individuality import Individuality
from .common.server import global_server
from .plugins.chat_module.heartFC_chat.interest import InterestManager
from .plugins.chat_module.heartFC_chat.heartFC_controler import HeartFCController
-from .plugins.chat_module.heartFC_chat.reasoning_chat import ReasoningChat
logger = get_module_logger("main")
diff --git a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
index 51b1a05da..4dd49e2de 100644
--- a/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
+++ b/src/plugins/chat_module/heartFC_chat/heartFC_controler.py
@@ -1,7 +1,6 @@
import traceback
from typing import Optional, Dict
import asyncio
-from asyncio import Lock
import threading # 导入 threading
from ...moods.moods import MoodManager
from ...chat.emoji_manager import emoji_manager
@@ -51,8 +50,8 @@ class HeartFCController:
# 再次使用类锁保护初始化过程是更严谨的做法。
# 如果确定 __init__ 逻辑本身是幂等的或非关键的,可以省略这里的锁。
# 但为了保持原始逻辑的意图(防止重复初始化),这里保留检查。
- with self.__class__._lock: # 确保初始化逻辑线程安全
- if self._initialized: # 再次检查,防止锁等待期间其他线程已完成初始化
+ with self.__class__._lock: # 确保初始化逻辑线程安全
+ if self._initialized: # 再次检查,防止锁等待期间其他线程已完成初始化
return
logger.info("正在初始化 HeartFCController 单例...")
@@ -68,9 +67,9 @@ class HeartFCController:
self._interest_monitor_task: Optional[asyncio.Task] = None
self.pf_chatting_instances: Dict[str, PFChatting] = {}
# _pf_chatting_lock 用于保护 pf_chatting_instances 的异步操作
- self._pf_chatting_lock = asyncio.Lock() # 这个是 asyncio.Lock,用于异步上下文
- self.emoji_manager = emoji_manager # 假设是全局或已初始化的实例
- self.relationship_manager = relationship_manager # 假设是全局或已初始化的实例
+ self._pf_chatting_lock = asyncio.Lock() # 这个是 asyncio.Lock,用于异步上下文
+ self.emoji_manager = emoji_manager # 假设是全局或已初始化的实例
+ self.relationship_manager = relationship_manager # 假设是全局或已初始化的实例
# MessageManager 可能是类本身或单例实例,根据其设计确定
self.MessageManager = MessageManager
self._initialized = True
@@ -81,14 +80,14 @@ class HeartFCController:
"""获取 HeartFCController 的单例实例。"""
# 如果实例尚未创建,调用构造函数(这将触发 __new__ 和 __init__)
if cls._instance is None:
- # 在首次调用 get_instance 时创建实例。
- # __new__ 中的锁会确保线程安全。
+ # 在首次调用 get_instance 时创建实例。
+ # __new__ 中的锁会确保线程安全。
cls()
# 添加日志记录,说明实例是在 get_instance 调用时创建的
logger.info("HeartFCController 实例在首次 get_instance 时创建。")
elif not cls._initialized:
# 实例已创建但可能未初始化完成(理论上不太可能发生,除非 __init__ 异常)
- logger.warning("HeartFCController 实例存在但尚未完成初始化。")
+ logger.warning("HeartFCController 实例存在但尚未完成初始化。")
return cls._instance
# --- 新增:检查 PFChatting 状态的方法 --- #
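The comments above describe classic double-checked locking. Reduced to a minimal sketch with a hypothetical Singleton class (independent of HeartFCController):

import threading

class Singleton:
    _instance = None
    _initialized = False
    _lock = threading.Lock()

    def __new__(cls):
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:  # re-check while holding the lock
                    cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        with self.__class__._lock:
            if self._initialized:
                return  # another thread already finished __init__
            # ... one-time setup goes here ...
            self.__class__._initialized = True

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls()  # construction is made thread-safe by __new__
        return cls._instance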
From 94b1b3c0e6041739bce1aeccf9aee0f2a2635dd0 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 21 Apr 2025 18:51:53 +0800
Subject: [PATCH 118/406] =?UTF-8?q?fix=EF=BC=9A=E4=BF=AE=E5=A4=8D=E6=B0=91?=
=?UTF-8?q?=E5=91=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat_module/heartFC_chat/pf_chatting.py | 2 +-
src/plugins/chat_module/heartFC_chat/reasoning_chat.py | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/plugins/chat_module/heartFC_chat/pf_chatting.py b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
index e4486a795..92e3da549 100644
--- a/src/plugins/chat_module/heartFC_chat/pf_chatting.py
+++ b/src/plugins/chat_module/heartFC_chat/pf_chatting.py
@@ -70,7 +70,7 @@ class PFChatting:
Args:
chat_id: The identifier for the chat stream (e.g., stream_id).
- heartfc_controller_instance: 访问共享资源和方法的主HeartFC_Controller实例。
+ heartfc_controller_instance: 访问共享资源和方法的主HeartFCController实例。
"""
self.heartfc_controller = heartfc_controller_instance # Store the controller instance
self.stream_id: str = chat_id
diff --git a/src/plugins/chat_module/heartFC_chat/reasoning_chat.py b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
index 95d3641d5..5a9732d99 100644
--- a/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
+++ b/src/plugins/chat_module/heartFC_chat/reasoning_chat.py
@@ -21,7 +21,7 @@ from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.plugins.utils.timer_calculater import Timer
from .interest import InterestManager
-from .heartFC_controler import HeartFC_Controller # 导入 HeartFC_Controller
+from .heartFC_controler import HeartFCController # 导入 HeartFCController
# 定义日志配置
chat_config = LogConfig(
@@ -181,9 +181,9 @@ class ReasoningChat:
async def _find_interested_message(self, chat: ChatStream) -> None:
# 此函数设计为后台任务,轮询指定 chat 的兴趣消息。
# 它通常由外部代码在 chat 流活跃时启动。
- controller = HeartFC_Controller.get_instance() # 获取控制器实例
+ controller = HeartFCController.get_instance() # 获取控制器实例
if not controller:
- logger.error(f"无法获取 HeartFC_Controller 实例,无法检查 PFChatting 状态。stream: {chat.stream_id}")
+ logger.error(f"无法获取 HeartFCController 实例,无法检查 PFChatting 状态。stream: {chat.stream_id}")
# 在没有控制器的情况下可能需要决定是继续处理还是完全停止?这里暂时假设继续
pass # 或者 return?
From ce1247f2fd3ede228a4ff8e759b336f57d0802e9 Mon Sep 17 00:00:00 2001
From: Bakadax
Date: Mon, 21 Apr 2025 20:36:38 +0800
Subject: [PATCH 119/406] modified: src/plugins/chat/utils.py
---
src/plugins/chat/utils.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 3e4cfa52d..739fc6c7e 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -76,7 +76,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
else:
if not is_mentioned:
# 判断是否被回复
- if re.match("回复[\s\S]*?\((\d+)\)的消息,说:", message.processed_plain_text):
+                if re.match(rf"回复[\s\S]*?\({global_config.BOT_QQ}\)的消息,说:", message.processed_plain_text):
is_mentioned = True
# 判断内容中是否被提及
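One note on the new line: in a plain f-string, `\(` and `\s` are invalid escape sequences (a DeprecationWarning on recent Python), so the raw f-string form is used above. A quick standalone check with a hypothetical BOT_QQ value:

import re

BOT_QQ = 123456789  # hypothetical; the bot reads this from global_config

pattern = rf"回复[\s\S]*?\({BOT_QQ}\)的消息,说:"
assert re.match(pattern, "回复某人(123456789)的消息,说:你好")
assert not re.match(pattern, "回复某人(987654321)的消息,说:你好")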
From ea1a6401f86742aa78361692aa48171aefdde02c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=A2=A8=E6=A2=93=E6=9F=92?= <1787882683@qq.com>
Date: Mon, 21 Apr 2025 22:24:32 +0800
Subject: [PATCH 120/406] 1
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 26cd30f61..7eca22601 100644
--- a/README.md
+++ b/README.md
@@ -98,7 +98,7 @@
-
📚 文档
+📚 文档
### (部分内容可能过时,请注意版本对应)
From 5b894f7f598e39abe08092394d1cd3291ee83ed9 Mon Sep 17 00:00:00 2001
From: tcmofashi
Date: Tue, 22 Apr 2025 01:27:04 +0800
Subject: [PATCH 121/406] =?UTF-8?q?fix:=20reply=E4=B8=AD=E7=9A=84format=5F?=
=?UTF-8?q?info=E4=B8=BA=E7=A9=BA?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/chat/message.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/src/plugins/chat/message.py b/src/plugins/chat/message.py
index 87380e7c0..b7afa8179 100644
--- a/src/plugins/chat/message.py
+++ b/src/plugins/chat/message.py
@@ -309,10 +309,7 @@ class MessageSending(MessageProcessBase):
def set_reply(self, reply: Optional["MessageRecv"] = None) -> None:
"""设置回复消息"""
- if (
- self.message_info.format_info.accept_format is not None
- and "reply" in self.message_info.format_info.accept_format
- ):
+        if (
+            self.message_info.format_info is not None
+            and self.message_info.format_info.accept_format is not None
+            and "reply" in self.message_info.format_info.accept_format
+        ):
if reply:
self.reply = reply
if self.reply:
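The rewritten condition above guards each level of the optional chain, since accept_format may itself be None even when format_info exists. The same check, sketched standalone with hypothetical minimal types:

from typing import Optional

class FormatInfo:
    def __init__(self, accept_format: Optional[list] = None):
        self.accept_format = accept_format

def accepts_reply(format_info: Optional[FormatInfo]) -> bool:
    # Guard both the container and the list before the membership test.
    return (
        format_info is not None
        and format_info.accept_format is not None
        and "reply" in format_info.accept_format
    )

assert accepts_reply(FormatInfo(["reply", "text"]))
assert not accepts_reply(FormatInfo())
assert not accepts_reply(None)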
From 55254549beffe8ccdb8a5ab401cec188e54fb480 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 22 Apr 2025 02:01:52 +0800
Subject: [PATCH 122/406] =?UTF-8?q?feat=EF=BC=9A=E6=88=90=E5=8A=9F?=
=?UTF-8?q?=E8=9E=8D=E5=90=88reasoning=E5=92=8CHFC=EF=BC=8C=E7=94=B1?=
=?UTF-8?q?=E4=B8=BB=E5=BF=83=E6=B5=81=E7=BB=9F=E4=B8=80=E8=B0=83=E6=8E=A7?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/MaiBot0.6roadmap.md | 16 +
src/config/config.py | 2 +-
src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg | Bin 60448 -> 0 bytes
src/heart_flow/README.md | 14 +-
src/heart_flow/SKG`8J~]3I~E8WEB%Y85I`M.jpg | Bin 93248 -> 0 bytes
src/heart_flow/Update.md | 11 +
src/heart_flow/ZX65~ALHC_7{Q9FKE$X}TQC.jpg | Bin 90138 -> 0 bytes
src/heart_flow/heartflow.py | 429 ++++++++++-----
src/heart_flow/sub_heartflow.py | 206 ++++++-
src/main.py | 6 -
src/plugins/chat/bot.py | 1 -
.../heartFC_chat/heartFC_controler.py | 94 ++--
.../heartFC_chat/heartFC_processor.py | 45 +-
.../chat_module/heartFC_chat/interest.py | 503 ------------------
.../chat_module/heartFC_chat/pf_chatting.py | 2 +-
.../heartFC_chat/reasoning_chat.py | 153 +++---
.../reasoning_chat/reasoning_chat.py | 1 -
src/plugins/person_info/person_info.py | 6 +-
.../person_info/relationship_manager.py | 4 +-
19 files changed, 708 insertions(+), 785 deletions(-)
create mode 100644 src/MaiBot0.6roadmap.md
delete mode 100644 src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg
delete mode 100644 src/heart_flow/SKG`8J~]3I~E8WEB%Y85I`M.jpg
create mode 100644 src/heart_flow/Update.md
delete mode 100644 src/heart_flow/ZX65~ALHC_7{Q9FKE$X}TQC.jpg
delete mode 100644 src/plugins/chat_module/heartFC_chat/interest.py
diff --git a/src/MaiBot0.6roadmap.md b/src/MaiBot0.6roadmap.md
new file mode 100644
index 000000000..54774197e
--- /dev/null
+++ b/src/MaiBot0.6roadmap.md
@@ -0,0 +1,16 @@
+MaiCore/MaiBot 0.6路线图 draft
+
+0.6.3:解决0.6.x版本核心问题,改进功能
+主要功能加入
+LPMM全面替代旧知识库
+采用新的HFC回复模式,取代旧心流
+合并推理模式和心流模式,根据麦麦自己决策回复模式
+提供新的表情包系统
+
+0.6.4:提升用户体验,交互优化
+加入webui
+提供麦麦 API
+修复prompt建构的各种问题
+修复各种bug
+调整代码文件结构,重构部分落后设计
+
diff --git a/src/config/config.py b/src/config/config.py
index 83e478375..bf184a002 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -28,7 +28,7 @@ logger = get_module_logger("config", config=config_config)
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
is_test = True
mai_version_main = "0.6.3"
-mai_version_fix = "snapshot-2"
+mai_version_fix = "snapshot-3"
if mai_version_fix:
if is_test:
diff --git a/src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg b/src/heart_flow/L{QA$T9C4`IVQEAB3WZYFXL.jpg
deleted file mode 100644
index 186b34de2c8115074978456317fd795e4e5f2ac3..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
ziCu1j*gM10;DgOVIhd8m^B#t-@k8x$`geRZS?G0|vHLRK
zN>5kb>;5?AaB5oyc`Z*6SQPD`XiLXAHY%^G23>x}iiZAAcXSXb
zx>;Ium0*r3yOW^p#B(D0-6k~r3Zvh3$ASVI(N6h0i~W5K`Vf`SO4gqheg-PJ*>606
z{4D^s%odcM1n()wTMnafe**U9NiUy;YZ{Xp)zmH=<%QxfO_(@LHJ?<0-b7VzT_
zLa~47b7R{A`gnHb+m?P2@3NN3q;0oD?b;nU%&bT~ftiA^7&>BD`p3!nwk6$48T!8}
z=R!)xNKCgZ(Yd#l(zZQn&7Vbo4HF@Pm&xR{$M4(+{>f?6XS(v=U_PFhWe=85>Zu^L
zxVdjE&qfF|$O$iv8MqK_=ff_3fLnrt`E(0NA8ss&j&K|3q#hm~o)Xg%lBVe*aTu=l
z1Go?Xv3MC91U87!s$2P6fdKW9#IAGK7V4BC-kQ)Afi+au84x|4nT
z6x>+0S}@KLgIr{bF`yh5ZBs-j3x}EQTe~uFrmjHQ;AbpBg}c85PT*xywTZ-}*qg|I
zX#ut*U0uW~7OTyrvEyur6xvAw;$r&oR$V~_V7)AX_ogqQ5Nu6DLGJA}P;<81;Ys#J
zZYEj~4m}RkZw&P2Dc=~sSU41%nhBEl-UA1I+?h*|T({>{nw<(P5*hw-3bqCwvU1{^V)p~+S+r|VaD>5Ev2
zd(fY%o{}X)QOh5@1p`fD$~L@>4=h;mktu>MokNZhgDfGtgD=*p_?;eij~m#p{1GBO
zeDpZ~B%qMNRP*+}1VTI)@C6(qYX)9Y1s@}tD?(4%5RJOktU|M7=`M&}JkLIW2~~Tg
zX}~BF+WXDkCqMK-jpE+dr>)lJ^eJ?yWuk5?$O9g=o9v<$6&2w>C`0TaRf99w4l;}h
z%0Sf+s@NBMC+Z=Z*%={Z@^m#jAPH6zY7hQ){2y*^aLC1v+z%m
zy=RWtYYANTX@)M7FX>^Rk+BGX`pi3#mpxm5q0gPEE`ao&rR6H66Q}dJJ?LFl
z%&^7*UoS&UEv8CiNh0hgk5`jHxPoclQ=sxN(#I>{Qz&W=;P
zg@Xn(Tj}O{oHaxpqEx?KK>sHgD79$~#HXgwlivEW
zDk->0#2>ciG+PtW;roypXn2JJKB_o7^OsWHDuX3`FG}o%A}1Opuvu-<@7G2n22g!`
zz*Z#^CBh@Tgn}uqu-XLI+BK&uySo&s=s`uj
z21v!#XW@>7eERAKJDpJ0fP)@q(}SiF^WPSeH$fVBH#X?qZnN>+;X}A%-n!JaBsSqE
zwGXx#^uXl>7ZEf?H!(hl3^HhW*(Xa>wd<$uYx*MpYF&~@gW@U!oK;tjwlfRhMl8_3
z5h^F~A6^2(dvbpNuochrp3t|1KKadq4>Cm*+|1nk;BP7Dzp$CNn%xh)1jr$0Fa>R@
zTwe$!d-Cs#RhOm-oqhR^mS=x6g)QX-mp}Vml?ZjbxO-w6@DtJH^6v6ufZnF>)ASC^
zRc$#s1Ov-bdMb}J<-8>OkTc=#=1X$1t^JVF4yY6EWi^`g?fqs6yaeTaaZnD+K3s5o
zgseUT4;dJ-$?u0di5}mJN50>OYHi=d9T^zx#1kzuP-K))(1_9JJ~43I7o`Q}3&pkX
z)|&R!YR1S{4iLnHl@wocb>>Ja&gBt_0}Z|PjnRq8ze*_VK19URC<`JtLEnCB*M7)j8^?%
z0*PEa!PPN&!C3a`(l|jp>tB&Y!xxtRSj^3dRp1iHOHC@DK39T=>-GYrQvlLy3UmQt
z!5Cp#5I{&u23(CMjd)xFs1F-UW#duH;b-BrPOc{ZD}
zLigy6(3{~qP3{T_-?sWuA0}_Rl{LBYXeS(ZJ}NTHLteO}$@e6kJ^zZEsFGVDiBO8Emej
z-~~p1U7Mi8x^LgGuy%KDbT)t1@16oU4sSd3#nA?#+^IQ9kK{!NKK9h8CoxS=9=CSo
z+~}(PYwJiLZbBVdtB`-~O(d}U*gey@Z@H*Nc`=vgE6YYngoBBT*&V)h6mj)XV>EaK+y`tDmkwW$
zDFY`wk*<+Br3I@2vJ}aUR3LvXwEWIro7E}`5-`PEw@K3Dj#ZnO)uEo|oDbOs_N2i6
zTs`<1C_xjt&ny*|?wS;GyWN!{M-Q=Uv|S|J2d*HCT*rX=};E>uQ`@t~T=3iv*A^Yf#_3rA({v;-Qr
zQu7~B78#<0+<3MqdB-&nS;41E9UdeFLg@&rrurfl&0H$RR^^F7E&d^qC%oH|r~8i!
z)e~A8VXOfN{2?%rOr-Afcl${NSC5hymM@k|P<1cjgDbaY4X=QA6UJbLZ1rg-_-4`&g0ufe9MmSPS>QnzOgY#E#HNr6hAn9CduT=3#11$
zid){8@t>rxKnen_ilr-_B7n_}R+vtt0%zU*F@9QtmW?Zr0gbGF#Kl(%F$peTm{UTN
zC=+TZGR~F~g*nUMF0%iL9j3~<4M7URMouZ-eDRo5Va)J)PcqlxSe`Wod?^Pbps77z
zbYz;XxfD1Sl7u}>hY|Nm1?a>n*;`C@iYJ#&{Ap(0LC63k+layGuJFGS4%40Og#0
zYA;xQfNK;b(lkPmzeg7zDnYV+F
z)Cj}+h}NH+ec2nf;0yz7>>L23(Q5W-pEOqdWBXvsFXy0&*j1Yent7V
zNn+Y9T0vQ)pR51nwH4z&VLH=XE?JdF(U$?jJu
z7zYwbaheQNYzj2S-v^w(-NE?3uYQ+12pY0?fvtG2OvAX_XHD1xY8Jrv`}Ho?_kJ;p
zOyPm2LDh3y)xlD?d}4uKT`DJo$3M_Z)=(4W*!dky981Z*b~l{I_pi1cI!{)1XzfhAhS2ix@OJ5t;XahZ^D+KCZR
z0*;mS4iAw5BB`OcWzQ>U@;hh<(KL*Rux#yuP2^8uNY%@xI(Y(SPBP(
ztWGRHOzQKFF}}122h-zwSm8?ZtS~}+nkFE?x!mWm0g)M=ov=;-@&X#5*L~ElvFZmb
zriY$;W&)WeA|{50fCl^PClIT6aypoeLYDRx;@wXy@vC9>sNhth*R5D6RZr=JNr+rw
z=N)+ZD64rjIF}l<42E|PI=^1v4bIJIe$Z<=j;f-$_cq|?l|t@)ZbIXL{lEUg9B7~K(3xf;rc&+I^82z{a1be8=hUSgh=v6GK*?N&r@
zE9iU)>K!^jA9V9thg$BEH91OhbAdF-B<{=SlM*1SeWy!p=}zSZ$5S2v5{@5wkiPbd
zg1^fbgF4{lEc#tO`BR31>A8@4lL;_|j?visF1|3%)|-3bq|}D};qu+wBasejgZCHl
zrS+L*D`%9V4Gz7BYqG!fMV&j7^H&;o>|QrH{rp_|DHef`JkYyMy$pr%DLM+mt1cne
zrCXhAW_|lQ`v5*mRMuynP2r+taO24nqkS6kq8bh^AQWLw1TulWiQ#b+oDGqvLtY6M
zhhqv+*bzX+CA>UyNQ8&slve%8lJVYUBJB=$5V-#8PRl;1!RO*bHnLPaQGWleiIMK_
zu`Pw^0iTwg^ybh{=P5nMlXtdXxcF4GIhm`p97g6TBp+5e=>6IjO>L`s2%9Iu_k(~Hl_TI+8^aId=LS1FsSQcb+n56Tcp2iqorp=7UL0php6a(GYdNHQM-=3478kd
zR&Z-AM6J;fmNU-))6&}y!C4%k$dJy#Ao-P@M6-<|d9}5DMt5;9)ZEBeXaBfEm_(q`
zt0i}R&i&N0<;gftX2Llk=U0^lb$uU+*>oatb__DNY^0SZ?K4yqkLrcewJn=%<=`1_Z&syAwUQI;{3J3DbZj1
z;-1q3?%c`(8S#oDb+s=K{k|NvKdn=cs?8I&to`5!GsvVOtWZy|ZG{frn_L}6k8|D~
zGy2#cA<-giGc*M6RjR%R2q*<_CNab#z%0f~P#J-1RRKh1!C&A-vtwV=4
zj4luCPG%kYFD<5AUi~_5*Gy&*lFsoV+|rn|@?$9UrqgPPlo*s^=Zej8VG`R^}2hkn8m#5Ocs9hggrPbSLQ6%lyu
z=jAwC?P-5}Au2nq^TbKa<19JDN)IO}Fc%alUBq6LdKnI9McnSGuF-esglX_!-9=a5
zO6vT`fJ=sGAx44a7>^Z;(=M^8lR+_wzfL$)YZ`neKct>^PMl~
zC02GIKys%kH5+KL*6s>R_MvdGBO&!B7ufMoHxS9MT$3XRE8UK9oUZKF>SE6JT!9W)
z%7Il{d^G+5T%eRj8KX!9V?L*$HaBM*5mhKJ;DG)XlW}6p!kJ+yNTMO=vSA04b*gb$
z;d;w^xOh{VO_xLeSHzmfTCz2JNv-y-f$lt_rO4Oc0V>yL0~z~|Ru@`kx1XSDC9chX
zo=h#E`mx-Gwyj&P*HPUThU}$Zg^zozqDLrvrC%KsPg^;Pv%Ng^s-DV=glg-XQ?h%
ze(7Bo)v^%^E?g4D9=`Vb9*!CNp-*n{d+Sl(5=xRl{K|ye>``(x5$C9To-0ER;ze!X
z(Uj5k`@quuYnFahgwF2w4+0@ULtXrl4r8}N4chL@HZvnP`yEbWrond{ea?~QzR%Ae
zoc{by`#kUSjrX5-j6Kf4qwKx*-fPZzUB9{4a9=RYn!>lq`oz@YL(OMbX_l@4taMN%?Y1k_yL`w%U{KZNcXH*N(Kmy@n
z(DO?2f*$;fIgbT^qwf*9`U4)H^Jo)Yp*X3(kX|qSv_d;ZP
z)uJY3&!boD99ZaZUcSla=m*wQdUcL(n_OCOBFrq&PJ)lPo?3smy~MhSE;R12!)ob
z21dOOkxmWJ;srLQvRW`394|G5{B4CETK8Q)HjP0)I@xGyLwq3#%Av(oo$v~*#Cy){
zF$t9~_6!mL-6A&trfy9^OtteN;XuF!E3VA!)H{D{*V`u`6+3}WqcL47xqDHN(x?!q
z0RS|cASVsn!q<)N`2^{g^LM0YaBy#JOO!8kJ{3>xZHxhORZ4m4J}BXP&eJG(9{wAC
zcHT+)B57hzwIFD^?)tn;k(LP>GDy~AFPVfcWQH=&%g@gbs2rQa+hjBd4YQ}MEW0Dk
zpvv~OI&pJQLG|o8(60$0$H0RcVSy&L(ny{!a-p+4fH#80{W%PZ%W^8XnC`
zyPx>}9Lky67)@r#6*ngeZ1uU27uuHF1yiu=BjXhEsWcmE0HaZr(;MUE`&l9K`TnBW
zmT@CJ+oxxc2ZItAD%9BC>tGYY9n+r#BX2()L2PfLBx8U>QwBHa1543Iqq+O;e#~GK
zoCYGc8DeJOVLekI=?cr(MfFxNqis9~xwgL)J~9J;>vz&Q9-eHGBalqW9_M#R)EkPi
zzVdNd@LS}y3g*_}D?SIEyIGlR5x%0(Z?+M*ID#Apwh_;4lOd#~IaXW;46xw7UOUqM
z%EazO@*uc5Sfch$Ram;;kLeVwCve5Z_0mGrA_xV}QtV$;H=88}oYSj{sL>D}@0-o}
zL%G?!P2I0))Q%_bd-CJm&3H#YJ&qIS@o#Fd+&q*P4pa|78fQ){{VqZufQh_mUV_L+
zd^F&@^^*DvJsX2=Z}Wt#YO$FY1kb%9fjH}vG!QTkx*)It_~N<-L7WoFst=ng5&a`X
zhALg_LoLVUH{F(zwRkE|cT!AS_lD(b?>41o_OrS#FKtaW#O!@tQ9SjVWv)AIq0Rp!
z`($4y0(`7&!eC<=-i>lR1jstr#-vD5E9c@hoE}fprEPkcvnuE<0FBX>@jbF5A8co3
zyPb8Q#NQo@M+vvjLsNK#%olL0ZCoISUy(opkYSd6`7=dK`lvK;t_dh(=$qsJLlP^!y@(JN_x63!}S?D{Xg@tiL*l6A@3B`qo_#b>k?n(sYZ
z>5hh9tnam?g-CHbkmdiKPH4OYgL&{NG5Q%0MN=@_CGHos_vk*_cML)c%J%*Y=oNh0bWfIEM%MDD4yL&ct
zdap~R_SKHP!98;bPw(-yEsf#D+JAXJ^J0uLI3;uRZ4o+WJ7CYbX;P#?rzVWP|HfpW
z+F#7`nH{mPexZVf22t*tG+tg_1RLMd(o#Ufoxx6Tc({|Flky>$;tf3SRY0CZHv2X4
zu{f^H1(MM{?$5iBoTB(7qfnX?CD_+qEYmi<*D!Md_!IZxkGJg%O=4U~0u;pkds+oi
z$__p;ydgk2z{WR_Wj%v^yGVXJAY^8On)8KRHmmLK+(93=>51>w_r1HugGmdA7s_%0
zI3>O8Q>PDFXPP&40vMQC!j17+3<_+$O`np!Uo?EQonz6Sl_g8=01OJh6AM22ZSrw&
zz2<_@Lit1EyBBw){d
zD%-Q$!P#ivon{9Hdpyvs4DveaH$@+zOF}>$1PW!6tb=*01Qo5zc-twxc-Deydr|%i
zmB9*rVieI>cVtU;f+ZX7S$vh-thOWhHaj!O803d@7HVL`2@e>nx7u33F%Oz$FnY59
z(Af{+Y!i>c1PgP|gDJm!{GEE^v3w11Jadw_<<|maMWY^B-LsA0_pVngi|M1l-U!)Y
zhZQ+-#s&Eqc^9+gc>B?|WyMH;wn+N7;D~tGbe>(LbDI>^8h|l5Pe6^zYLa&?5%>nO
zctar>5BIO}J{@%MTE7vwPj5#icRn5MO7sK7BOF=J_9jONdT2aeG@$_
zny}w86oXSFMrGKc(rGLp{cm0A5=o!1g8fAM!@@+6Ja*zT3-Um^XzQl3MQ&t%5NRWI
zFX&KJV!ZK-M|3QGVAa>c$f$|{T(p%60W`qXyh&UkmYcq}`9-O14qxNaxNO=JnP#;o
zR#%QMm*V^OitO
z-FH@?mG>^mvM?5XB$rYNX4USc)Nc{q`=6pxc!D3~$UQN39(Nm@RRqrh1b<_~3z<0-
zy=5bZu%@U!5kfkQK)z;6=g*VwU+HIiQKaY^3zCh-8*k+*$E7Wu30sds|tXYH1u39?xNs{w*Po}3SI!%u2=q6tmN
zTik12@@DJ~4V^Klap3cBI=kF)F|`OKDa>jigYMXMeF+91R}0RNLeXV#P0+Oo{k^s?
zA(E0SAE|~&HgfqwI6`-sqb2x`HykdS(SAmfBmkr3j2D=4i<|K>YdxW!`=W{a0*nIc
zqszu>Au4wPB^@}O%hVg-uHf{9;LylF+gUiDHW7gNXKanuj9KKZ^Vv!zzMJo(MWaBy
zCn*9dO{Ms5Y6FB_0?TZYNk|;R+8p`|Bzn-5OL#SrunF>Xg9ZPbLL%PZ@u-D04ybS2
zN5e3I0BwyL94-!i99<7@-v~r-t_31JOmNtt4BG9e)iTe1`udr(0;5!aF1euq9O;iL
zh{m`;!DDYOxkZi3meBeBMhIeeMf{lt0pIy_UJuqW&l*Sbvj-0}zuAyaovFB+wHu-E
z*~opm2ir|sghRPe^&kfz085FOgWPM;?6JdxB9Q%M2B}EzJV05cgY+X%OodmCzpMn=
z?oU&fUyb)^{g@qvU-@9$a-d5=Ul%So
zjsiS$98RnNz!h$!XP5`$Fl_q9>vKR43btS<2f_K1mMCn7qHs@K1$!Yys~kK7bS^o8
z52mOuip;}R^2Y@Pdq>4yo2Y=vYQ
z>ikd6l7o@tLla}-@&qvBAlDwmp)8LYyc?RCq0y%$8qQ(r@lCfN(#F2t5)9Bf9A0$p
z8=Y@&C>Ch8fW#9>(_}dEQUzy5%)iMFY$kwW{V60Aj+K~Ye=J8hocbSbSpWC7dThUh63HT
zX9o#aDk>^Qk35iFVmDdl3Lsk`0_{F2ed#
zs6b)b+w6l;ca&plcl-zt&dOPn)3VLz$?DkjM%)8a
za~9LeB}MZ;!?A^dfu6Uc%Uim}L0taS-4i?r!G4of2jp)ijaI_@OYqg~e^jTt#4l_U
zN#BmCcm4QsL81gg*-mOtIKO0dcLL>240s=NZJx0OI((?^Vl$NcMd{wUkMfbf${YU0
zl;4MP?$*Q8)}9itbl5HUP)ymssfU4e0}aSal%9DV<`As=A+#(8Qur7i8%VH1a@VgR
zKc2+*Buc~9@PtV_7&@+1C8wBWKrtfVK6|-s1CKu9r$g5^MXQicXoKU))Mp2m7*Ej_
zhXR>t*k(fz0BQLx99jk}&buv|^ng)H$0!HGXWZzJDDW8Ee_4@pgcu_5a!Nc}ZR%jM
z`RF7>$OCl`_+ohkS3Bk%S^GbUj07UUfiMfF
zdK~ZlLSDi-jhb}uoY}>k3=c6~e*|>Ux6Om8Xhbq8R1pAVCI(L|kV2&R@CpIEx}MhN
zbTdP_4N21?8!w?1B!;9A;0PTib*+aKCQJ`gt1o7+EJ}A9ZHqGF)#z
z0UWNV0ni@O?m+TF5Uv5rhcHMU85wEvLV-l{*GUxpa1XXYr=Bf&f;2QT0*Y$xDUix=
zLNjRCW6o-CSHuXKVO_9@zgFz(9U^%!W@9~UnB%g4)bZ?$Vz)!Wc^|5gc_P6;xAQvT
zBC@u5y}P{&p!~uGsD@2*qq`Id>PEgAqvy@SH|S|P5BiJ+0&~E1;pW|4vI9>qhM?*7
z2(NKK1#ND#k7&V2nu1G%kQ&42QkavgIHs35XeV&
zyp??Je$7VRo3wqrBFE3MxeDPmp#SeC`6q
z=CqVAUy3HG&)#90dI{n*mbOsMKnAhHHKdly>Egf+Mtg}BfjNbj|Db87zc**=>aH6}
zNJs*ErvMYycxzS%#ukDS%w_E@LE|gsg>Q>#5T-^@XWMTU_>;`{-HnPfU7#-jDTecP
zZ7)tT0cXAxjXsbLo74=4!j!;jGj-Zj#y)hL!>^kPvg2VEJkP2-f6+!T8H08ksn%_|
zW>`AJ0xM*Sop4MV#ptoH?@~ZJ1@ZESJjzjH=`n4}Mv1Hr5{XNDtJYyE$MU7jSmk}&
z$4GqYsN(hv-V`TD+vdXfQ)wT%NJvo)-~g|_3ZfW+p0xVg@B)|^<@AH}TA5%1-OMGg
zoz+Xq%22TB>e-uRqLAmps{jU8z3OfzUX0JC@%SOtbHQ~olQvF;5~i!_f^qLiM;f&`
zjdiD(h8h+UCB85opg#p}%w;*Y2bo~r{e#~fb3hwvYwuOX8E-7|`%b`Z!OQcbgNr_p
z2L(d99@|-{K1Vi-2XgEmLWE>G{fbs|o#$ZUlNC^ozQ`>nHtVEJ|Aj!e!jzmpL#O0S
zXE&soyix)%D^=ez0#m0n%tV*S!Q9tgI6HBw#zVMMIJBQi)?1P9o=kt{A4EbsH3Pg^
zP-Y??R~z*MFr|%as9B5xO4Cb
zmHbiMY*V4m^B{y*4>n;&xw&Jwc^~AXvDr2&T0A364W8P!g(uiK+KoF&E8)k!S1-=4
z_#|`0b@C!11N+#&YkUtoU3%Evl^x_HeBP(~qi|co1%d;)P!v&<8yZ4G0k(nvdpH$u
z6NR>_#NwzVH{8_eM70Tf%C1tcQ>)5G+%V9^%>dZ##+g!aW-=U$O_^YiY1@8i;U9_V4{T0gQ8{-9*O!6Fs{FT?%B6&Gh
z?zX4Y{;BjN4hooJG0Yu|G}s|=R83P@J|Ae(XEYyoXJi=Us@0a;w-pC0N2VxfSAwcZ
z+O0YsLO8*rQ36N~JnJ4R!Z%5xk5KTay|1{HrCeL=Si+Q)S^#IcI9@t%ly8`euk|`i
zG*EeXVe0WOLy8YosU_d4e-~ta%nRil1RCyBq&lEzfI<`v4L3hVsz#{2`7ikCjILY4--#o#!KwX;JeeI=wL*ps=z&R=ZuBU+(FeR
zurg(9O5^F;xy}$K>UWOg@s))hPC}QtF%iham@SK+xa*2^lty26ZyYgda8)2CRBcy^
zh!50Z5wRFKTZAH=M7vi%)^;OSPo95AdhZoCUe*&vPSgxYn|p4PIL8?60%n6e{*5Y{
z7m>AZ#I#I9O0k~N<`UZQUM-?s*d$pIrUR1i-ui>uM?5hl#c>rDx4(h?WD6C$+k+u|
zNh=jI37
z;7|e+gC%TrmPYO!-|xo-HCaUbS(KPg9~h!{If_^Rc)2`wFMs+VhXA5moBOw{V>dm$jQzvH%LG9a?_IPMdUa8priyQ)FcgO
z$-l-SGkwBU160Bq^x|!~M6U$0gnIAA3fhi{
zS(}8ceNcsj(6vOEifRJHRf27C*mT}zvfPJIy
zy~lj}qt+%~*0{5F+Y9l<6%@vu$H$wSID;$o`j>V`;qC|b7Qg%CI~y%eaPHun#1YWa
z`KIP@*{0VT&E+l+X4({!<E16fwcpC-y;71VkBG}>
zOO0{brfgu(a{lg-OHjf0kX5@q2&e*;ZXy!|!Hr-JEJoPxzALQ14!FP2WHPycs!L#(
zfd*T6fz?Y(_iQ6xh>0?Wk!(h1?&(GAiu7vCsGRc@v%j;R;-#Tj)k$tk%E^k{VaC}}
zPuim^1LNV6w-lU`WlM{tmZ?$_7$AHfm)(kz@`w>$4RnovEC8=SJ@$4)JU7MzRxI0de^
zv7w=2FhBz&0~D`T>}X+pU!5G|8Tz_%Yw4D#@mMAE%#UryE2U#8opu-j*8&2qg(zP!ATn2i
zK!&V#m>r}DyMA~;^Aoc&b|+2&9}6-GczJmg&A2NK+H#Fw$ErT@zC0?tvpMN+Xc*!A
zykj|}^J5v-(_ayC95GSwZ4=^*LmE_BPs1@)NgS{rt5(^c9LUOYi;06onzUSPm3jGR
zi+-PsVx@;FYppsnbI%mmK9PKc5*2128ZiF-B+>!aOb{N?{nI}E-nL&ZjgTl?LhV<*
z>YtWf%Kkn~5^^zUzfur4U-*clv=`#0BRymSiUvY?KUl4c-LtjD2t
z*qw8MYaPJ~ztCGP+{-vi5!vNv%%8zPdw6pu3KC90YvQLL+vnwjM5R>GJFw50b2EuL
zKYL0NUgQ`uk>_UTJu`mIov&>TulZ}lGkR~5*>A4a?uewy-|g(6PW#UBPOV|f |